author	Ben Cheng <bccheng@google.com>	2014-03-25 22:37:19 -0700
committer	Ben Cheng <bccheng@google.com>	2014-03-25 22:37:19 -0700
commit	1bc5aee63eb72b341f506ad058502cd0361f0d10 (patch)
tree	c607e8252f3405424ff15bc2d00aa38dadbb2518 /gcc-4.9/gcc/config/rx
parent	283a0bf58fcf333c58a2a92c3ebbc41fb9eb1fdb (diff)
Initial checkin of GCC 4.9.0 from trunk (r208799).
Change-Id: I48a3c08bb98542aa215912a75f03c0890e497dba
Diffstat (limited to 'gcc-4.9/gcc/config/rx')
-rw-r--r--	gcc-4.9/gcc/config/rx/constraints.md	108
-rw-r--r--	gcc-4.9/gcc/config/rx/predicates.md	307
-rw-r--r--	gcc-4.9/gcc/config/rx/rx-modes.def	25
-rw-r--r--	gcc-4.9/gcc/config/rx/rx-opts.h	31
-rw-r--r--	gcc-4.9/gcc/config/rx/rx-protos.h	46
-rw-r--r--	gcc-4.9/gcc/config/rx/rx.c	3443
-rw-r--r--	gcc-4.9/gcc/config/rx/rx.h	665
-rw-r--r--	gcc-4.9/gcc/config/rx/rx.md	2641
-rw-r--r--	gcc-4.9/gcc/config/rx/rx.opt	141
-rw-r--r--	gcc-4.9/gcc/config/rx/t-rx	34
10 files changed, 7441 insertions, 0 deletions
diff --git a/gcc-4.9/gcc/config/rx/constraints.md b/gcc-4.9/gcc/config/rx/constraints.md
new file mode 100644
index 000000000..bf0edcc97
--- /dev/null
+++ b/gcc-4.9/gcc/config/rx/constraints.md
@@ -0,0 +1,108 @@
+;; Constraint definitions for Renesas RX.
+;; Copyright (C) 2008-2014 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+(define_constraint "Symbol"
+ "@internal Constraint on the type of rtx allowed in call insns"
+ (match_test "GET_CODE (op) == SYMBOL_REF")
+)
+
+
+(define_constraint "Int08"
+ "@internal A signed or unsigned 8-bit immediate value"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, (-1 << 8), (1 << 8) - 1)")
+ )
+)
+
+(define_constraint "Sint08"
+ "@internal A signed 8-bit immediate value"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, (-1 << 7), (1 << 7) - 1)")
+ )
+)
+
+(define_constraint "Sint16"
+ "@internal A signed 16-bit immediate value"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, (-1 << 15), (1 << 15) - 1)")
+ )
+)
+
+(define_constraint "Sint24"
+ "@internal A signed 24-bit immediate value"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, (-1 << 23), (1 << 23) - 1)")
+ )
+)
+
+;; This constraint is used by the SUBSI3 pattern because the
+;; RX SUB instruction can only take a 4-bit unsigned integer
+;; value. Also used by the MVTIPL instruction.
+(define_constraint "Uint04"
+ "@internal An unsigned 4-bit immediate value"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 15)")
+ )
+)
+
+(define_constraint "NEGint4"
+ "@internal An signed 4-bit negative immediate value"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, -15, -1)")
+ )
+)
+
+;; This is used in arithmetic and logic instructions for
+;; a source operand that lies in memory and which satisfies
+;; rx_is_restricted_memory_address().
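+;;
+;; (In the match_code tests below, the string of digits is a path to a
+;; subexpression of the operand: "0" tests XEXP (op, 0), "01" tests
+;; XEXP (XEXP (op, 0), 1), and so on.)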
+
+(define_memory_constraint "Q"
+ "A MEM which only uses REG or REG+INT addressing."
+ (and (match_code "mem")
+ (ior (match_code "reg" "0")
+ (and (match_code "plus" "0")
+ (and (match_code "reg,subreg" "00")
+ (match_code "const_int" "01")
+ )
+ )
+ )
+ )
+)
+
+(define_constraint "Rpid"
+ "A MEM to a PID variable"
+ (and (match_code "mem")
+ (and (match_code "plus" "0")
+ (and (match_code "reg,subreg" "00")
+ (match_code "unspec" "01")
+ )
+ )
+ )
+)
+
+(define_constraint "Rpda"
+ "An address to a PID variable"
+ (and (match_code "plus" "")
+ (and (match_code "reg,subreg" "0")
+ (match_code "unspec" "1")
+ )
+ )
+)
diff --git a/gcc-4.9/gcc/config/rx/predicates.md b/gcc-4.9/gcc/config/rx/predicates.md
new file mode 100644
index 000000000..85c9521b2
--- /dev/null
+++ b/gcc-4.9/gcc/config/rx/predicates.md
@@ -0,0 +1,307 @@
+;; Predicate definitions for Renesas RX.
+;; Copyright (C) 2008-2014 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+
+;; Check that the operand is suitable for a call insn.
+;; Only registers and symbol refs are allowed.
+
+(define_predicate "rx_call_operand"
+ (match_code "symbol_ref,reg")
+)
+
+;; For sibcall operations we can only use a symbolic address.
+
+(define_predicate "rx_symbolic_call_operand"
+ (match_code "symbol_ref")
+)
+
+;; Check that the operand is suitable for a shift insn
+;; Only small integers or a value in a register are permitted.
+
+(define_predicate "rx_shift_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 31)")))
+)
+
+(define_predicate "rx_constshift_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 31)"))
+)
+
+(define_predicate "rx_restricted_mem_operand"
+ (and (match_code "mem")
+ (match_test "rx_is_restricted_memory_address (XEXP (op, 0), mode)"))
+)
+
+;; Check that the operand is suitable as the source operand
+;; for a logic or arithmetic instruction. Registers, integers
+;; and a restricted subset of memory addresses are allowed.
+
+(define_predicate "rx_source_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "immediate_operand")
+ (match_operand 0 "rx_restricted_mem_operand"))
+)
+
+;; Check that the operand is suitable as the source operand
+;; for a comparison instruction. This is the same as
+;; rx_source_operand except that SUBREGs are allowed but
+;; CONST_INTs are not.
+
+(define_predicate "rx_compare_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "rx_restricted_mem_operand"))
+)
+
+;; Check that the operand is suitable as the source operand
+;; for a min/max instruction. This is the same as
+;; rx_source_operand except that CONST_INTs are allowed but
+;; REGs and SUBREGs are not.
+
+(define_predicate "rx_minmaxex_operand"
+ (ior (match_operand 0 "immediate_operand")
+ (match_operand 0 "rx_restricted_mem_operand"))
+)
+
+;; Return true if OP is a store multiple operation. This looks like:
+;;
+;; [(set (SP) (MINUS (SP) (INT)))
+;; (set (MEM (SP)) (REG))
+;; (set (MEM (MINUS (SP) (INT))) (REG)) {optionally repeated}
+;; ]
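+;;
+;; For example, pushing r7..r9 (12 bytes, highest register stored
+;; first) gives:
+;;
+;; [(set (SP) (MINUS (SP) (12)))
+;;  (set (MEM (MINUS (SP) (4))) (R9))
+;;  (set (MEM (MINUS (SP) (8))) (R8))
+;;  (set (MEM (MINUS (SP) (12))) (R7))
+;; ]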
+
+(define_special_predicate "rx_store_multiple_vector"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int src_regno;
+ rtx element;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 2)
+ return false;
+
+ /* Check that the first element of the vector is the stack adjust. */
+ element = XVECEXP (op, 0, 0);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || REGNO (SET_DEST (element)) != SP_REG
+ || GET_CODE (SET_SRC (element)) != MINUS
+ || ! REG_P (XEXP (SET_SRC (element), 0))
+ || REGNO (XEXP (SET_SRC (element), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (SET_SRC (element), 1)))
+ return false;
+
+ /* Check that the next element is the first push. */
+ element = XVECEXP (op, 0, 1);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_SRC (element))
+ || GET_MODE (SET_SRC (element)) != SImode
+ || ! MEM_P (SET_DEST (element))
+ || GET_MODE (SET_DEST (element)) != SImode
+ || GET_CODE (XEXP (SET_DEST (element), 0)) != MINUS
+ || ! REG_P (XEXP (XEXP (SET_DEST (element), 0), 0))
+ || REGNO (XEXP (XEXP (SET_DEST (element), 0), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (XEXP (SET_DEST (element), 0), 1))
+ || INTVAL (XEXP (XEXP (SET_DEST (element), 0), 1))
+ != GET_MODE_SIZE (SImode))
+ return false;
+
+ src_regno = REGNO (SET_SRC (element));
+
+ /* Check that the remaining elements use SP-<disp>
+ addressing and decreasing register numbers. */
+ for (i = 2; i < count; i++)
+ {
+ element = XVECEXP (op, 0, i);
+
+ if ( ! SET_P (element)
+ || ! REG_P (SET_SRC (element))
+ || GET_MODE (SET_SRC (element)) != SImode
+ || REGNO (SET_SRC (element)) != src_regno - (i - 1)
+ || ! MEM_P (SET_DEST (element))
+ || GET_MODE (SET_DEST (element)) != SImode
+ || GET_CODE (XEXP (SET_DEST (element), 0)) != MINUS
+ || ! REG_P (XEXP (XEXP (SET_DEST (element), 0), 0))
+ || REGNO (XEXP (XEXP (SET_DEST (element), 0), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (XEXP (SET_DEST (element), 0), 1))
+ || INTVAL (XEXP (XEXP (SET_DEST (element), 0), 1))
+ != i * GET_MODE_SIZE (SImode))
+ return false;
+ }
+ return true;
+})
+
+;; Return true if OP is a load multiple operation.
+;; This looks like:
+;; [(set (SP) (PLUS (SP) (INT)))
+;; (set (REG) (MEM (SP)))
+;; (set (REG) (MEM (PLUS (SP) (INT)))) {optionally repeated}
+;; ]
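+;;
+;; For example, popping r7..r9 (12 bytes) gives:
+;;
+;; [(set (SP) (PLUS (SP) (12)))
+;;  (set (R7) (MEM (SP)))
+;;  (set (R8) (MEM (PLUS (SP) (4))))
+;;  (set (R9) (MEM (PLUS (SP) (8))))
+;; ]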
+
+(define_special_predicate "rx_load_multiple_vector"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx element;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 2)
+ return false;
+
+ /* Check that the first element of the vector is the stack adjust. */
+ element = XVECEXP (op, 0, 0);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || REGNO (SET_DEST (element)) != SP_REG
+ || GET_CODE (SET_SRC (element)) != PLUS
+ || ! REG_P (XEXP (SET_SRC (element), 0))
+ || REGNO (XEXP (SET_SRC (element), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (SET_SRC (element), 1)))
+ return false;
+
+ /* Check that the next element is the first load. */
+ element = XVECEXP (op, 0, 1);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || ! MEM_P (SET_SRC (element))
+ || ! REG_P (XEXP (SET_SRC (element), 0))
+ || REGNO (XEXP (SET_SRC (element), 0)) != SP_REG)
+ return false;
+
+ dest_regno = REGNO (SET_DEST (element));
+
+ /* Check that the remaining elements use SP+<disp>
+ addressing and increasing register numbers. */
+ for (i = 2; i < count; i++)
+ {
+ element = XVECEXP (op, 0, i);
+
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || GET_MODE (SET_DEST (element)) != SImode
+ || REGNO (SET_DEST (element)) != dest_regno + (i - 1)
+ || ! MEM_P (SET_SRC (element))
+ || GET_MODE (SET_SRC (element)) != SImode
+ || GET_CODE (XEXP (SET_SRC (element), 0)) != PLUS
+ || ! REG_P (XEXP (XEXP (SET_SRC (element), 0), 0))
+ || REGNO (XEXP (XEXP (SET_SRC (element), 0), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (XEXP (SET_SRC (element), 0), 1))
+ || INTVAL (XEXP (XEXP (SET_SRC (element), 0), 1))
+ != (i - 1) * GET_MODE_SIZE (SImode))
+ return false;
+ }
+ return true;
+})
+
+;; Return true if OP is a pop-and-return load multiple operation.
+;; This looks like:
+;; [(set (SP) (PLUS (SP) (INT)))
+;; (set (REG) (MEM (SP)))
+;; (set (REG) (MEM (PLUS (SP) (INT)))) {optional and possibly repeated}
+;; (return)
+;; ]
+
+(define_special_predicate "rx_rtsd_vector"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx element;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 2)
+ return false;
+
+ /* Check that the first element of the vector is the stack adjust. */
+ element = XVECEXP (op, 0, 0);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || REGNO (SET_DEST (element)) != SP_REG
+ || GET_CODE (SET_SRC (element)) != PLUS
+ || ! REG_P (XEXP (SET_SRC (element), 0))
+ || REGNO (XEXP (SET_SRC (element), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (SET_SRC (element), 1)))
+ return false;
+
+ /* Check that the next element is the first load. */
+ element = XVECEXP (op, 0, 1);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || ! MEM_P (SET_SRC (element))
+ || ! REG_P (XEXP (SET_SRC (element), 0))
+ || REGNO (XEXP (SET_SRC (element), 0)) != SP_REG)
+ return false;
+
+ dest_regno = REGNO (SET_DEST (element));
+
+ /* Check that the remaining elements, if any, except for
+ the last one, use SP+<disp> addressing and increasing
+ register numbers. */
+ for (i = 2; i < count - 1; i++)
+ {
+ element = XVECEXP (op, 0, i);
+
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || GET_MODE (SET_DEST (element)) != SImode
+ || REGNO (SET_DEST (element)) != dest_regno + (i - 1)
+ || ! MEM_P (SET_SRC (element))
+ || GET_MODE (SET_SRC (element)) != SImode
+ || GET_CODE (XEXP (SET_SRC (element), 0)) != PLUS
+ || ! REG_P (XEXP (XEXP (SET_SRC (element), 0), 0))
+ || REGNO (XEXP (XEXP (SET_SRC (element), 0), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (XEXP (SET_SRC (element), 0), 1))
+ || INTVAL (XEXP (XEXP (SET_SRC (element), 0), 1))
+ != (i - 1) * GET_MODE_SIZE (SImode))
+ return false;
+ }
+
+ /* The last element must be a RETURN. */
+ element = XVECEXP (op, 0, count - 1);
+ return GET_CODE (element) == RETURN;
+})
+
+(define_predicate "label_ref_operand"
+ (match_code "label_ref")
+)
+
+(define_predicate "rx_z_comparison_operator"
+ (match_code "eq,ne")
+)
+
+(define_predicate "rx_zs_comparison_operator"
+ (match_code "eq,ne,lt,ge")
+)
+
+;; GT and LE are omitted because they would require swapping the operands.
+(define_predicate "rx_fp_comparison_operator"
+ (match_code "eq,ne,lt,ge,ordered,unordered")
+)
+
+(define_predicate "rshift_operator"
+ (match_code "ashiftrt,lshiftrt")
+)
diff --git a/gcc-4.9/gcc/config/rx/rx-modes.def b/gcc-4.9/gcc/config/rx/rx-modes.def
new file mode 100644
index 000000000..655d7575b
--- /dev/null
+++ b/gcc-4.9/gcc/config/rx/rx-modes.def
@@ -0,0 +1,25 @@
+/* Definitions of target specific machine modes for the RX.
+ Copyright (C) 2008-2014 Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
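+/* Each mode names the PSW flags that are valid after an instruction
+   that sets it: Z and S for CC_ZS, plus O for CC_ZSO, plus C for
+   CC_ZSC.  CC_F describes the result of a floating point compare
+   (fcmp).  See flags_from_mode in rx.c.  */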
+CC_MODE (CC_ZS);
+CC_MODE (CC_ZSO);
+CC_MODE (CC_ZSC);
+
+CC_MODE (CC_F); /* fcmp */
diff --git a/gcc-4.9/gcc/config/rx/rx-opts.h b/gcc-4.9/gcc/config/rx/rx-opts.h
new file mode 100644
index 000000000..6451dc644
--- /dev/null
+++ b/gcc-4.9/gcc/config/rx/rx-opts.h
@@ -0,0 +1,31 @@
+/* GCC option-handling definitions for the Renesas RX processor.
+ Copyright (C) 2008-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RX_OPTS_H
+#define RX_OPTS_H
+
+enum rx_cpu_types
+{
+ RX600,
+ RX610,
+ RX200,
+ RX100
+};
+
+#endif
diff --git a/gcc-4.9/gcc/config/rx/rx-protos.h b/gcc-4.9/gcc/config/rx/rx-protos.h
new file mode 100644
index 000000000..189afb07e
--- /dev/null
+++ b/gcc-4.9/gcc/config/rx/rx-protos.h
@@ -0,0 +1,46 @@
+/* Exported function prototypes from the Renesas RX backend.
+ Copyright (C) 2008-2014 Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RX_PROTOS_H
+#define GCC_RX_PROTOS_H
+
+extern bool rx_can_use_simple_return (void);
+extern void rx_expand_epilogue (bool);
+extern void rx_expand_prologue (void);
+extern int rx_initial_elimination_offset (int, int);
+
+#ifdef RTX_CODE
+extern int rx_adjust_insn_length (rtx, int);
+extern int rx_align_for_label (rtx, int);
+extern void rx_emit_stack_popm (rtx *, bool);
+extern void rx_emit_stack_pushm (rtx *);
+extern char * rx_gen_move_template (rtx *, bool);
+extern bool rx_is_legitimate_constant (enum machine_mode, rtx);
+extern bool rx_is_restricted_memory_address (rtx,
+ enum machine_mode);
+extern bool rx_match_ccmode (rtx, enum machine_mode);
+extern rtx rx_maybe_pidify_operand (rtx, int);
+extern void rx_notice_update_cc (rtx, rtx);
+extern void rx_split_cbranch (enum machine_mode, enum rtx_code,
+ rtx, rtx, rtx);
+extern enum machine_mode rx_select_cc_mode (enum rtx_code, rtx, rtx);
+#endif
+
+#endif /* GCC_RX_PROTOS_H */
diff --git a/gcc-4.9/gcc/config/rx/rx.c b/gcc-4.9/gcc/config/rx/rx.c
new file mode 100644
index 000000000..4242c1a97
--- /dev/null
+++ b/gcc-4.9/gcc/config/rx/rx.c
@@ -0,0 +1,3443 @@
+/* Subroutines used for code generation on Renesas RX processors.
+ Copyright (C) 2008-2014 Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* To Do:
+
+ * Re-enable memory-to-memory copies and fix up reload. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "function.h"
+#include "expr.h"
+#include "optabs.h"
+#include "libfuncs.h"
+#include "recog.h"
+#include "diagnostic-core.h"
+#include "toplev.h"
+#include "reload.h"
+#include "df.h"
+#include "ggc.h"
+#include "tm_p.h"
+#include "debug.h"
+#include "target.h"
+#include "target-def.h"
+#include "langhooks.h"
+#include "opts.h"
+#include "cgraph.h"
+
+static unsigned int rx_gp_base_regnum_val = INVALID_REGNUM;
+static unsigned int rx_pid_base_regnum_val = INVALID_REGNUM;
+static unsigned int rx_num_interrupt_regs;
+
+static unsigned int
+rx_gp_base_regnum (void)
+{
+ if (rx_gp_base_regnum_val == INVALID_REGNUM)
+ gcc_unreachable ();
+ return rx_gp_base_regnum_val;
+}
+
+static unsigned int
+rx_pid_base_regnum (void)
+{
+ if (rx_pid_base_regnum_val == INVALID_REGNUM)
+ gcc_unreachable ();
+ return rx_pid_base_regnum_val;
+}
+
+/* Find a SYMBOL_REF in a "standard" MEM address and return its decl. */
+
+static tree
+rx_decl_for_addr (rtx op)
+{
+ if (GET_CODE (op) == MEM)
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == CONST)
+ op = XEXP (op, 0);
+ while (GET_CODE (op) == PLUS)
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == SYMBOL_REF)
+ return SYMBOL_REF_DECL (op);
+ return NULL_TREE;
+}
+
+static void rx_print_operand (FILE *, rtx, int);
+
+#define CC_FLAG_S (1 << 0)
+#define CC_FLAG_Z (1 << 1)
+#define CC_FLAG_O (1 << 2)
+#define CC_FLAG_C (1 << 3)
+#define CC_FLAG_FP (1 << 4) /* Fake, to differentiate CC_Fmode. */
+
+static unsigned int flags_from_mode (enum machine_mode mode);
+static unsigned int flags_from_code (enum rtx_code code);
+
+/* Return true if OP is a reference to an object in a PID data area. */
+
+enum pid_type
+{
+ PID_NOT_PID = 0, /* The object is not in the PID data area. */
+ PID_ENCODED, /* The object is in the PID data area. */
+ PID_UNENCODED /* The object will be placed in the PID data area, but it has not been placed there yet. */
+};
+
+static enum pid_type
+rx_pid_data_operand (rtx op)
+{
+ tree op_decl;
+
+ if (!TARGET_PID)
+ return PID_NOT_PID;
+
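+  /* An already-encoded PID reference has the shape
+     (plus (reg) (const (unspec ...))) - cf. gen_pid_addr.  */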
+ if (GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 0)) == REG
+ && GET_CODE (XEXP (op, 1)) == CONST
+ && GET_CODE (XEXP (XEXP (op, 1), 0)) == UNSPEC)
+ return PID_ENCODED;
+
+ op_decl = rx_decl_for_addr (op);
+
+ if (op_decl)
+ {
+ if (TREE_READONLY (op_decl))
+ return PID_UNENCODED;
+ }
+ else
+ {
+ /* Sigh, some special cases. */
+ if (GET_CODE (op) == SYMBOL_REF
+ || GET_CODE (op) == LABEL_REF)
+ return PID_UNENCODED;
+ }
+
+ return PID_NOT_PID;
+}
+
+static rtx
+rx_legitimize_address (rtx x,
+ rtx oldx ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ if (rx_pid_data_operand (x) == PID_UNENCODED)
+ {
+ rtx rv = gen_pid_addr (gen_rtx_REG (SImode, rx_pid_base_regnum ()), x);
+ return rv;
+ }
+
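+  /* An address of the form (reg + x) + reg cannot be encoded in any
+     RX addressing mode, so force the whole address into a register.  */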
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && REG_P (XEXP (XEXP (x, 0), 0))
+ && REG_P (XEXP (x, 1)))
+ return force_reg (SImode, x);
+
+ return x;
+}
+
+/* Return true if OP is a reference to an object in a small data area. */
+
+static bool
+rx_small_data_operand (rtx op)
+{
+ if (rx_small_data_limit == 0)
+ return false;
+
+ if (GET_CODE (op) == SYMBOL_REF)
+ return SYMBOL_REF_SMALL_P (op);
+
+ return false;
+}
+
+static bool
+rx_is_legitimate_address (enum machine_mode mode, rtx x,
+ bool strict ATTRIBUTE_UNUSED)
+{
+ if (RTX_OK_FOR_BASE (x, strict))
+ /* Register Indirect. */
+ return true;
+
+ if ((GET_MODE_SIZE (mode) == 4
+ || GET_MODE_SIZE (mode) == 2
+ || GET_MODE_SIZE (mode) == 1)
+ && (GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC))
+ /* Pre-decrement Register Indirect or
+ Post-increment Register Indirect. */
+ return RTX_OK_FOR_BASE (XEXP (x, 0), strict);
+
+ switch (rx_pid_data_operand (x))
+ {
+ case PID_UNENCODED:
+ return false;
+ case PID_ENCODED:
+ return true;
+ default:
+ break;
+ }
+
+ if (GET_CODE (x) == PLUS)
+ {
+ rtx arg1 = XEXP (x, 0);
+ rtx arg2 = XEXP (x, 1);
+ rtx index = NULL_RTX;
+
+ if (REG_P (arg1) && RTX_OK_FOR_BASE (arg1, strict))
+ index = arg2;
+ else if (REG_P (arg2) && RTX_OK_FOR_BASE (arg2, strict))
+ index = arg1;
+ else
+ return false;
+
+ switch (GET_CODE (index))
+ {
+ case CONST_INT:
+ {
+ /* Register Relative: REG + INT.
+ Only positive, mode-aligned, mode-sized
+ displacements are allowed. */
+ HOST_WIDE_INT val = INTVAL (index);
+ int factor;
+
+ if (val < 0)
+ return false;
+
+ switch (GET_MODE_SIZE (mode))
+ {
+ default:
+ case 4: factor = 4; break;
+ case 2: factor = 2; break;
+ case 1: factor = 1; break;
+ }
+
+ if (val > (65535 * factor))
+ return false;
+ return (val % factor) == 0;
+ }
+
+ case REG:
+ /* Unscaled Indexed Register Indirect: REG + REG
+ Size has to be "QI", REG has to be valid. */
+ return GET_MODE_SIZE (mode) == 1 && RTX_OK_FOR_BASE (index, strict);
+
+ case MULT:
+ {
+ /* Scaled Indexed Register Indirect: REG + (REG * FACTOR)
+ Factor has to equal the mode size, REG has to be valid. */
+ rtx factor;
+
+ factor = XEXP (index, 1);
+ index = XEXP (index, 0);
+
+ return REG_P (index)
+ && RTX_OK_FOR_BASE (index, strict)
+ && CONST_INT_P (factor)
+ && GET_MODE_SIZE (mode) == INTVAL (factor);
+ }
+
+ default:
+ return false;
+ }
+ }
+
+ /* Small data area accesses turn into register relative offsets. */
+ return rx_small_data_operand (x);
+}
+
+/* Returns TRUE for simple memory addresses, i.e. ones
+   that do not involve indexed register addressing
+   or pre/post increment/decrement.  */
+
+bool
+rx_is_restricted_memory_address (rtx mem, enum machine_mode mode)
+{
+ if (! rx_is_legitimate_address
+ (mode, mem, reload_in_progress || reload_completed))
+ return false;
+
+ switch (GET_CODE (mem))
+ {
+ case REG:
+ /* Simple memory addresses are OK. */
+ return true;
+
+ case PRE_DEC:
+ case POST_INC:
+ return false;
+
+ case PLUS:
+ {
+ rtx base, index;
+
+ /* Only allow REG+INT addressing. */
+ base = XEXP (mem, 0);
+ index = XEXP (mem, 1);
+
+ if (! RX_REG_P (base) || ! CONST_INT_P (index))
+ return false;
+
+ return IN_RANGE (INTVAL (index), 0, (0x10000 * GET_MODE_SIZE (mode)) - 1);
+ }
+
+ case SYMBOL_REF:
+ /* Can happen when small data is being supported.
+ Assume that it will be resolved into GP+INT. */
+ return true;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
+
+static bool
+rx_mode_dependent_address_p (const_rtx addr, addr_space_t as ATTRIBUTE_UNUSED)
+{
+ if (GET_CODE (addr) == CONST)
+ addr = XEXP (addr, 0);
+
+ switch (GET_CODE (addr))
+ {
+ /* --REG and REG++ only work in SImode. */
+ case PRE_DEC:
+ case POST_INC:
+ return true;
+
+ case MINUS:
+ case PLUS:
+ if (! REG_P (XEXP (addr, 0)))
+ return true;
+
+ addr = XEXP (addr, 1);
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ /* REG+REG only works in SImode. */
+ return true;
+
+ case CONST_INT:
+ /* REG+INT is only mode independent if INT is a
+ multiple of 4, positive, and fits into 16 bits. */
+ if (((INTVAL (addr) & 3) == 0)
+ && IN_RANGE (INTVAL (addr), 4, 0xfffc))
+ return false;
+ return true;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return true;
+
+ case MULT:
+ gcc_assert (REG_P (XEXP (addr, 0)));
+ gcc_assert (CONST_INT_P (XEXP (addr, 1)));
+ /* REG+REG*SCALE is always mode dependent. */
+ return true;
+
+ default:
+ /* Not recognized, so treat as mode dependent. */
+ return true;
+ }
+
+ case CONST_INT:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case REG:
+ /* These are all mode independent. */
+ return false;
+
+ default:
+ /* Everything else is unrecognized,
+ so treat as mode dependent. */
+ return true;
+ }
+}
+
+/* A C compound statement to output to stdio stream FILE the
+ assembler syntax for an instruction operand that is a memory
+ reference whose address is ADDR. */
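+/* E.g. (reg r1) prints as "[r1]", (plus (reg r1) (const_int 4)) as
+   "4[r1]", and (plus (reg r1) (reg r2)) as "[r2,r1]".  */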
+
+static void
+rx_print_operand_address (FILE * file, rtx addr)
+{
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ fprintf (file, "[");
+ rx_print_operand (file, addr, 0);
+ fprintf (file, "]");
+ break;
+
+ case PRE_DEC:
+ fprintf (file, "[-");
+ rx_print_operand (file, XEXP (addr, 0), 0);
+ fprintf (file, "]");
+ break;
+
+ case POST_INC:
+ fprintf (file, "[");
+ rx_print_operand (file, XEXP (addr, 0), 0);
+ fprintf (file, "+]");
+ break;
+
+ case PLUS:
+ {
+ rtx arg1 = XEXP (addr, 0);
+ rtx arg2 = XEXP (addr, 1);
+ rtx base, index;
+
+ if (REG_P (arg1) && RTX_OK_FOR_BASE (arg1, true))
+ base = arg1, index = arg2;
+ else if (REG_P (arg2) && RTX_OK_FOR_BASE (arg2, true))
+ base = arg2, index = arg1;
+ else
+ {
+ rx_print_operand (file, arg1, 0);
+ fprintf (file, " + ");
+ rx_print_operand (file, arg2, 0);
+ break;
+ }
+
+ if (REG_P (index) || GET_CODE (index) == MULT)
+ {
+ fprintf (file, "[");
+ rx_print_operand (file, index, 'A');
+ fprintf (file, ",");
+ }
+ else /* GET_CODE (index) == CONST_INT */
+ {
+ rx_print_operand (file, index, 'A');
+ fprintf (file, "[");
+ }
+ rx_print_operand (file, base, 0);
+ fprintf (file, "]");
+ break;
+ }
+
+ case CONST:
+ if (GET_CODE (XEXP (addr, 0)) == UNSPEC)
+ {
+ addr = XEXP (addr, 0);
+ gcc_assert (XINT (addr, 1) == UNSPEC_CONST);
+
+ /* FIXME: Putting this case label here is an appalling abuse of the C language. */
+ case UNSPEC:
+ addr = XVECEXP (addr, 0, 0);
+ gcc_assert (CONST_INT_P (addr));
+ }
+ /* Fall through. */
+ case LABEL_REF:
+ case SYMBOL_REF:
+ fprintf (file, "#");
+ /* Fall through. */
+ default:
+ output_addr_const (file, addr);
+ break;
+ }
+}
+
+static void
+rx_print_integer (FILE * file, HOST_WIDE_INT val)
+{
+ if (IN_RANGE (val, -64, 64))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, val);
+ else
+ fprintf (file,
+ TARGET_AS100_SYNTAX
+ ? "0%" HOST_WIDE_INT_PRINT "xH" : HOST_WIDE_INT_PRINT_HEX,
+ val);
+}
+
+static bool
+rx_assemble_integer (rtx x, unsigned int size, int is_aligned)
+{
+ const char * op = integer_asm_op (size, is_aligned);
+
+ if (! CONST_INT_P (x))
+ return default_assemble_integer (x, size, is_aligned);
+
+ if (op == NULL)
+ return false;
+ fputs (op, asm_out_file);
+
+ rx_print_integer (asm_out_file, INTVAL (x));
+ fputc ('\n', asm_out_file);
+ return true;
+}
+
+
+/* Handles the insertion of a single operand into the assembler output.
+ The %<letter> directives supported are:
+
+ %A Print an operand without a leading # character.
+ %B Print an integer comparison name.
+ %C Print a control register name.
+ %F Print a condition code flag name.
+ %G Register used for small-data-area addressing.
+ %H Print high part of a DImode register, integer or address.
+ %L Print low part of a DImode register, integer or address.
+ %N Print the negation of the immediate value.
+ %P Register used for PID addressing.
+ %Q If the operand is a MEM, then correctly generate
+ register indirect or register relative addressing.
+ %R Like %Q but for zero-extending loads. */
+
+static void
+rx_print_operand (FILE * file, rtx op, int letter)
+{
+ bool unsigned_load = false;
+ bool print_hash = true;
+
+ if (letter == 'A'
+ && ((GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == UNSPEC)
+ || GET_CODE (op) == UNSPEC))
+ {
+ print_hash = false;
+ letter = 0;
+ }
+
+ switch (letter)
+ {
+ case 'A':
+ /* Print an operand without a leading #. */
+ if (MEM_P (op))
+ op = XEXP (op, 0);
+
+ switch (GET_CODE (op))
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ output_addr_const (file, op);
+ break;
+ case CONST_INT:
+ fprintf (file, "%ld", (long) INTVAL (op));
+ break;
+ default:
+ rx_print_operand (file, op, 0);
+ break;
+ }
+ break;
+
+ case 'B':
+ {
+ enum rtx_code code = GET_CODE (op);
+ enum machine_mode mode = GET_MODE (XEXP (op, 0));
+ const char *ret;
+
+ if (mode == CC_Fmode)
+ {
+ /* C flag is undefined, and O flag carries unordered. None of the
+ branch combinations that include O use it helpfully. */
+ switch (code)
+ {
+ case ORDERED:
+ ret = "no";
+ break;
+ case UNORDERED:
+ ret = "o";
+ break;
+ case LT:
+ ret = "n";
+ break;
+ case GE:
+ ret = "pz";
+ break;
+ case EQ:
+ ret = "eq";
+ break;
+ case NE:
+ ret = "ne";
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ unsigned int flags = flags_from_mode (mode);
+
+ switch (code)
+ {
+ case LT:
+ ret = (flags & CC_FLAG_O ? "lt" : "n");
+ break;
+ case GE:
+ ret = (flags & CC_FLAG_O ? "ge" : "pz");
+ break;
+ case GT:
+ ret = "gt";
+ break;
+ case LE:
+ ret = "le";
+ break;
+ case GEU:
+ ret = "geu";
+ break;
+ case LTU:
+ ret = "ltu";
+ break;
+ case GTU:
+ ret = "gtu";
+ break;
+ case LEU:
+ ret = "leu";
+ break;
+ case EQ:
+ ret = "eq";
+ break;
+ case NE:
+ ret = "ne";
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ gcc_checking_assert ((flags_from_code (code) & ~flags) == 0);
+ }
+ fputs (ret, file);
+ break;
+ }
+
+ case 'C':
+ gcc_assert (CONST_INT_P (op));
+ switch (INTVAL (op))
+ {
+ case 0: fprintf (file, "psw"); break;
+ case 2: fprintf (file, "usp"); break;
+ case 3: fprintf (file, "fpsw"); break;
+ case 4: fprintf (file, "cpen"); break;
+ case 8: fprintf (file, "bpsw"); break;
+ case 9: fprintf (file, "bpc"); break;
+ case 0xa: fprintf (file, "isp"); break;
+ case 0xb: fprintf (file, "fintv"); break;
+ case 0xc: fprintf (file, "intb"); break;
+ default:
+ warning (0, "unrecognized control register number: %d - using 'psw'",
+ (int) INTVAL (op));
+ fprintf (file, "psw");
+ break;
+ }
+ break;
+
+ case 'F':
+ gcc_assert (CONST_INT_P (op));
+ switch (INTVAL (op))
+ {
+ case 0: case 'c': case 'C': fprintf (file, "C"); break;
+ case 1: case 'z': case 'Z': fprintf (file, "Z"); break;
+ case 2: case 's': case 'S': fprintf (file, "S"); break;
+ case 3: case 'o': case 'O': fprintf (file, "O"); break;
+ case 8: case 'i': case 'I': fprintf (file, "I"); break;
+ case 9: case 'u': case 'U': fprintf (file, "U"); break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case 'G':
+ fprintf (file, "%s", reg_names [rx_gp_base_regnum ()]);
+ break;
+
+ case 'H':
+ switch (GET_CODE (op))
+ {
+ case REG:
+ fprintf (file, "%s", reg_names [REGNO (op) + (WORDS_BIG_ENDIAN ? 0 : 1)]);
+ break;
+ case CONST_INT:
+ {
+ HOST_WIDE_INT v = INTVAL (op);
+
+ fprintf (file, "#");
+ /* Trickery to avoid problems with shifting 32 bits at a time. */
+ v = v >> 16;
+ v = v >> 16;
+ rx_print_integer (file, v);
+ break;
+ }
+ case CONST_DOUBLE:
+ fprintf (file, "#");
+ rx_print_integer (file, CONST_DOUBLE_HIGH (op));
+ break;
+ case MEM:
+ if (! WORDS_BIG_ENDIAN)
+ op = adjust_address (op, SImode, 4);
+ output_address (XEXP (op, 0));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case 'L':
+ switch (GET_CODE (op))
+ {
+ case REG:
+ fprintf (file, "%s", reg_names [REGNO (op) + (WORDS_BIG_ENDIAN ? 1 : 0)]);
+ break;
+ case CONST_INT:
+ fprintf (file, "#");
+ rx_print_integer (file, INTVAL (op) & 0xffffffff);
+ break;
+ case CONST_DOUBLE:
+ fprintf (file, "#");
+ rx_print_integer (file, CONST_DOUBLE_LOW (op));
+ break;
+ case MEM:
+ if (WORDS_BIG_ENDIAN)
+ op = adjust_address (op, SImode, 4);
+ output_address (XEXP (op, 0));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case 'N':
+ gcc_assert (CONST_INT_P (op));
+ fprintf (file, "#");
+ rx_print_integer (file, - INTVAL (op));
+ break;
+
+ case 'P':
+ fprintf (file, "%s", reg_names [rx_pid_base_regnum ()]);
+ break;
+
+ case 'R':
+ gcc_assert (GET_MODE_SIZE (GET_MODE (op)) < 4);
+ unsigned_load = true;
+ /* Fall through. */
+ case 'Q':
+ if (MEM_P (op))
+ {
+ HOST_WIDE_INT offset;
+ rtx mem = op;
+
+ op = XEXP (op, 0);
+
+ if (REG_P (op))
+ offset = 0;
+ else if (GET_CODE (op) == PLUS)
+ {
+ rtx displacement;
+
+ if (REG_P (XEXP (op, 0)))
+ {
+ displacement = XEXP (op, 1);
+ op = XEXP (op, 0);
+ }
+ else
+ {
+ displacement = XEXP (op, 0);
+ op = XEXP (op, 1);
+ gcc_assert (REG_P (op));
+ }
+
+ gcc_assert (CONST_INT_P (displacement));
+ offset = INTVAL (displacement);
+ gcc_assert (offset >= 0);
+
+ fprintf (file, "%ld", offset);
+ }
+ else
+ gcc_unreachable ();
+
+ fprintf (file, "[");
+ rx_print_operand (file, op, 0);
+ fprintf (file, "].");
+
+ switch (GET_MODE_SIZE (GET_MODE (mem)))
+ {
+ case 1:
+ gcc_assert (offset <= 65535 * 1);
+ fprintf (file, unsigned_load ? "UB" : "B");
+ break;
+ case 2:
+ gcc_assert (offset % 2 == 0);
+ gcc_assert (offset <= 65535 * 2);
+ fprintf (file, unsigned_load ? "UW" : "W");
+ break;
+ case 4:
+ gcc_assert (offset % 4 == 0);
+ gcc_assert (offset <= 65535 * 4);
+ fprintf (file, "L");
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+ }
+
+ /* Fall through. */
+
+ default:
+ if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == UNSPEC)
+ op = XEXP (op, 0);
+ else if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == UNSPEC
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
+ {
+ if (print_hash)
+ fprintf (file, "#");
+ fprintf (file, "(");
+ rx_print_operand (file, XEXP (XEXP (op, 0), 0), 'A');
+ fprintf (file, " + ");
+ output_addr_const (file, XEXP (XEXP (op, 0), 1));
+ fprintf (file, ")");
+ return;
+ }
+
+ switch (GET_CODE (op))
+ {
+ case MULT:
+ /* Should be the scaled part of an
+ indexed register indirect address. */
+ {
+ rtx base = XEXP (op, 0);
+ rtx index = XEXP (op, 1);
+
+ /* Check for a swapped index register and scaling factor.
+ Not sure if this can happen, but be prepared to handle it. */
+ if (CONST_INT_P (base) && REG_P (index))
+ {
+ rtx tmp = base;
+ base = index;
+ index = tmp;
+ }
+
+ gcc_assert (REG_P (base));
+ gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
+ gcc_assert (CONST_INT_P (index));
+ /* Do not try to verify the value of the scale factor, as it
+ is based on the mode of the MEM, not the mode of the MULT
+ (which will always be SImode). */
+ fprintf (file, "%s", reg_names [REGNO (base)]);
+ break;
+ }
+
+ case MEM:
+ output_address (XEXP (op, 0));
+ break;
+
+ case PLUS:
+ output_address (op);
+ break;
+
+ case REG:
+ gcc_assert (REGNO (op) < FIRST_PSEUDO_REGISTER);
+ fprintf (file, "%s", reg_names [REGNO (op)]);
+ break;
+
+ case SUBREG:
+ gcc_assert (subreg_regno (op) < FIRST_PSEUDO_REGISTER);
+ fprintf (file, "%s", reg_names [subreg_regno (op)]);
+ break;
+
+ /* This will only be single precision.... */
+ case CONST_DOUBLE:
+ {
+ unsigned long val;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, val);
+ if (print_hash)
+ fprintf (file, "#");
+ fprintf (file, TARGET_AS100_SYNTAX ? "0%lxH" : "0x%lx", val);
+ break;
+ }
+
+ case CONST_INT:
+ if (print_hash)
+ fprintf (file, "#");
+ rx_print_integer (file, INTVAL (op));
+ break;
+
+ case UNSPEC:
+ switch (XINT (op, 1))
+ {
+ case UNSPEC_PID_ADDR:
+ {
+ rtx sym, add;
+
+ if (print_hash)
+ fprintf (file, "#");
+ sym = XVECEXP (op, 0, 0);
+ add = NULL_RTX;
+ fprintf (file, "(");
+ if (GET_CODE (sym) == PLUS)
+ {
+ add = XEXP (sym, 1);
+ sym = XEXP (sym, 0);
+ }
+ output_addr_const (file, sym);
+ if (add != NULL_RTX)
+ {
+ fprintf (file, "+");
+ output_addr_const (file, add);
+ }
+ fprintf (file, "-__pid_base");
+ fprintf (file, ")");
+ return;
+ }
+ }
+ /* Fall through */
+
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CODE_LABEL:
+ rx_print_operand_address (file, op);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+ }
+}
+
+/* Maybe convert an operand into its PID format. */
+
+rtx
+rx_maybe_pidify_operand (rtx op, int copy_to_reg)
+{
+ if (rx_pid_data_operand (op) == PID_UNENCODED)
+ {
+ if (GET_CODE (op) == MEM)
+ {
+ rtx a = gen_pid_addr (gen_rtx_REG (SImode, rx_pid_base_regnum ()), XEXP (op, 0));
+ op = replace_equiv_address (op, a);
+ }
+ else
+ {
+ op = gen_pid_addr (gen_rtx_REG (SImode, rx_pid_base_regnum ()), op);
+ }
+
+ if (copy_to_reg)
+ op = copy_to_mode_reg (GET_MODE (op), op);
+ }
+ return op;
+}
+
+/* Returns an assembler template for a move instruction. */
+
+char *
+rx_gen_move_template (rtx * operands, bool is_movu)
+{
+ static char out_template [64];
+ const char * extension = TARGET_AS100_SYNTAX ? ".L" : "";
+ const char * src_template;
+ const char * dst_template;
+ rtx dest = operands[0];
+ rtx src = operands[1];
+
+ /* Decide which extension, if any, should be given to the move instruction. */
+ switch (CONST_INT_P (src) ? GET_MODE (dest) : GET_MODE (src))
+ {
+ case QImode:
+ /* The .B extension is not valid when
+ loading an immediate into a register. */
+ if (! REG_P (dest) || ! CONST_INT_P (src))
+ extension = ".B";
+ break;
+ case HImode:
+ if (! REG_P (dest) || ! CONST_INT_P (src))
+ /* The .W extension is not valid when
+ loading an immediate into a register. */
+ extension = ".W";
+ break;
+ case DFmode:
+ case DImode:
+ case SFmode:
+ case SImode:
+ extension = ".L";
+ break;
+ case VOIDmode:
+ /* This mode is used by constants. */
+ break;
+ default:
+ debug_rtx (src);
+ gcc_unreachable ();
+ }
+
+ if (MEM_P (src) && rx_pid_data_operand (XEXP (src, 0)) == PID_UNENCODED)
+ {
+ gcc_assert (GET_MODE (src) != DImode);
+ gcc_assert (GET_MODE (src) != DFmode);
+
+ src_template = "(%A1 - __pid_base)[%P1]";
+ }
+ else if (MEM_P (src) && rx_small_data_operand (XEXP (src, 0)))
+ {
+ gcc_assert (GET_MODE (src) != DImode);
+ gcc_assert (GET_MODE (src) != DFmode);
+
+ src_template = "%%gp(%A1)[%G1]";
+ }
+ else
+ src_template = "%1";
+
+ if (MEM_P (dest) && rx_small_data_operand (XEXP (dest, 0)))
+ {
+ gcc_assert (GET_MODE (dest) != DImode);
+ gcc_assert (GET_MODE (dest) != DFmode);
+
+ dst_template = "%%gp(%A0)[%G0]";
+ }
+ else
+ dst_template = "%0";
+
+ if (GET_MODE (dest) == DImode || GET_MODE (dest) == DFmode)
+ {
+ gcc_assert (! is_movu);
+
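+      /* If the low word of the destination overlaps the high word of
+	 the source, move the high word first so that it is not
+	 clobbered.  */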
+ if (REG_P (src) && REG_P (dest) && (REGNO (dest) == REGNO (src) + 1))
+ sprintf (out_template, "mov.L\t%%H1, %%H0 ! mov.L\t%%1, %%0");
+ else
+ sprintf (out_template, "mov.L\t%%1, %%0 ! mov.L\t%%H1, %%H0");
+ }
+ else
+ sprintf (out_template, "%s%s\t%s, %s", is_movu ? "movu" : "mov",
+ extension, src_template, dst_template);
+ return out_template;
+}
+
+/* Return VALUE rounded up to the next ALIGNMENT boundary. */
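+/* ALIGNMENT must be a power of two; e.g. rx_round_up (13, 4) == 16.  */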
+
+static inline unsigned int
+rx_round_up (unsigned int value, unsigned int alignment)
+{
+ alignment -= 1;
+ return (value + alignment) & (~ alignment);
+}
+
+/* Return the number of bytes in the argument registers
+ occupied by an argument of type TYPE and mode MODE. */
+
+static unsigned int
+rx_function_arg_size (enum machine_mode mode, const_tree type)
+{
+ unsigned int num_bytes;
+
+ num_bytes = (mode == BLKmode)
+ ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+ return rx_round_up (num_bytes, UNITS_PER_WORD);
+}
+
+#define NUM_ARG_REGS 4
+#define MAX_NUM_ARG_BYTES (NUM_ARG_REGS * UNITS_PER_WORD)
+
+/* Return an RTL expression describing the register holding a function
+ parameter of mode MODE and type TYPE or NULL_RTX if the parameter should
+ be passed on the stack. CUM describes the previous parameters to the
+ function and NAMED is false if the parameter is part of a variable
+ parameter list, or the last named parameter before the start of a
+ variable parameter list. */
+
+static rtx
+rx_function_arg (cumulative_args_t cum, enum machine_mode mode,
+ const_tree type, bool named)
+{
+ unsigned int next_reg;
+ unsigned int bytes_so_far = *get_cumulative_args (cum);
+ unsigned int size;
+ unsigned int rounded_size;
+
+ /* An exploded version of rx_function_arg_size. */
+ size = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+ /* If the size is not known it cannot be passed in registers. */
+ if (size < 1)
+ return NULL_RTX;
+
+ rounded_size = rx_round_up (size, UNITS_PER_WORD);
+
+ /* Don't pass this arg via registers if there
+ are insufficient registers to hold all of it. */
+ if (rounded_size + bytes_so_far > MAX_NUM_ARG_BYTES)
+ return NULL_RTX;
+
+ /* Unnamed arguments and the last named argument in a
+ variadic function are always passed on the stack. */
+ if (!named)
+ return NULL_RTX;
+
+ /* Structures must occupy an exact number of registers,
+ otherwise they are passed on the stack. */
+ if ((type == NULL || AGGREGATE_TYPE_P (type))
+ && (size % UNITS_PER_WORD) != 0)
+ return NULL_RTX;
+
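+  /* Arguments are passed in r1..r4 (r0 is the stack pointer),
+     hence the "+ 1".  */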
+ next_reg = (bytes_so_far / UNITS_PER_WORD) + 1;
+
+ return gen_rtx_REG (mode, next_reg);
+}
+
+static void
+rx_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ *get_cumulative_args (cum) += rx_function_arg_size (mode, type);
+}
+
+static unsigned int
+rx_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_tree type ATTRIBUTE_UNUSED)
+{
+ /* Older versions of the RX backend aligned all on-stack arguments
+ to 32-bits. The RX C ABI however says that they should be
+ aligned to their natural alignment. (See section 5.2.2 of the ABI). */
+ if (TARGET_GCC_ABI)
+ return STACK_BOUNDARY;
+
+ if (type)
+ {
+ if (DECL_P (type))
+ return DECL_ALIGN (type);
+ return TYPE_ALIGN (type);
+ }
+
+ return PARM_BOUNDARY;
+}
+
+/* Return an RTL describing where a function return value of type RET_TYPE
+ is held. */
+
+static rtx
+rx_function_value (const_tree ret_type,
+ const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode = TYPE_MODE (ret_type);
+
+ /* RX ABI specifies that small integer types are
+ promoted to int when returned by a function. */
+ if (GET_MODE_SIZE (mode) > 0
+ && GET_MODE_SIZE (mode) < 4
+ && ! COMPLEX_MODE_P (mode)
+ )
+ return gen_rtx_REG (SImode, FUNC_RETURN_REGNUM);
+
+ return gen_rtx_REG (mode, FUNC_RETURN_REGNUM);
+}
+
+/* TARGET_PROMOTE_FUNCTION_MODE must behave in the same way with
+ regard to function returns as does TARGET_FUNCTION_VALUE. */
+
+static enum machine_mode
+rx_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
+ enum machine_mode mode,
+ int * punsignedp ATTRIBUTE_UNUSED,
+ const_tree funtype ATTRIBUTE_UNUSED,
+ int for_return)
+{
+ if (for_return != 1
+ || GET_MODE_SIZE (mode) >= 4
+ || COMPLEX_MODE_P (mode)
+ || GET_MODE_SIZE (mode) < 1)
+ return mode;
+
+ return SImode;
+}
+
+static bool
+rx_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT size;
+
+ if (TYPE_MODE (type) != BLKmode
+ && ! AGGREGATE_TYPE_P (type))
+ return false;
+
+ size = int_size_in_bytes (type);
+ /* Large structs and those whose size is not an
+ exact multiple of 4 are returned in memory. */
+ return size < 1
+ || size > 16
+ || (size % UNITS_PER_WORD) != 0;
+}
+
+static rtx
+rx_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
+ int incoming ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (Pmode, STRUCT_VAL_REGNUM);
+}
+
+static bool
+rx_return_in_msb (const_tree valtype)
+{
+ return TARGET_BIG_ENDIAN_DATA
+ && (AGGREGATE_TYPE_P (valtype) || TREE_CODE (valtype) == COMPLEX_TYPE);
+}
+
+/* Returns true if the provided function has the specified attribute. */
+
+static inline bool
+has_func_attr (const_tree decl, const char * func_attr)
+{
+ if (decl == NULL_TREE)
+ decl = current_function_decl;
+
+ return lookup_attribute (func_attr, DECL_ATTRIBUTES (decl)) != NULL_TREE;
+}
+
+/* Returns true if the provided function has the "fast_interrupt" attribute. */
+
+static inline bool
+is_fast_interrupt_func (const_tree decl)
+{
+ return has_func_attr (decl, "fast_interrupt");
+}
+
+/* Returns true if the provided function has the "interrupt" attribute. */
+
+static inline bool
+is_interrupt_func (const_tree decl)
+{
+ return has_func_attr (decl, "interrupt");
+}
+
+/* Returns true if the provided function has the "naked" attribute. */
+
+static inline bool
+is_naked_func (const_tree decl)
+{
+ return has_func_attr (decl, "naked");
+}
+
+static bool use_fixed_regs = false;
+
+static void
+rx_conditional_register_usage (void)
+{
+ static bool using_fixed_regs = false;
+
+ if (TARGET_PID)
+ {
+ rx_pid_base_regnum_val = GP_BASE_REGNUM - rx_num_interrupt_regs;
+ fixed_regs[rx_pid_base_regnum_val] = call_used_regs [rx_pid_base_regnum_val] = 1;
+ }
+
+ if (rx_small_data_limit > 0)
+ {
+ if (TARGET_PID)
+ rx_gp_base_regnum_val = rx_pid_base_regnum_val - 1;
+ else
+ rx_gp_base_regnum_val = GP_BASE_REGNUM - rx_num_interrupt_regs;
+
+ fixed_regs[rx_gp_base_regnum_val] = call_used_regs [rx_gp_base_regnum_val] = 1;
+ }
+
+ if (use_fixed_regs != using_fixed_regs)
+ {
+ static char saved_fixed_regs[FIRST_PSEUDO_REGISTER];
+ static char saved_call_used_regs[FIRST_PSEUDO_REGISTER];
+
+ if (use_fixed_regs)
+ {
+ unsigned int r;
+
+ memcpy (saved_fixed_regs, fixed_regs, sizeof fixed_regs);
+ memcpy (saved_call_used_regs, call_used_regs, sizeof call_used_regs);
+
+ /* This is for fast interrupt handlers. Any register in
+ the range r10 to r13 (inclusive) that is currently
+ marked as fixed is now a viable, call-used register. */
+ for (r = 10; r <= 13; r++)
+ if (fixed_regs[r])
+ {
+ fixed_regs[r] = 0;
+ call_used_regs[r] = 1;
+ }
+
+ /* Mark r7 as fixed. This is just a hack to avoid
+ altering the reg_alloc_order array so that the newly
+ freed r10-r13 registers are the preferred registers. */
+ fixed_regs[7] = call_used_regs[7] = 1;
+ }
+ else
+ {
+ /* Restore the normal register masks. */
+ memcpy (fixed_regs, saved_fixed_regs, sizeof fixed_regs);
+ memcpy (call_used_regs, saved_call_used_regs, sizeof call_used_regs);
+ }
+
+ using_fixed_regs = use_fixed_regs;
+ }
+}
+
+struct decl_chain
+{
+ tree fndecl;
+ struct decl_chain * next;
+};
+
+/* Stack of decls for which we have issued warnings. */
+static struct decl_chain * warned_decls = NULL;
+
+static void
+add_warned_decl (tree fndecl)
+{
+ struct decl_chain * warned = (struct decl_chain *) xmalloc (sizeof * warned);
+
+ warned->fndecl = fndecl;
+ warned->next = warned_decls;
+ warned_decls = warned;
+}
+
+/* Returns TRUE if FNDECL is on our list of warned about decls. */
+
+static bool
+already_warned (tree fndecl)
+{
+ struct decl_chain * warned;
+
+ for (warned = warned_decls;
+ warned != NULL;
+ warned = warned->next)
+ if (warned->fndecl == fndecl)
+ return true;
+
+ return false;
+}
+
+/* Perform any actions necessary before starting to compile FNDECL.
+ For the RX we use this to make sure that we have the correct
+ set of register masks selected. If FNDECL is NULL then we are
+ compiling top level things. */
+
+static void
+rx_set_current_function (tree fndecl)
+{
+ /* Remember the last target of rx_set_current_function. */
+ static tree rx_previous_fndecl;
+ bool prev_was_fast_interrupt;
+ bool current_is_fast_interrupt;
+
+ /* Only change the context if the function changes. This hook is called
+ several times in the course of compiling a function, and we don't want
+ to slow things down too much or call target_reinit when it isn't safe. */
+ if (fndecl == rx_previous_fndecl)
+ return;
+
+ prev_was_fast_interrupt
+ = rx_previous_fndecl
+ ? is_fast_interrupt_func (rx_previous_fndecl) : false;
+
+ current_is_fast_interrupt
+ = fndecl ? is_fast_interrupt_func (fndecl) : false;
+
+ if (prev_was_fast_interrupt != current_is_fast_interrupt)
+ {
+ use_fixed_regs = current_is_fast_interrupt;
+ target_reinit ();
+ }
+
+ if (current_is_fast_interrupt && rx_warn_multiple_fast_interrupts)
+ {
+ /* We do not warn about the first fast interrupt routine that
+ we see. Instead we just push it onto the stack. */
+ if (warned_decls == NULL)
+ add_warned_decl (fndecl);
+
+ /* Otherwise if this fast interrupt is one for which we have
+ not already issued a warning, generate one and then push
+ it onto the stack as well. */
+ else if (! already_warned (fndecl))
+ {
+ warning (0, "multiple fast interrupt routines seen: %qE and %qE",
+ fndecl, warned_decls->fndecl);
+ add_warned_decl (fndecl);
+ }
+ }
+
+ rx_previous_fndecl = fndecl;
+}
+
+/* Typical stack layout should look like this after the function's prologue:
+
+ | |
+ -- ^
+ | | \ |
+ | | arguments saved | Increasing
+ | | on the stack | addresses
+ PARENT arg pointer -> | | /
+ -------------------------- ---- -------------------
+ CHILD |ret | return address
+ --
+ | | \
+ | | call saved
+ | | registers
+ | | /
+ --
+ | | \
+ | | local
+ | | variables
+ frame pointer -> | | /
+ --
+ | | \
+ | | outgoing | Decreasing
+ | | arguments | addresses
+ current stack pointer -> | | / |
+ -------------------------- ---- ------------------ V
+ | | */
+
+static unsigned int
+bit_count (unsigned int x)
+{
+ const unsigned int m1 = 0x55555555;
+ const unsigned int m2 = 0x33333333;
+ const unsigned int m4 = 0x0f0f0f0f;
+
+ x -= (x >> 1) & m1;
+ x = (x & m2) + ((x >> 2) & m2);
+ x = (x + (x >> 4)) & m4;
+ x += x >> 8;
+
+ return (x + (x >> 16)) & 0x3f;
+}
+
+#define MUST_SAVE_ACC_REGISTER \
+ (TARGET_SAVE_ACC_REGISTER \
+ && (is_interrupt_func (NULL_TREE) \
+ || is_fast_interrupt_func (NULL_TREE)))
+
+/* Returns either the lowest numbered and highest numbered registers that
+ occupy the call-saved area of the stack frame, if the registers are
+ stored as a contiguous block, or else a bitmask of the individual
+ registers if they are stored piecemeal.
+
+ Also computes the size of the frame and the size of the outgoing
+ arguments block (in bytes). */
+
+static void
+rx_get_stack_layout (unsigned int * lowest,
+ unsigned int * highest,
+ unsigned int * register_mask,
+ unsigned int * frame_size,
+ unsigned int * stack_size)
+{
+ unsigned int reg;
+ unsigned int low;
+ unsigned int high;
+ unsigned int fixed_reg = 0;
+ unsigned int save_mask;
+ unsigned int pushed_mask;
+ unsigned int unneeded_pushes;
+
+ if (is_naked_func (NULL_TREE))
+ {
+ /* Naked functions do not create their own stack frame.
+ Instead the programmer must do that for us. */
+ * lowest = 0;
+ * highest = 0;
+ * register_mask = 0;
+ * frame_size = 0;
+ * stack_size = 0;
+ return;
+ }
+
+ for (save_mask = high = low = 0, reg = 1; reg < CC_REGNUM; reg++)
+ {
+ if ((df_regs_ever_live_p (reg)
+ /* Always save all call clobbered registers inside non-leaf
+ interrupt handlers, even if they are not live - they may
+ be used in (non-interrupt aware) routines called from this one. */
+ || (call_used_regs[reg]
+ && is_interrupt_func (NULL_TREE)
+ && ! crtl->is_leaf))
+ && (! call_used_regs[reg]
+ /* Even call clobbered registers must
+ be pushed inside interrupt handlers. */
+ || is_interrupt_func (NULL_TREE)
+ /* Likewise for fast interrupt handlers, except registers r10 -
+ r13. These are normally call-saved, but may have been set
+ to call-used by rx_conditional_register_usage. If so then
+ they can be used in the fast interrupt handler without
+ saving them on the stack. */
+ || (is_fast_interrupt_func (NULL_TREE)
+ && ! IN_RANGE (reg, 10, 13))))
+ {
+ if (low == 0)
+ low = reg;
+ high = reg;
+
+ save_mask |= 1 << reg;
+ }
+
+ /* Remember if we see a fixed register
+ after having found the low register. */
+ if (low != 0 && fixed_reg == 0 && fixed_regs [reg])
+ fixed_reg = reg;
+ }
+
+ /* If we have to save the accumulator register, make sure
+ that at least two registers are pushed into the frame. */
+ if (MUST_SAVE_ACC_REGISTER
+ && bit_count (save_mask) < 2)
+ {
+ save_mask |= (1 << 13) | (1 << 14);
+ if (low == 0)
+ low = 13;
+ if (high == 0 || low == high)
+ high = low + 1;
+ }
+
+ /* Decide if it would be faster to fill in the call-saved area of the stack
+ frame using multiple PUSH instructions instead of a single PUSHM
+ instruction.
+
+ SAVE_MASK is a bitmask of the registers that must be stored in the
+ call-save area. PUSHED_MASK is a bitmask of the registers that would
+ be pushed into the area if we used a PUSHM instruction. UNNEEDED_PUSHES
+ is a bitmask of those registers in pushed_mask that are not in
+ save_mask.
+
+ We use a simple heuristic that says that it is better to use
+ multiple PUSH instructions if the number of unnecessary pushes is
+ greater than the number of necessary pushes.
+
+ We also use multiple PUSH instructions if there are any fixed registers
+ between LOW and HIGH. The only way that this can happen is if the user
+ has specified -ffixed-<reg-name> on the command line and in such
+ circumstances we do not want to touch the fixed registers at all.
+
+ FIXME: Is it worth improving this heuristic ? */
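+  /* E.g. LOW = 4, HIGH = 6 gives PUSHED_MASK = 0x70 (bits 4..6 set).  */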
+ pushed_mask = (-1 << low) & ~(-1 << (high + 1));
+ unneeded_pushes = (pushed_mask & (~ save_mask)) & pushed_mask;
+
+ if ((fixed_reg && fixed_reg <= high)
+ || (optimize_function_for_speed_p (cfun)
+ && bit_count (save_mask) < bit_count (unneeded_pushes)))
+ {
+ /* Use multiple pushes. */
+ * lowest = 0;
+ * highest = 0;
+ * register_mask = save_mask;
+ }
+ else
+ {
+ /* Use one push multiple instruction. */
+ * lowest = low;
+ * highest = high;
+ * register_mask = 0;
+ }
+
+ * frame_size = rx_round_up
+ (get_frame_size (), STACK_BOUNDARY / BITS_PER_UNIT);
+
+ if (crtl->args.size > 0)
+ * frame_size += rx_round_up
+ (crtl->args.size, STACK_BOUNDARY / BITS_PER_UNIT);
+
+ * stack_size = rx_round_up
+ (crtl->outgoing_args_size, STACK_BOUNDARY / BITS_PER_UNIT);
+}
+
+/* Generate a PUSHM instruction that matches the given operands. */
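+/* E.g. a 12-byte adjustment whose first store comes from r9 emits
+   "pushm r7-r9".  */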
+
+void
+rx_emit_stack_pushm (rtx * operands)
+{
+ HOST_WIDE_INT last_reg;
+ rtx first_push;
+
+ gcc_assert (CONST_INT_P (operands[0]));
+ last_reg = (INTVAL (operands[0]) / UNITS_PER_WORD) - 1;
+
+ gcc_assert (GET_CODE (operands[1]) == PARALLEL);
+ first_push = XVECEXP (operands[1], 0, 1);
+ gcc_assert (SET_P (first_push));
+ first_push = SET_SRC (first_push);
+ gcc_assert (REG_P (first_push));
+
+ asm_fprintf (asm_out_file, "\tpushm\t%s-%s\n",
+ reg_names [REGNO (first_push) - last_reg],
+ reg_names [REGNO (first_push)]);
+}
+
+/* Generate a PARALLEL that will pass the rx_store_multiple_vector predicate. */
+
+static rtx
+gen_rx_store_vector (unsigned int low, unsigned int high)
+{
+ unsigned int i;
+ unsigned int count = (high - low) + 2;
+ rtx vector;
+
+ vector = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+
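+  /* Element 0 adjusts the stack pointer; elements 1 .. count-1 store
+     registers HIGH down to LOW at successively lower stack slots.  */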
+ XVECEXP (vector, 0, 0) =
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ gen_rtx_MINUS (SImode, stack_pointer_rtx,
+ GEN_INT ((count - 1) * UNITS_PER_WORD)));
+
+ for (i = 0; i < count - 1; i++)
+ XVECEXP (vector, 0, i + 1) =
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_MEM (SImode,
+ gen_rtx_MINUS (SImode, stack_pointer_rtx,
+ GEN_INT ((i + 1) * UNITS_PER_WORD))),
+ gen_rtx_REG (SImode, high - i));
+ return vector;
+}
+
+/* Mark INSN as being frame related. If it is a PARALLEL
+ then mark each element as being frame related as well. */
+
+static void
+mark_frame_related (rtx insn)
+{
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = PATTERN (insn);
+
+ if (GET_CODE (insn) == PARALLEL)
+ {
+ unsigned int i;
+
+ for (i = 0; i < (unsigned) XVECLEN (insn, 0); i++)
+ RTX_FRAME_RELATED_P (XVECEXP (insn, 0, i)) = 1;
+ }
+}
+
+static bool
+ok_for_max_constant (HOST_WIDE_INT val)
+{
+ if (rx_max_constant_size == 0 || rx_max_constant_size == 4)
+ /* If there is no constraint on the size of constants
+ used as operands, then any value is legitimate. */
+ return true;
+
+ /* rx_max_constant_size specifies the maximum number
+ of bytes that can be used to hold a signed value. */
+ return IN_RANGE (val, (-1 << (rx_max_constant_size * 8)),
+ ( 1 << (rx_max_constant_size * 8)));
+}
+
+/* Generate an ADD of SRC plus VAL into DEST.
+   Handles the case where VAL is too big for the -mmax-constant-size limit.
+ Sets FRAME_RELATED_P on the insn if IS_FRAME_RELATED is true. */
+
+static void
+gen_safe_add (rtx dest, rtx src, rtx val, bool is_frame_related)
+{
+ rtx insn;
+
+ if (val == NULL_RTX || INTVAL (val) == 0)
+ {
+ gcc_assert (dest != src);
+
+ insn = emit_move_insn (dest, src);
+ }
+ else if (ok_for_max_constant (INTVAL (val)))
+ insn = emit_insn (gen_addsi3 (dest, src, val));
+ else
+ {
+ /* Wrap VAL in an UNSPEC so that rx_is_legitimate_constant
+ will not reject it. */
+ val = gen_rtx_CONST (SImode, gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_CONST));
+ insn = emit_insn (gen_addsi3 (dest, src, val));
+
+ if (is_frame_related)
+ /* We have to provide our own frame related note here
+ as the dwarf2out code cannot be expected to grok
+ our unspec. */
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (SImode, dest,
+ gen_rtx_PLUS (SImode, src, val)));
+ return;
+ }
+
+ if (is_frame_related)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ return;
+}
+
+void
+rx_expand_prologue (void)
+{
+ unsigned int stack_size;
+ unsigned int frame_size;
+ unsigned int mask;
+ unsigned int low;
+ unsigned int high;
+ unsigned int reg;
+ rtx insn;
+
+  /* Naked functions use their own, programmer-provided prologues.  */
+ if (is_naked_func (NULL_TREE))
+ return;
+
+ rx_get_stack_layout (& low, & high, & mask, & frame_size, & stack_size);
+
+ if (flag_stack_usage_info)
+ current_function_static_stack_size = frame_size + stack_size;
+
+ /* If we use any of the callee-saved registers, save them now. */
+ if (mask)
+ {
+ /* Push registers in reverse order. */
+ for (reg = CC_REGNUM; reg --;)
+ if (mask & (1 << reg))
+ {
+ insn = emit_insn (gen_stack_push (gen_rtx_REG (SImode, reg)));
+ mark_frame_related (insn);
+ }
+ }
+ else if (low)
+ {
+ if (high == low)
+ insn = emit_insn (gen_stack_push (gen_rtx_REG (SImode, low)));
+ else
+ insn = emit_insn (gen_stack_pushm (GEN_INT (((high - low) + 1)
+ * UNITS_PER_WORD),
+ gen_rx_store_vector (low, high)));
+ mark_frame_related (insn);
+ }
+
+ if (MUST_SAVE_ACC_REGISTER)
+ {
+ unsigned int acc_high, acc_low;
+
+ /* Interrupt handlers have to preserve the accumulator
+ register if so requested by the user. Use the first
+ two pushed registers as intermediaries. */
+ if (mask)
+ {
+ acc_low = acc_high = 0;
+
+ for (reg = 1; reg < CC_REGNUM; reg ++)
+ if (mask & (1 << reg))
+ {
+ if (acc_low == 0)
+ acc_low = reg;
+ else
+ {
+ acc_high = reg;
+ break;
+ }
+ }
+
+ /* We have assumed that there are at least two registers pushed... */
+ gcc_assert (acc_high != 0);
+
+ /* Note - the bottom 16 bits of the accumulator are inaccessible.
+ We just assume that they are zero. */
+ emit_insn (gen_mvfacmi (gen_rtx_REG (SImode, acc_low)));
+ emit_insn (gen_mvfachi (gen_rtx_REG (SImode, acc_high)));
+ emit_insn (gen_stack_push (gen_rtx_REG (SImode, acc_low)));
+ emit_insn (gen_stack_push (gen_rtx_REG (SImode, acc_high)));
+ }
+ else
+ {
+ acc_low = low;
+ acc_high = low + 1;
+
+ /* We have assumed that there are at least two registers pushed... */
+ gcc_assert (acc_high <= high);
+
+ emit_insn (gen_mvfacmi (gen_rtx_REG (SImode, acc_low)));
+ emit_insn (gen_mvfachi (gen_rtx_REG (SImode, acc_high)));
+ emit_insn (gen_stack_pushm (GEN_INT (2 * UNITS_PER_WORD),
+ gen_rx_store_vector (acc_low, acc_high)));
+ }
+ }
+
+ /* If needed, set up the frame pointer. */
+ if (frame_pointer_needed)
+ gen_safe_add (frame_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (- (HOST_WIDE_INT) frame_size), true);
+
+ /* Allocate space for the outgoing args.
+ If the stack frame has not already been set up then handle this as well. */
+ if (stack_size)
+ {
+ if (frame_size)
+ {
+ if (frame_pointer_needed)
+ gen_safe_add (stack_pointer_rtx, frame_pointer_rtx,
+ GEN_INT (- (HOST_WIDE_INT) stack_size), true);
+ else
+ gen_safe_add (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (- (HOST_WIDE_INT) (frame_size + stack_size)),
+ true);
+ }
+ else
+ gen_safe_add (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (- (HOST_WIDE_INT) stack_size), true);
+ }
+ else if (frame_size)
+ {
+ if (! frame_pointer_needed)
+ gen_safe_add (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (- (HOST_WIDE_INT) frame_size), true);
+ else
+ gen_safe_add (stack_pointer_rtx, frame_pointer_rtx, NULL_RTX,
+ true);
+ }
+}
+
+static void
+rx_output_function_prologue (FILE * file,
+ HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
+{
+ if (is_fast_interrupt_func (NULL_TREE))
+ asm_fprintf (file, "\t; Note: Fast Interrupt Handler\n");
+
+ if (is_interrupt_func (NULL_TREE))
+ asm_fprintf (file, "\t; Note: Interrupt Handler\n");
+
+ if (is_naked_func (NULL_TREE))
+ asm_fprintf (file, "\t; Note: Naked Function\n");
+
+ if (cfun->static_chain_decl != NULL)
+ asm_fprintf (file, "\t; Note: Nested function declared "
+ "inside another function.\n");
+
+ if (crtl->calls_eh_return)
+ asm_fprintf (file, "\t; Note: Calls __builtin_eh_return.\n");
+}
+
+/* Generate a POPM or RTSD instruction that matches the given operands. */
+
+void
+rx_emit_stack_popm (rtx * operands, bool is_popm)
+{
+ HOST_WIDE_INT stack_adjust;
+ HOST_WIDE_INT last_reg;
+ rtx first_push;
+
+ gcc_assert (CONST_INT_P (operands[0]));
+ stack_adjust = INTVAL (operands[0]);
+
+ gcc_assert (GET_CODE (operands[1]) == PARALLEL);
+ last_reg = XVECLEN (operands[1], 0) - (is_popm ? 2 : 3);
+
+ first_push = XVECEXP (operands[1], 0, 1);
+ gcc_assert (SET_P (first_push));
+ first_push = SET_DEST (first_push);
+ gcc_assert (REG_P (first_push));
+
+ if (is_popm)
+ asm_fprintf (asm_out_file, "\tpopm\t%s-%s\n",
+ reg_names [REGNO (first_push)],
+ reg_names [REGNO (first_push) + last_reg]);
+ else
+ asm_fprintf (asm_out_file, "\trtsd\t#%d, %s-%s\n",
+ (int) stack_adjust,
+ reg_names [REGNO (first_push)],
+ reg_names [REGNO (first_push) + last_reg]);
+}
+
+/* Generate a PARALLEL which will satisfy the rx_rtsd_vector predicate. */
+
+static rtx
+gen_rx_rtsd_vector (unsigned int adjust, unsigned int low, unsigned int high)
+{
+ unsigned int i;
+ unsigned int bias = 3;
+ unsigned int count = (high - low) + bias;
+ rtx vector;
+
+ vector = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+
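+  /* Element 0 adjusts the stack pointer, elements 1 .. count-2 reload
+     registers LOW .. HIGH from the stack, and the final element is the
+     return itself.  */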
+ XVECEXP (vector, 0, 0) =
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx, adjust));
+
+ for (i = 0; i < count - 2; i++)
+ XVECEXP (vector, 0, i + 1) =
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_REG (SImode, low + i),
+ gen_rtx_MEM (SImode,
+ i == 0 ? stack_pointer_rtx
+ : plus_constant (Pmode, stack_pointer_rtx,
+ i * UNITS_PER_WORD)));
+
+ XVECEXP (vector, 0, count - 1) = ret_rtx;
+
+ return vector;
+}
+
+/* Generate a PARALLEL which will satisfy the rx_load_multiple_vector predicate. */
+
+static rtx
+gen_rx_popm_vector (unsigned int low, unsigned int high)
+{
+ unsigned int i;
+ unsigned int count = (high - low) + 2;
+ rtx vector;
+
+ vector = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+
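+  /* Element 0 adjusts the stack pointer; elements 1 .. count-1 reload
+     registers LOW .. HIGH from ascending stack slots.  */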
+ XVECEXP (vector, 0, 0) =
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
+ (count - 1) * UNITS_PER_WORD));
+
+ for (i = 0; i < count - 1; i++)
+ XVECEXP (vector, 0, i + 1) =
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_REG (SImode, low + i),
+ gen_rtx_MEM (SImode,
+ i == 0 ? stack_pointer_rtx
+ : plus_constant (Pmode, stack_pointer_rtx,
+ i * UNITS_PER_WORD)));
+
+ return vector;
+}
+
+/* Returns true if a simple return insn can be used. */
+
+bool
+rx_can_use_simple_return (void)
+{
+ unsigned int low;
+ unsigned int high;
+ unsigned int frame_size;
+ unsigned int stack_size;
+ unsigned int register_mask;
+
+ if (is_naked_func (NULL_TREE)
+ || is_fast_interrupt_func (NULL_TREE)
+ || is_interrupt_func (NULL_TREE))
+ return false;
+
+ rx_get_stack_layout (& low, & high, & register_mask,
+ & frame_size, & stack_size);
+
+ return (register_mask == 0
+ && (frame_size + stack_size) == 0
+ && low == 0);
+}
+
+void
+rx_expand_epilogue (bool is_sibcall)
+{
+ unsigned int low;
+ unsigned int high;
+ unsigned int frame_size;
+ unsigned int stack_size;
+ unsigned int register_mask;
+ unsigned int regs_size;
+ unsigned int reg;
+ unsigned HOST_WIDE_INT total_size;
+
+  /* FIXME: We do not support indirect sibcalls at the moment because we
+ cannot guarantee that the register holding the function address is a
+ call-used register. If it is a call-saved register then the stack
+ pop instructions generated in the epilogue will corrupt the address
+ before it is used.
+
+ Creating a new call-used-only register class works but then the
+ reload pass gets stuck because it cannot always find a call-used
+ register for spilling sibcalls.
+
+ The other possible solution is for this pass to scan forward for the
+ sibcall instruction (if it has been generated) and work out if it
+ is an indirect sibcall using a call-saved register. If it is then
+     the address can be copied into a call-used register in this epilogue
+ code and the sibcall instruction modified to use that register. */
+
+ if (is_naked_func (NULL_TREE))
+ {
+ gcc_assert (! is_sibcall);
+
+      /* Naked functions use their own, programmer-provided epilogues.
+ But, in order to keep gcc happy we have to generate some kind of
+ epilogue RTL. */
+ emit_jump_insn (gen_naked_return ());
+ return;
+ }
+
+ rx_get_stack_layout (& low, & high, & register_mask,
+ & frame_size, & stack_size);
+
+ total_size = frame_size + stack_size;
+ regs_size = ((high - low) + 1) * UNITS_PER_WORD;
+
+ /* See if we are unable to use the special stack frame deconstruct and
+ return instructions. In most cases we can use them, but the exceptions
+ are:
+
+ - Sibling calling functions deconstruct the frame but do not return to
+ their caller. Instead they branch to their sibling and allow their
+ return instruction to return to this function's parent.
+
+ - Fast and normal interrupt handling functions have to use special
+ return instructions.
+
+ - Functions where we have pushed a fragmented set of registers into the
+ call-save area must have the same set of registers popped. */
+ if (is_sibcall
+ || is_fast_interrupt_func (NULL_TREE)
+ || is_interrupt_func (NULL_TREE)
+ || register_mask)
+ {
+ /* Cannot use the special instructions - deconstruct by hand. */
+ if (total_size)
+ gen_safe_add (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (total_size), false);
+
+ if (MUST_SAVE_ACC_REGISTER)
+ {
+ unsigned int acc_low, acc_high;
+
+ /* Reverse the saving of the accumulator register onto the stack.
+ Note we must adjust the saved "low" accumulator value as it
+	     is really the middle 32 bits of the accumulator.  */
+ if (register_mask)
+ {
+ acc_low = acc_high = 0;
+
+ for (reg = 1; reg < CC_REGNUM; reg ++)
+ if (register_mask & (1 << reg))
+ {
+ if (acc_low == 0)
+ acc_low = reg;
+ else
+ {
+ acc_high = reg;
+ break;
+ }
+ }
+ emit_insn (gen_stack_pop (gen_rtx_REG (SImode, acc_high)));
+ emit_insn (gen_stack_pop (gen_rtx_REG (SImode, acc_low)));
+ }
+ else
+ {
+ acc_low = low;
+ acc_high = low + 1;
+ emit_insn (gen_stack_popm (GEN_INT (2 * UNITS_PER_WORD),
+ gen_rx_popm_vector (acc_low, acc_high)));
+ }
+
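+	  /* ACC_LOW holds bits 16-47 of the accumulator, so shift it left
+	     by 16 to line bits 16-31 up for MVTACLO; the inaccessible
+	     bottom 16 bits are restored as zero.  */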
+ emit_insn (gen_ashlsi3 (gen_rtx_REG (SImode, acc_low),
+ gen_rtx_REG (SImode, acc_low),
+ GEN_INT (16)));
+ emit_insn (gen_mvtaclo (gen_rtx_REG (SImode, acc_low)));
+ emit_insn (gen_mvtachi (gen_rtx_REG (SImode, acc_high)));
+ }
+
+ if (register_mask)
+ {
+ for (reg = 0; reg < CC_REGNUM; reg ++)
+ if (register_mask & (1 << reg))
+ emit_insn (gen_stack_pop (gen_rtx_REG (SImode, reg)));
+ }
+ else if (low)
+ {
+ if (high == low)
+ emit_insn (gen_stack_pop (gen_rtx_REG (SImode, low)));
+ else
+ emit_insn (gen_stack_popm (GEN_INT (regs_size),
+ gen_rx_popm_vector (low, high)));
+ }
+
+ if (is_fast_interrupt_func (NULL_TREE))
+ {
+ gcc_assert (! is_sibcall);
+ emit_jump_insn (gen_fast_interrupt_return ());
+ }
+ else if (is_interrupt_func (NULL_TREE))
+ {
+ gcc_assert (! is_sibcall);
+ emit_jump_insn (gen_exception_return ());
+ }
+ else if (! is_sibcall)
+ emit_jump_insn (gen_simple_return ());
+
+ return;
+ }
+
+ /* If we allocated space on the stack, free it now. */
+ if (total_size)
+ {
+ unsigned HOST_WIDE_INT rtsd_size;
+
+ /* See if we can use the RTSD instruction. */
+ rtsd_size = total_size + regs_size;
+ if (rtsd_size < 1024 && (rtsd_size % 4) == 0)
+ {
+ if (low)
+ emit_jump_insn (gen_pop_and_return
+ (GEN_INT (rtsd_size),
+ gen_rx_rtsd_vector (rtsd_size, low, high)));
+ else
+ emit_jump_insn (gen_deallocate_and_return (GEN_INT (total_size)));
+
+ return;
+ }
+
+ gen_safe_add (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (total_size), false);
+ }
+
+ if (low)
+ emit_jump_insn (gen_pop_and_return (GEN_INT (regs_size),
+ gen_rx_rtsd_vector (regs_size,
+ low, high)));
+ else
+ emit_jump_insn (gen_simple_return ());
+}
+
+
+/* Compute the offset (in bytes) between FROM (arg pointer
+ or frame pointer) and TO (frame pointer or stack pointer).
+ See ASCII art comment at the start of rx_expand_prologue
+ for more information. */
+
+int
+rx_initial_elimination_offset (int from, int to)
+{
+ unsigned int low;
+ unsigned int high;
+ unsigned int frame_size;
+ unsigned int stack_size;
+ unsigned int mask;
+
+ rx_get_stack_layout (& low, & high, & mask, & frame_size, & stack_size);
+
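+  /* A hypothetical illustration: with r6-r7 saved, a 12 byte frame and
+     8 bytes of outgoing args, AP->FP is 2*4 + 4 + 12 = 24 bytes and
+     AP->SP is 24 + 8 = 32, while FP->SP is just the 8 byte arg area.  */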
+ if (from == ARG_POINTER_REGNUM)
+ {
+ /* Extend the computed size of the stack frame to
+ include the registers pushed in the prologue. */
+ if (low)
+ frame_size += ((high - low) + 1) * UNITS_PER_WORD;
+ else
+ frame_size += bit_count (mask) * UNITS_PER_WORD;
+
+ /* Remember to include the return address. */
+ frame_size += 1 * UNITS_PER_WORD;
+
+ if (to == FRAME_POINTER_REGNUM)
+ return frame_size;
+
+ gcc_assert (to == STACK_POINTER_REGNUM);
+ return frame_size + stack_size;
+ }
+
+ gcc_assert (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM);
+ return stack_size;
+}
+
+/* Decide if a variable should go into one of the small data sections. */
+
+static bool
+rx_in_small_data (const_tree decl)
+{
+ int size;
+ const_tree section;
+
+ if (rx_small_data_limit == 0)
+ return false;
+
+ if (TREE_CODE (decl) != VAR_DECL)
+ return false;
+
+ /* We do not put read-only variables into a small data area because
+ they would be placed with the other read-only sections, far away
+ from the read-write data sections, and we only have one small
+ data area pointer.
+     Similarly, commons are placed in the .bss section, which might be
+     far away from (and out of alignment with respect to) the .data
+     section.  */
+ if (TREE_READONLY (decl) || DECL_COMMON (decl))
+ return false;
+
+ section = DECL_SECTION_NAME (decl);
+ if (section)
+ {
+ const char * const name = TREE_STRING_POINTER (section);
+
+ return (strcmp (name, "D_2") == 0) || (strcmp (name, "B_2") == 0);
+ }
+
+ size = int_size_in_bytes (TREE_TYPE (decl));
+
+ return (size > 0) && (size <= rx_small_data_limit);
+}
+
+/* Return a section for X.
+ The only special thing we do here is to honor small data. */
+
+static section *
+rx_select_rtx_section (enum machine_mode mode,
+ rtx x,
+ unsigned HOST_WIDE_INT align)
+{
+ if (rx_small_data_limit > 0
+ && GET_MODE_SIZE (mode) <= rx_small_data_limit
+ && align <= (unsigned HOST_WIDE_INT) rx_small_data_limit * BITS_PER_UNIT)
+ return sdata_section;
+
+ return default_elf_select_rtx_section (mode, x, align);
+}
+
+static section *
+rx_select_section (tree decl,
+ int reloc,
+ unsigned HOST_WIDE_INT align)
+{
+ if (rx_small_data_limit > 0)
+ {
+ switch (categorize_decl_for_section (decl, reloc))
+ {
+ case SECCAT_SDATA: return sdata_section;
+ case SECCAT_SBSS: return sbss_section;
+ case SECCAT_SRODATA:
+	    /* Fall through.  We do not put small, read-only
+ data into the C_2 section because we are not
+ using the C_2 section. We do not use the C_2
+ section because it is located with the other
+ read-only data sections, far away from the read-write
+ data sections and we only have one small data
+ pointer (r13). */
+ default:
+ break;
+ }
+ }
+
+ /* If we are supporting the Renesas assembler
+ we cannot use mergeable sections. */
+ if (TARGET_AS100_SYNTAX)
+ switch (categorize_decl_for_section (decl, reloc))
+ {
+ case SECCAT_RODATA_MERGE_CONST:
+ case SECCAT_RODATA_MERGE_STR_INIT:
+ case SECCAT_RODATA_MERGE_STR:
+ return readonly_data_section;
+
+ default:
+ break;
+ }
+
+ return default_elf_select_section (decl, reloc, align);
+}
+
+enum rx_builtin
+{
+ RX_BUILTIN_BRK,
+ RX_BUILTIN_CLRPSW,
+ RX_BUILTIN_INT,
+ RX_BUILTIN_MACHI,
+ RX_BUILTIN_MACLO,
+ RX_BUILTIN_MULHI,
+ RX_BUILTIN_MULLO,
+ RX_BUILTIN_MVFACHI,
+ RX_BUILTIN_MVFACMI,
+ RX_BUILTIN_MVFC,
+ RX_BUILTIN_MVTACHI,
+ RX_BUILTIN_MVTACLO,
+ RX_BUILTIN_MVTC,
+ RX_BUILTIN_MVTIPL,
+ RX_BUILTIN_RACW,
+ RX_BUILTIN_REVW,
+ RX_BUILTIN_RMPA,
+ RX_BUILTIN_ROUND,
+ RX_BUILTIN_SETPSW,
+ RX_BUILTIN_WAIT,
+ RX_BUILTIN_max
+};
+
+static GTY(()) tree rx_builtins[(int) RX_BUILTIN_max];
+
+static void
+rx_init_builtins (void)
+{
+#define ADD_RX_BUILTIN0(UC_NAME, LC_NAME, RET_TYPE) \
+ rx_builtins[RX_BUILTIN_##UC_NAME] = \
+ add_builtin_function ("__builtin_rx_" LC_NAME, \
+ build_function_type_list (RET_TYPE##_type_node, \
+ NULL_TREE), \
+ RX_BUILTIN_##UC_NAME, \
+ BUILT_IN_MD, NULL, NULL_TREE)
+
+#define ADD_RX_BUILTIN1(UC_NAME, LC_NAME, RET_TYPE, ARG_TYPE) \
+ rx_builtins[RX_BUILTIN_##UC_NAME] = \
+ add_builtin_function ("__builtin_rx_" LC_NAME, \
+ build_function_type_list (RET_TYPE##_type_node, \
+ ARG_TYPE##_type_node, \
+ NULL_TREE), \
+ RX_BUILTIN_##UC_NAME, \
+ BUILT_IN_MD, NULL, NULL_TREE)
+
+#define ADD_RX_BUILTIN2(UC_NAME, LC_NAME, RET_TYPE, ARG_TYPE1, ARG_TYPE2) \
+ rx_builtins[RX_BUILTIN_##UC_NAME] = \
+ add_builtin_function ("__builtin_rx_" LC_NAME, \
+ build_function_type_list (RET_TYPE##_type_node, \
+ ARG_TYPE1##_type_node,\
+ ARG_TYPE2##_type_node,\
+ NULL_TREE), \
+ RX_BUILTIN_##UC_NAME, \
+ BUILT_IN_MD, NULL, NULL_TREE)
+
+#define ADD_RX_BUILTIN3(UC_NAME,LC_NAME,RET_TYPE,ARG_TYPE1,ARG_TYPE2,ARG_TYPE3) \
+ rx_builtins[RX_BUILTIN_##UC_NAME] = \
+ add_builtin_function ("__builtin_rx_" LC_NAME, \
+ build_function_type_list (RET_TYPE##_type_node, \
+ ARG_TYPE1##_type_node,\
+ ARG_TYPE2##_type_node,\
+ ARG_TYPE3##_type_node,\
+ NULL_TREE), \
+ RX_BUILTIN_##UC_NAME, \
+ BUILT_IN_MD, NULL, NULL_TREE)
+
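+  /* Each entry below registers a __builtin_rx_<name> function; the
+     MACHI entry, for example, creates __builtin_rx_machi taking two
+     SImode integers and returning void.  */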
+ ADD_RX_BUILTIN0 (BRK, "brk", void);
+ ADD_RX_BUILTIN1 (CLRPSW, "clrpsw", void, integer);
+ ADD_RX_BUILTIN1 (SETPSW, "setpsw", void, integer);
+ ADD_RX_BUILTIN1 (INT, "int", void, integer);
+ ADD_RX_BUILTIN2 (MACHI, "machi", void, intSI, intSI);
+ ADD_RX_BUILTIN2 (MACLO, "maclo", void, intSI, intSI);
+ ADD_RX_BUILTIN2 (MULHI, "mulhi", void, intSI, intSI);
+ ADD_RX_BUILTIN2 (MULLO, "mullo", void, intSI, intSI);
+ ADD_RX_BUILTIN0 (MVFACHI, "mvfachi", intSI);
+ ADD_RX_BUILTIN0 (MVFACMI, "mvfacmi", intSI);
+ ADD_RX_BUILTIN1 (MVTACHI, "mvtachi", void, intSI);
+ ADD_RX_BUILTIN1 (MVTACLO, "mvtaclo", void, intSI);
+ ADD_RX_BUILTIN0 (RMPA, "rmpa", void);
+ ADD_RX_BUILTIN1 (MVFC, "mvfc", intSI, integer);
+ ADD_RX_BUILTIN2 (MVTC, "mvtc", void, integer, integer);
+ ADD_RX_BUILTIN1 (MVTIPL, "mvtipl", void, integer);
+ ADD_RX_BUILTIN1 (RACW, "racw", void, integer);
+ ADD_RX_BUILTIN1 (ROUND, "round", intSI, float);
+ ADD_RX_BUILTIN1 (REVW, "revw", intSI, intSI);
+ ADD_RX_BUILTIN0 (WAIT, "wait", void);
+}
+
+/* Return the RX builtin for CODE. */
+
+static tree
+rx_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+ if (code >= RX_BUILTIN_max)
+ return error_mark_node;
+
+ return rx_builtins[code];
+}
+
+static rtx
+rx_expand_void_builtin_1_arg (rtx arg, rtx (* gen_func)(rtx), bool reg)
+{
+ if (reg && ! REG_P (arg))
+ arg = force_reg (SImode, arg);
+
+ emit_insn (gen_func (arg));
+
+ return NULL_RTX;
+}
+
+static rtx
+rx_expand_builtin_mvtc (tree exp)
+{
+ rtx arg1 = expand_normal (CALL_EXPR_ARG (exp, 0));
+ rtx arg2 = expand_normal (CALL_EXPR_ARG (exp, 1));
+
+ if (! CONST_INT_P (arg1))
+ return NULL_RTX;
+
+ if (! REG_P (arg2))
+ arg2 = force_reg (SImode, arg2);
+
+ emit_insn (gen_mvtc (arg1, arg2));
+
+ return NULL_RTX;
+}
+
+static rtx
+rx_expand_builtin_mvfc (tree t_arg, rtx target)
+{
+ rtx arg = expand_normal (t_arg);
+
+ if (! CONST_INT_P (arg))
+ return NULL_RTX;
+
+ if (target == NULL_RTX)
+ return NULL_RTX;
+
+ if (! REG_P (target))
+ target = force_reg (SImode, target);
+
+ emit_insn (gen_mvfc (target, arg));
+
+ return target;
+}
+
+static rtx
+rx_expand_builtin_mvtipl (rtx arg)
+{
+ /* The RX610 does not support the MVTIPL instruction. */
+ if (rx_cpu_type == RX610)
+ return NULL_RTX;
+
+ if (! CONST_INT_P (arg) || ! IN_RANGE (INTVAL (arg), 0, (1 << 4) - 1))
+ return NULL_RTX;
+
+ emit_insn (gen_mvtipl (arg));
+
+ return NULL_RTX;
+}
+
+static rtx
+rx_expand_builtin_mac (tree exp, rtx (* gen_func)(rtx, rtx))
+{
+ rtx arg1 = expand_normal (CALL_EXPR_ARG (exp, 0));
+ rtx arg2 = expand_normal (CALL_EXPR_ARG (exp, 1));
+
+ if (! REG_P (arg1))
+ arg1 = force_reg (SImode, arg1);
+
+ if (! REG_P (arg2))
+ arg2 = force_reg (SImode, arg2);
+
+ emit_insn (gen_func (arg1, arg2));
+
+ return NULL_RTX;
+}
+
+static rtx
+rx_expand_int_builtin_1_arg (rtx arg,
+ rtx target,
+ rtx (* gen_func)(rtx, rtx),
+ bool mem_ok)
+{
+ if (! REG_P (arg))
+ if (!mem_ok || ! MEM_P (arg))
+ arg = force_reg (SImode, arg);
+
+ if (target == NULL_RTX || ! REG_P (target))
+ target = gen_reg_rtx (SImode);
+
+ emit_insn (gen_func (target, arg));
+
+ return target;
+}
+
+static rtx
+rx_expand_int_builtin_0_arg (rtx target, rtx (* gen_func)(rtx))
+{
+ if (target == NULL_RTX || ! REG_P (target))
+ target = gen_reg_rtx (SImode);
+
+ emit_insn (gen_func (target));
+
+ return target;
+}
+
+static rtx
+rx_expand_builtin_round (rtx arg, rtx target)
+{
+ if ((! REG_P (arg) && ! MEM_P (arg))
+ || GET_MODE (arg) != SFmode)
+ arg = force_reg (SFmode, arg);
+
+ if (target == NULL_RTX || ! REG_P (target))
+ target = gen_reg_rtx (SImode);
+
+ emit_insn (gen_lrintsf2 (target, arg));
+
+ return target;
+}
+
+static int
+valid_psw_flag (rtx op, const char *which)
+{
+ static int mvtc_inform_done = 0;
+
+ if (GET_CODE (op) == CONST_INT)
+ switch (INTVAL (op))
+ {
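+      /* Each flag may be given either as its PSW bit number or as its
+	 upper- or lower-case letter name.  */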
+ case 0: case 'c': case 'C':
+ case 1: case 'z': case 'Z':
+ case 2: case 's': case 'S':
+ case 3: case 'o': case 'O':
+ case 8: case 'i': case 'I':
+ case 9: case 'u': case 'U':
+ return 1;
+ }
+
+ error ("__builtin_rx_%s takes 'C', 'Z', 'S', 'O', 'I', or 'U'", which);
+ if (!mvtc_inform_done)
+ error ("use __builtin_rx_mvtc (0, ... ) to write arbitrary values to PSW");
+ mvtc_inform_done = 1;
+
+ return 0;
+}
+
+static rtx
+rx_expand_builtin (tree exp,
+ rtx target,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ tree arg = call_expr_nargs (exp) >= 1 ? CALL_EXPR_ARG (exp, 0) : NULL_TREE;
+ rtx op = arg ? expand_normal (arg) : NULL_RTX;
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+
+ switch (fcode)
+ {
+ case RX_BUILTIN_BRK: emit_insn (gen_brk ()); return NULL_RTX;
+ case RX_BUILTIN_CLRPSW:
+ if (!valid_psw_flag (op, "clrpsw"))
+ return NULL_RTX;
+ return rx_expand_void_builtin_1_arg (op, gen_clrpsw, false);
+ case RX_BUILTIN_SETPSW:
+ if (!valid_psw_flag (op, "setpsw"))
+ return NULL_RTX;
+ return rx_expand_void_builtin_1_arg (op, gen_setpsw, false);
+ case RX_BUILTIN_INT: return rx_expand_void_builtin_1_arg
+ (op, gen_int, false);
+ case RX_BUILTIN_MACHI: return rx_expand_builtin_mac (exp, gen_machi);
+ case RX_BUILTIN_MACLO: return rx_expand_builtin_mac (exp, gen_maclo);
+ case RX_BUILTIN_MULHI: return rx_expand_builtin_mac (exp, gen_mulhi);
+ case RX_BUILTIN_MULLO: return rx_expand_builtin_mac (exp, gen_mullo);
+ case RX_BUILTIN_MVFACHI: return rx_expand_int_builtin_0_arg
+ (target, gen_mvfachi);
+ case RX_BUILTIN_MVFACMI: return rx_expand_int_builtin_0_arg
+ (target, gen_mvfacmi);
+ case RX_BUILTIN_MVTACHI: return rx_expand_void_builtin_1_arg
+ (op, gen_mvtachi, true);
+ case RX_BUILTIN_MVTACLO: return rx_expand_void_builtin_1_arg
+ (op, gen_mvtaclo, true);
+ case RX_BUILTIN_RMPA: emit_insn (gen_rmpa ()); return NULL_RTX;
+ case RX_BUILTIN_MVFC: return rx_expand_builtin_mvfc (arg, target);
+ case RX_BUILTIN_MVTC: return rx_expand_builtin_mvtc (exp);
+ case RX_BUILTIN_MVTIPL: return rx_expand_builtin_mvtipl (op);
+ case RX_BUILTIN_RACW: return rx_expand_void_builtin_1_arg
+ (op, gen_racw, false);
+ case RX_BUILTIN_ROUND: return rx_expand_builtin_round (op, target);
+ case RX_BUILTIN_REVW: return rx_expand_int_builtin_1_arg
+ (op, target, gen_revw, false);
+ case RX_BUILTIN_WAIT: emit_insn (gen_wait ()); return NULL_RTX;
+
+ default:
+ internal_error ("bad builtin code");
+ break;
+ }
+
+ return NULL_RTX;
+}
+
+/* Place an element into a constructor or destructor section.
+ Like default_ctor_section_asm_out_constructor in varasm.c
+ except that it uses .init_array (or .fini_array) and it
+ handles constructor priorities. */
+
+static void
+rx_elf_asm_cdtor (rtx symbol, int priority, bool is_ctor)
+{
+ section * s;
+
+ if (priority != DEFAULT_INIT_PRIORITY)
+ {
+ char buf[18];
+
+ sprintf (buf, "%s.%.5u",
+ is_ctor ? ".init_array" : ".fini_array",
+ priority);
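+      /* E.g. priority 65 yields the section name ".init_array.00065".  */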
+ s = get_section (buf, SECTION_WRITE, NULL_TREE);
+ }
+ else if (is_ctor)
+ s = ctors_section;
+ else
+ s = dtors_section;
+
+ switch_to_section (s);
+ assemble_align (POINTER_SIZE);
+ assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+}
+
+static void
+rx_elf_asm_constructor (rtx symbol, int priority)
+{
+ rx_elf_asm_cdtor (symbol, priority, /* is_ctor= */true);
+}
+
+static void
+rx_elf_asm_destructor (rtx symbol, int priority)
+{
+ rx_elf_asm_cdtor (symbol, priority, /* is_ctor= */false);
+}
+
+/* Check "fast_interrupt", "interrupt" and "naked" attributes. */
+
+static tree
+rx_handle_func_attribute (tree * node,
+ tree name,
+ tree args,
+ int flags ATTRIBUTE_UNUSED,
+ bool * no_add_attrs)
+{
+ gcc_assert (DECL_P (* node));
+ gcc_assert (args == NULL_TREE);
+
+ if (TREE_CODE (* node) != FUNCTION_DECL)
+ {
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
+ * no_add_attrs = true;
+ }
+
+ /* FIXME: We ought to check for conflicting attributes. */
+
+ /* FIXME: We ought to check that the interrupt and exception
+ handler attributes have been applied to void functions. */
+ return NULL_TREE;
+}
+
+/* Table of RX specific attributes. */
+const struct attribute_spec rx_attribute_table[] =
+{
+ /* Name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
+ affects_type_identity. */
+ { "fast_interrupt", 0, 0, true, false, false, rx_handle_func_attribute,
+ false },
+ { "interrupt", 0, 0, true, false, false, rx_handle_func_attribute,
+ false },
+ { "naked", 0, 0, true, false, false, rx_handle_func_attribute,
+ false },
+ { NULL, 0, 0, false, false, false, NULL, false }
+};
+
+/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
+
+static void
+rx_override_options_after_change (void)
+{
+ static bool first_time = TRUE;
+
+ if (first_time)
+ {
+ /* If this is the first time through and the user has not disabled
+ the use of RX FPU hardware then enable -ffinite-math-only,
+ since the FPU instructions do not support NaNs and infinities. */
+ if (TARGET_USE_FPU)
+ flag_finite_math_only = 1;
+
+ first_time = FALSE;
+ }
+ else
+ {
+ /* Alert the user if they are changing the optimization options
+	 to use IEEE-compliant floating-point arithmetic with RX FPU insns.  */
+ if (TARGET_USE_FPU
+ && !flag_finite_math_only)
+ warning (0, "RX FPU instructions do not support NaNs and infinities");
+ }
+}
+
+static void
+rx_option_override (void)
+{
+ unsigned int i;
+ cl_deferred_option *opt;
+ vec<cl_deferred_option> *v = (vec<cl_deferred_option> *) rx_deferred_options;
+
+ if (v)
+ FOR_EACH_VEC_ELT (*v, i, opt)
+ {
+ switch (opt->opt_index)
+ {
+ case OPT_mint_register_:
+ switch (opt->value)
+ {
+ case 4:
+ fixed_regs[10] = call_used_regs [10] = 1;
+ /* Fall through. */
+ case 3:
+ fixed_regs[11] = call_used_regs [11] = 1;
+ /* Fall through. */
+ case 2:
+ fixed_regs[12] = call_used_regs [12] = 1;
+ /* Fall through. */
+ case 1:
+ fixed_regs[13] = call_used_regs [13] = 1;
+ /* Fall through. */
+ case 0:
+ rx_num_interrupt_regs = opt->value;
+ break;
+ default:
+ rx_num_interrupt_regs = 0;
+ /* Error message already given because rx_handle_option
+ returned false. */
+ break;
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ /* This target defaults to strict volatile bitfields. */
+ if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
+ flag_strict_volatile_bitfields = 1;
+
+ rx_override_options_after_change ();
+
+ if (align_jumps == 0 && ! optimize_size)
+ align_jumps = 3;
+ if (align_loops == 0 && ! optimize_size)
+ align_loops = 3;
+ if (align_labels == 0 && ! optimize_size)
+ align_labels = 3;
+}
+
+
+static bool
+rx_allocate_stack_slots_for_args (void)
+{
+ /* Naked functions should not allocate stack slots for arguments. */
+ return ! is_naked_func (NULL_TREE);
+}
+
+static bool
+rx_func_attr_inlinable (const_tree decl)
+{
+ return ! is_fast_interrupt_func (decl)
+ && ! is_interrupt_func (decl)
+ && ! is_naked_func (decl);
+}
+
+static bool
+rx_warn_func_return (tree decl)
+{
+ /* Naked functions are implemented entirely in assembly, including the
+ return sequence, so suppress warnings about this. */
+ return !is_naked_func (decl);
+}
+
+/* Return nonzero if it is ok to make a tail-call to DECL (a
+   FUNCTION_DECL, or NULL if this is an indirect call through EXP).  */
+
+static bool
+rx_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+ /* Do not allow indirect tailcalls. The
+ sibcall patterns do not support them. */
+ if (decl == NULL)
+ return false;
+
+ /* Never tailcall from inside interrupt handlers or naked functions. */
+ if (is_fast_interrupt_func (NULL_TREE)
+ || is_interrupt_func (NULL_TREE)
+ || is_naked_func (NULL_TREE))
+ return false;
+
+ return true;
+}
+
+static void
+rx_file_start (void)
+{
+ if (! TARGET_AS100_SYNTAX)
+ default_file_start ();
+}
+
+static bool
+rx_is_ms_bitfield_layout (const_tree record_type ATTRIBUTE_UNUSED)
+{
+ /* The packed attribute overrides the MS behaviour. */
+ return ! TYPE_PACKED (record_type);
+}
+
+/* Returns true if X is a legitimate constant for an immediate
+ operand on the RX. X is already known to satisfy CONSTANT_P. */
+
+bool
+rx_is_legitimate_constant (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+ switch (GET_CODE (x))
+ {
+ case CONST:
+ x = XEXP (x, 0);
+
+ if (GET_CODE (x) == PLUS)
+ {
+ if (! CONST_INT_P (XEXP (x, 1)))
+ return false;
+
+ /* GCC would not pass us CONST_INT + CONST_INT so we
+ know that we have {SYMBOL|LABEL} + CONST_INT. */
+ x = XEXP (x, 0);
+ gcc_assert (! CONST_INT_P (x));
+ }
+
+ switch (GET_CODE (x))
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ return true;
+
+ case UNSPEC:
+ return XINT (x, 1) == UNSPEC_CONST || XINT (x, 1) == UNSPEC_PID_ADDR;
+
+ default:
+	  /* FIXME: Can this ever happen?  */
+ gcc_unreachable ();
+ }
+ break;
+
+ case LABEL_REF:
+ case SYMBOL_REF:
+ return true;
+ case CONST_DOUBLE:
+ return (rx_max_constant_size == 0 || rx_max_constant_size == 4);
+ case CONST_VECTOR:
+ return false;
+ default:
+ gcc_assert (CONST_INT_P (x));
+ break;
+ }
+
+ return ok_for_max_constant (INTVAL (x));
+}
+
+static int
+rx_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
+ addr_space_t as ATTRIBUTE_UNUSED, bool speed)
+{
+ rtx a, b;
+
+ if (GET_CODE (addr) != PLUS)
+ return COSTS_N_INSNS (1);
+
+ a = XEXP (addr, 0);
+ b = XEXP (addr, 1);
+
+ if (REG_P (a) && REG_P (b))
+ /* Try to discourage REG+REG addressing as it keeps two registers live. */
+ return COSTS_N_INSNS (4);
+
+ if (speed)
+ /* [REG+OFF] is just as fast as [REG]. */
+ return COSTS_N_INSNS (1);
+
+ if (CONST_INT_P (b)
+ && ((INTVAL (b) > 128) || INTVAL (b) < -127))
+ /* Try to discourage REG + <large OFF> when optimizing for size. */
+ return COSTS_N_INSNS (2);
+
+ return COSTS_N_INSNS (1);
+}
+
+static bool
+rx_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+{
+ /* We can always eliminate to the frame pointer.
+ We can eliminate to the stack pointer unless a frame
+ pointer is needed. */
+
+ return to == FRAME_POINTER_REGNUM
+ || ( to == STACK_POINTER_REGNUM && ! frame_pointer_needed);
+}
+
+
+static void
+rx_trampoline_template (FILE * file)
+{
+ /* Output assembler code for a block containing the constant
+ part of a trampoline, leaving space for the variable parts.
+
+ On the RX, (where r8 is the static chain regnum) the trampoline
+ looks like:
+
+ mov #<static chain value>, r8
+ mov #<function's address>, r9
+ jmp r9
+
+ In big-endian-data-mode however instructions are read into the CPU
+ 4 bytes at a time. These bytes are then swapped around before being
+ passed to the decoder. So...we must partition our trampoline into
+ 4 byte packets and swap these packets around so that the instruction
+   reader will reverse the process.  But, in order to avoid splitting
+   the 32-bit constants across these packet boundaries (which would make
+   inserting them into the constructed trampoline very difficult), we
+   have to pad the instruction sequence with NOP insns, i.e.:
+
+ nop
+ nop
+ mov.l #<...>, r8
+ nop
+ nop
+ mov.l #<...>, r9
+ jmp r9
+ nop
+ nop */
+
+ if (! TARGET_BIG_ENDIAN_DATA)
+ {
+ asm_fprintf (file, "\tmov.L\t#0deadbeefH, r%d\n", STATIC_CHAIN_REGNUM);
+ asm_fprintf (file, "\tmov.L\t#0deadbeefH, r%d\n", TRAMPOLINE_TEMP_REGNUM);
+ asm_fprintf (file, "\tjmp\tr%d\n", TRAMPOLINE_TEMP_REGNUM);
+ }
+ else
+ {
+ char r8 = '0' + STATIC_CHAIN_REGNUM;
+ char r9 = '0' + TRAMPOLINE_TEMP_REGNUM;
+
+ if (TARGET_AS100_SYNTAX)
+ {
+ asm_fprintf (file, "\t.BYTE 0%c2H, 0fbH, 003H, 003H\n", r8);
+ asm_fprintf (file, "\t.BYTE 0deH, 0adH, 0beH, 0efH\n");
+ asm_fprintf (file, "\t.BYTE 0%c2H, 0fbH, 003H, 003H\n", r9);
+ asm_fprintf (file, "\t.BYTE 0deH, 0adH, 0beH, 0efH\n");
+ asm_fprintf (file, "\t.BYTE 003H, 003H, 00%cH, 07fH\n", r9);
+ }
+ else
+ {
+ asm_fprintf (file, "\t.byte 0x%c2, 0xfb, 0x03, 0x03\n", r8);
+ asm_fprintf (file, "\t.byte 0xde, 0xad, 0xbe, 0xef\n");
+ asm_fprintf (file, "\t.byte 0x%c2, 0xfb, 0x03, 0x03\n", r9);
+ asm_fprintf (file, "\t.byte 0xde, 0xad, 0xbe, 0xef\n");
+ asm_fprintf (file, "\t.byte 0x03, 0x03, 0x0%c, 0x7f\n", r9);
+ }
+ }
+}
+
+static void
+rx_trampoline_init (rtx tramp, tree fndecl, rtx chain)
+{
+ rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
+
+ emit_block_move (tramp, assemble_trampoline_template (),
+ GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
+
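+  /* The offsets used below are those of the 32-bit immediate slots in
+     the template above: 2 and 8 in little-endian mode, or 4 and 12 once
+     the NOP padding for big-endian-data mode is included.  */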
+ if (TARGET_BIG_ENDIAN_DATA)
+ {
+ emit_move_insn (adjust_address (tramp, SImode, 4), chain);
+ emit_move_insn (adjust_address (tramp, SImode, 12), fnaddr);
+ }
+ else
+ {
+ emit_move_insn (adjust_address (tramp, SImode, 2), chain);
+ emit_move_insn (adjust_address (tramp, SImode, 6 + 2), fnaddr);
+ }
+}
+
+static int
+rx_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+ reg_class_t regclass ATTRIBUTE_UNUSED,
+ bool in)
+{
+ return (in ? 2 : 0) + REGISTER_MOVE_COST (mode, regclass, regclass);
+}
+
+/* Convert a CC_MODE to the set of flags that it represents. */
+
+static unsigned int
+flags_from_mode (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case CC_ZSmode:
+ return CC_FLAG_S | CC_FLAG_Z;
+ case CC_ZSOmode:
+ return CC_FLAG_S | CC_FLAG_Z | CC_FLAG_O;
+ case CC_ZSCmode:
+ return CC_FLAG_S | CC_FLAG_Z | CC_FLAG_C;
+ case CCmode:
+ return CC_FLAG_S | CC_FLAG_Z | CC_FLAG_O | CC_FLAG_C;
+ case CC_Fmode:
+ return CC_FLAG_FP;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Convert a set of flags to a CC_MODE that can implement it. */
+
+static enum machine_mode
+mode_from_flags (unsigned int f)
+{
+ if (f & CC_FLAG_FP)
+ return CC_Fmode;
+ if (f & CC_FLAG_O)
+ {
+ if (f & CC_FLAG_C)
+ return CCmode;
+ else
+ return CC_ZSOmode;
+ }
+ else if (f & CC_FLAG_C)
+ return CC_ZSCmode;
+ else
+ return CC_ZSmode;
+}
+
+/* Convert an RTX_CODE to the set of flags needed to implement it.
+ This assumes an integer comparison. */
+
+static unsigned int
+flags_from_code (enum rtx_code code)
+{
+ switch (code)
+ {
+ case LT:
+ case GE:
+ return CC_FLAG_S;
+ case GT:
+ case LE:
+ return CC_FLAG_S | CC_FLAG_O | CC_FLAG_Z;
+ case GEU:
+ case LTU:
+ return CC_FLAG_C;
+ case GTU:
+ case LEU:
+ return CC_FLAG_C | CC_FLAG_Z;
+ case EQ:
+ case NE:
+ return CC_FLAG_Z;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return a CC_MODE of which both M1 and M2 are subsets. */
+
+static enum machine_mode
+rx_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
+{
+ unsigned f;
+
+ /* Early out for identical modes. */
+ if (m1 == m2)
+ return m1;
+
+ /* There's no valid combination for FP vs non-FP. */
+ f = flags_from_mode (m1) | flags_from_mode (m2);
+ if (f & CC_FLAG_FP)
+ return VOIDmode;
+
+ /* Otherwise, see what mode can implement all the flags. */
+ return mode_from_flags (f);
+}
+
+/* Return the minimal CC mode needed to implement (CMP_CODE X Y). */
+
+enum machine_mode
+rx_select_cc_mode (enum rtx_code cmp_code, rtx x, rtx y)
+{
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ return CC_Fmode;
+
+ if (y != const0_rtx)
+ return CCmode;
+
+ return mode_from_flags (flags_from_code (cmp_code));
+}
+
+/* Split the conditional branch. Emit (COMPARE C1 C2) into CC_REG with
+ CC_MODE, and use that in branches based on that compare. */
+
+void
+rx_split_cbranch (enum machine_mode cc_mode, enum rtx_code cmp1,
+ rtx c1, rtx c2, rtx label)
+{
+ rtx flags, x;
+
+ flags = gen_rtx_REG (cc_mode, CC_REG);
+ x = gen_rtx_COMPARE (cc_mode, c1, c2);
+ x = gen_rtx_SET (VOIDmode, flags, x);
+ emit_insn (x);
+
+ x = gen_rtx_fmt_ee (cmp1, VOIDmode, flags, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label, pc_rtx);
+ x = gen_rtx_SET (VOIDmode, pc_rtx, x);
+ emit_jump_insn (x);
+}
+
+/* A helper function for matching parallels that set the flags. */
+
+bool
+rx_match_ccmode (rtx insn, enum machine_mode cc_mode)
+{
+ rtx op1, flags;
+ enum machine_mode flags_mode;
+
+ gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
+
+ op1 = XVECEXP (PATTERN (insn), 0, 1);
+ gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
+
+ flags = SET_DEST (op1);
+ flags_mode = GET_MODE (flags);
+
+ if (GET_MODE (SET_SRC (op1)) != flags_mode)
+ return false;
+ if (GET_MODE_CLASS (flags_mode) != MODE_CC)
+ return false;
+
+ /* Ensure that the mode of FLAGS is compatible with CC_MODE. */
+ if (flags_from_mode (flags_mode) & ~flags_from_mode (cc_mode))
+ return false;
+
+ return true;
+}
+
+int
+rx_align_for_label (rtx lab, int uses_threshold)
+{
+ /* This is a simple heuristic to guess when an alignment would not be useful
+ because the delay due to the inserted NOPs would be greater than the delay
+ due to the misaligned branch. If uses_threshold is zero then the alignment
+ is always useful. */
+ if (LABEL_P (lab) && LABEL_NUSES (lab) < uses_threshold)
+ return 0;
+
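+  /* The value returned is a log2 alignment: 1 means 2 byte alignment,
+     3 means 8 byte alignment.  */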
+ return optimize_size ? 1 : 3;
+}
+
+static int
+rx_max_skip_for_label (rtx lab)
+{
+ int opsize;
+ rtx op;
+
+ if (lab == NULL_RTX)
+ return 0;
+
+ op = lab;
+ do
+ {
+ op = next_nonnote_nondebug_insn (op);
+ }
+ while (op && (LABEL_P (op)
+ || (INSN_P (op) && GET_CODE (PATTERN (op)) == USE)));
+ if (!op)
+ return 0;
+
+ opsize = get_attr_length (op);
+ if (opsize >= 0 && opsize < 8)
+ return opsize - 1;
+ return 0;
+}
+
+/* Compute the real length of the extending load-and-op instructions. */
+
+int
+rx_adjust_insn_length (rtx insn, int current_length)
+{
+ rtx extend, mem, offset;
+ bool zero;
+ int factor;
+
+ switch (INSN_CODE (insn))
+ {
+ default:
+ return current_length;
+
+ case CODE_FOR_plussi3_zero_extendhi:
+ case CODE_FOR_andsi3_zero_extendhi:
+ case CODE_FOR_iorsi3_zero_extendhi:
+ case CODE_FOR_xorsi3_zero_extendhi:
+ case CODE_FOR_divsi3_zero_extendhi:
+ case CODE_FOR_udivsi3_zero_extendhi:
+ case CODE_FOR_minussi3_zero_extendhi:
+ case CODE_FOR_smaxsi3_zero_extendhi:
+ case CODE_FOR_sminsi3_zero_extendhi:
+ case CODE_FOR_multsi3_zero_extendhi:
+ case CODE_FOR_comparesi3_zero_extendhi:
+ zero = true;
+ factor = 2;
+ break;
+
+ case CODE_FOR_plussi3_sign_extendhi:
+ case CODE_FOR_andsi3_sign_extendhi:
+ case CODE_FOR_iorsi3_sign_extendhi:
+ case CODE_FOR_xorsi3_sign_extendhi:
+ case CODE_FOR_divsi3_sign_extendhi:
+ case CODE_FOR_udivsi3_sign_extendhi:
+ case CODE_FOR_minussi3_sign_extendhi:
+ case CODE_FOR_smaxsi3_sign_extendhi:
+ case CODE_FOR_sminsi3_sign_extendhi:
+ case CODE_FOR_multsi3_sign_extendhi:
+ case CODE_FOR_comparesi3_sign_extendhi:
+ zero = false;
+ factor = 2;
+ break;
+
+ case CODE_FOR_plussi3_zero_extendqi:
+ case CODE_FOR_andsi3_zero_extendqi:
+ case CODE_FOR_iorsi3_zero_extendqi:
+ case CODE_FOR_xorsi3_zero_extendqi:
+ case CODE_FOR_divsi3_zero_extendqi:
+ case CODE_FOR_udivsi3_zero_extendqi:
+ case CODE_FOR_minussi3_zero_extendqi:
+ case CODE_FOR_smaxsi3_zero_extendqi:
+ case CODE_FOR_sminsi3_zero_extendqi:
+ case CODE_FOR_multsi3_zero_extendqi:
+ case CODE_FOR_comparesi3_zero_extendqi:
+ zero = true;
+ factor = 1;
+ break;
+
+ case CODE_FOR_plussi3_sign_extendqi:
+ case CODE_FOR_andsi3_sign_extendqi:
+ case CODE_FOR_iorsi3_sign_extendqi:
+ case CODE_FOR_xorsi3_sign_extendqi:
+ case CODE_FOR_divsi3_sign_extendqi:
+ case CODE_FOR_udivsi3_sign_extendqi:
+ case CODE_FOR_minussi3_sign_extendqi:
+ case CODE_FOR_smaxsi3_sign_extendqi:
+ case CODE_FOR_sminsi3_sign_extendqi:
+ case CODE_FOR_multsi3_sign_extendqi:
+ case CODE_FOR_comparesi3_sign_extendqi:
+ zero = false;
+ factor = 1;
+ break;
+ }
+
+ /* We are expecting: (SET (REG) (<OP> (REG) (<EXTEND> (MEM)))). */
+ extend = single_set (insn);
+ gcc_assert (extend != NULL_RTX);
+
+ extend = SET_SRC (extend);
+ if (GET_CODE (XEXP (extend, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (extend, 0)) == SIGN_EXTEND)
+ extend = XEXP (extend, 0);
+ else
+ extend = XEXP (extend, 1);
+
+ gcc_assert ((zero && (GET_CODE (extend) == ZERO_EXTEND))
+ || (! zero && (GET_CODE (extend) == SIGN_EXTEND)));
+
+ mem = XEXP (extend, 0);
+ gcc_checking_assert (MEM_P (mem));
+ if (REG_P (XEXP (mem, 0)))
+ return (zero && factor == 1) ? 2 : 3;
+
+ /* We are expecting: (MEM (PLUS (REG) (CONST_INT))). */
+ gcc_checking_assert (GET_CODE (XEXP (mem, 0)) == PLUS);
+ gcc_checking_assert (REG_P (XEXP (XEXP (mem, 0), 0)));
+
+ offset = XEXP (XEXP (mem, 0), 1);
+ gcc_checking_assert (GET_CODE (offset) == CONST_INT);
+
+ if (IN_RANGE (INTVAL (offset), 0, 255 * factor))
+ return (zero && factor == 1) ? 3 : 4;
+
+ return (zero && factor == 1) ? 4 : 5;
+}
+
+static bool
+rx_narrow_volatile_bitfield (void)
+{
+ return true;
+}
+
+static bool
+rx_ok_to_inline (tree caller, tree callee)
+{
+ /* Do not inline functions with local variables
+     into a naked CALLER - naked functions have no stack frame and
+ locals need a frame in order to have somewhere to live.
+
+ Unfortunately we have no way to determine the presence of
+ local variables in CALLEE, so we have to be cautious and
+ assume that there might be some there.
+
+ We do allow inlining when CALLEE has the "inline" type
+ modifier or the "always_inline" or "gnu_inline" attributes. */
+ return lookup_attribute ("naked", DECL_ATTRIBUTES (caller)) == NULL_TREE
+ || DECL_DECLARED_INLINE_P (callee)
+ || lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)) != NULL_TREE
+ || lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (callee)) != NULL_TREE;
+}
+
+static bool
+rx_enable_lra (void)
+{
+ return TARGET_ENABLE_LRA;
+}
+
+
+#undef TARGET_NARROW_VOLATILE_BITFIELD
+#define TARGET_NARROW_VOLATILE_BITFIELD rx_narrow_volatile_bitfield
+
+#undef TARGET_CAN_INLINE_P
+#define TARGET_CAN_INLINE_P rx_ok_to_inline
+
+#undef TARGET_ASM_JUMP_ALIGN_MAX_SKIP
+#define TARGET_ASM_JUMP_ALIGN_MAX_SKIP rx_max_skip_for_label
+#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
+#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rx_max_skip_for_label
+#undef TARGET_LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP
+#define TARGET_LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP rx_max_skip_for_label
+#undef TARGET_ASM_LABEL_ALIGN_MAX_SKIP
+#define TARGET_ASM_LABEL_ALIGN_MAX_SKIP rx_max_skip_for_label
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE rx_function_value
+
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB rx_return_in_msb
+
+#undef TARGET_IN_SMALL_DATA_P
+#define TARGET_IN_SMALL_DATA_P rx_in_small_data
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY rx_return_in_memory
+
+#undef TARGET_HAVE_SRODATA_SECTION
+#define TARGET_HAVE_SRODATA_SECTION true
+
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION rx_select_rtx_section
+
+#undef TARGET_ASM_SELECT_SECTION
+#define TARGET_ASM_SELECT_SECTION rx_select_section
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS rx_init_builtins
+
+#undef TARGET_BUILTIN_DECL
+#define TARGET_BUILTIN_DECL rx_builtin_decl
+
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN rx_expand_builtin
+
+#undef TARGET_ASM_CONSTRUCTOR
+#define TARGET_ASM_CONSTRUCTOR rx_elf_asm_constructor
+
+#undef TARGET_ASM_DESTRUCTOR
+#define TARGET_ASM_DESTRUCTOR rx_elf_asm_destructor
+
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX rx_struct_value_rtx
+
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE rx_attribute_table
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START rx_file_start
+
+#undef TARGET_MS_BITFIELD_LAYOUT_P
+#define TARGET_MS_BITFIELD_LAYOUT_P rx_is_ms_bitfield_layout
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P rx_is_legitimate_address
+
+#undef TARGET_MODE_DEPENDENT_ADDRESS_P
+#define TARGET_MODE_DEPENDENT_ADDRESS_P rx_mode_dependent_address_p
+
+#undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
+#define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS rx_allocate_stack_slots_for_args
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE rx_output_function_prologue
+
+#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
+#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P rx_func_attr_inlinable
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL rx_function_ok_for_sibcall
+
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG rx_function_arg
+
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE rx_function_arg_advance
+
+#undef TARGET_FUNCTION_ARG_BOUNDARY
+#define TARGET_FUNCTION_ARG_BOUNDARY rx_function_arg_boundary
+
+#undef TARGET_SET_CURRENT_FUNCTION
+#define TARGET_SET_CURRENT_FUNCTION rx_set_current_function
+
+#undef TARGET_ASM_INTEGER
+#define TARGET_ASM_INTEGER rx_assemble_integer
+
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
+
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET 32
+
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST rx_address_cost
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE rx_can_eliminate
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE rx_conditional_register_usage
+
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define TARGET_ASM_TRAMPOLINE_TEMPLATE rx_trampoline_template
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT rx_trampoline_init
+
+#undef TARGET_PRINT_OPERAND
+#define TARGET_PRINT_OPERAND rx_print_operand
+
+#undef TARGET_PRINT_OPERAND_ADDRESS
+#define TARGET_PRINT_OPERAND_ADDRESS rx_print_operand_address
+
+#undef TARGET_CC_MODES_COMPATIBLE
+#define TARGET_CC_MODES_COMPATIBLE rx_cc_modes_compatible
+
+#undef TARGET_MEMORY_MOVE_COST
+#define TARGET_MEMORY_MOVE_COST rx_memory_move_cost
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE rx_option_override
+
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE rx_promote_function_mode
+
+#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
+#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE rx_override_options_after_change
+
+#undef TARGET_FLAGS_REGNUM
+#define TARGET_FLAGS_REGNUM CC_REG
+
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P rx_is_legitimate_constant
+
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS rx_legitimize_address
+
+#undef TARGET_WARN_FUNC_RETURN
+#define TARGET_WARN_FUNC_RETURN rx_warn_func_return
+
+#undef TARGET_LRA_P
+#define TARGET_LRA_P rx_enable_lra
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-rx.h"
diff --git a/gcc-4.9/gcc/config/rx/rx.h b/gcc-4.9/gcc/config/rx/rx.h
new file mode 100644
index 000000000..d99b19ad2
--- /dev/null
+++ b/gcc-4.9/gcc/config/rx/rx.h
@@ -0,0 +1,665 @@
+/* GCC backend definitions for the Renesas RX processor.
+ Copyright (C) 2008-2014 Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__RX__"); \
+ builtin_assert ("cpu=RX"); \
+ if (rx_cpu_type == RX610) \
+ { \
+ builtin_define ("__RX610__"); \
+ builtin_assert ("machine=RX610"); \
+ } \
+ else if (rx_cpu_type == RX100) \
+ { \
+ builtin_define ("__RX100__"); \
+ builtin_assert ("machine=RX100"); \
+ } \
+ else if (rx_cpu_type == RX200) \
+ { \
+ builtin_define ("__RX200__"); \
+ builtin_assert ("machine=RX200"); \
+ } \
+ else if (rx_cpu_type == RX600) \
+ { \
+ builtin_define ("__RX600__"); \
+ builtin_assert ("machine=RX600"); \
+ } \
+ \
+ if (TARGET_BIG_ENDIAN_DATA) \
+ builtin_define ("__RX_BIG_ENDIAN__"); \
+ else \
+ builtin_define ("__RX_LITTLE_ENDIAN__");\
+ \
+ if (TARGET_64BIT_DOUBLES) \
+ builtin_define ("__RX_64BIT_DOUBLES__");\
+ else \
+ builtin_define ("__RX_32BIT_DOUBLES__");\
+ \
+ if (ALLOW_RX_FPU_INSNS) \
+ builtin_define ("__RX_FPU_INSNS__"); \
+ \
+ if (TARGET_AS100_SYNTAX) \
+ builtin_define ("__RX_AS100_SYNTAX__"); \
+ else \
+ builtin_define ("__RX_GAS_SYNTAX__"); \
+ \
+ if (TARGET_GCC_ABI) \
+ builtin_define ("__RX_GCC_ABI__"); \
+ else \
+ builtin_define ("__RX_ABI__"); \
+ } \
+ while (0)
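+
+/* So, for example, compiling with -mcpu=rx610 -m64bit-doubles would
+   define __RX610__ and __RX_64BIT_DOUBLES__, in addition to __RX__ and
+   the endianness, syntax and ABI macros (a hypothetical invocation).  */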
+
+#undef CC1_SPEC
+#define CC1_SPEC "\
+ %{mas100-syntax:%{gdwarf*:%e-mas100-syntax is incompatible with -gdwarf}} \
+ %{mcpu=rx100:%{fpu:%erx100 cpu does not have FPU hardware}} \
+ %{mcpu=rx200:%{fpu:%erx200 cpu does not have FPU hardware}}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{pg:gcrt0.o%s}%{!pg:crt0.o%s} crtbegin.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+#undef CPP_SPEC
+#define CPP_SPEC "\
+%{mpid:-D_RX_PID=1} \
+%{mint-register=*:-D_RX_INT_REGISTERS=%*} \
+%{msmall-data-limit*:-D_RX_SMALL_DATA} \
+"
+
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{mbig-endian-data:-mbig-endian-data} \
+%{m64bit-doubles:-m64bit-doubles} \
+%{!m64bit-doubles:-m32bit-doubles} \
+%{msmall-data-limit*:-msmall-data-limit} \
+%{mrelax:-relax} \
+%{mpid} \
+%{mint-register=*} \
+%{mgcc-abi:-mgcc-abi} %{!mgcc-abi:-mrx-abi} \
+%{mcpu=*} \
+"
+
+#undef LIB_SPEC
+#define LIB_SPEC " \
+--start-group \
+-lc \
+%{msim:-lsim}%{!msim:-lnosys} \
+%{fprofile-arcs|fprofile-generate|coverage:-lgcov} \
+--end-group \
+%{!T*: %{msim:%Trx-sim.ld}%{!msim:%Trx.ld}} \
+"
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian-data:--oformat elf32-rx-be} %{mrelax:-relax}"
+
+
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN TARGET_BIG_ENDIAN_DATA
+#define WORDS_BIG_ENDIAN TARGET_BIG_ENDIAN_DATA
+
+#define UNITS_PER_WORD 4
+
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE (TARGET_64BIT_DOUBLES ? 64 : 32)
+#define LONG_DOUBLE_TYPE_SIZE DOUBLE_TYPE_SIZE
+
+#ifdef __RX_32BIT_DOUBLES__
+#define LIBGCC2_HAS_DF_MODE 0
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 32
+#else
+#define LIBGCC2_HAS_DF_MODE 1
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+#endif
+
+#define DEFAULT_SIGNED_CHAR 0
+
+/* RX load/store instructions can handle unaligned addresses. */
+#define STRICT_ALIGNMENT 0
+#define FUNCTION_BOUNDARY 8
+#define BIGGEST_ALIGNMENT 32
+#define STACK_BOUNDARY 32
+#define PARM_BOUNDARY 8
+
+#define STACK_GROWS_DOWNWARD 1
+#define FRAME_GROWS_DOWNWARD 0
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#define Pmode SImode
+#define POINTER_SIZE 32
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+#define POINTERS_EXTEND_UNSIGNED 1
+#define FUNCTION_MODE QImode
+#define CASE_VECTOR_MODE Pmode
+#define WORD_REGISTER_OPERATIONS 1
+#define HAS_LONG_COND_BRANCH 0
+#define HAS_LONG_UNCOND_BRANCH 0
+
+#define MOVE_MAX 4
+#define STARTING_FRAME_OFFSET 0
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+#define HAVE_PRE_DECREMENT 1
+#define HAVE_POST_INCREMENT 1
+
+#define MOVE_RATIO(SPEED) ((SPEED) ? 4 : 2)
+#define SLOW_BYTE_ACCESS 1
+
+#define STORE_FLAG_VALUE 1
+#define LOAD_EXTEND_OP(MODE) SIGN_EXTEND
+#define SHORT_IMMEDIATES_SIGN_EXTEND 1
+
+enum reg_class
+{
+ NO_REGS, /* No registers in set. */
+ GR_REGS, /* Integer registers. */
+ ALL_REGS, /* All registers. */
+ LIM_REG_CLASSES /* Max value + 1. */
+};
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "GR_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+  { 0x00000000 }, /* No registers.  */ \
+  { 0x0000ffff }, /* Integer registers. */ \
+  { 0x0001ffff } /* All registers. */ \
+}
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+#define CLASS_MAX_NREGS(CLASS, MODE) ((GET_MODE_SIZE (MODE) \
+ + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+#define GENERAL_REGS GR_REGS
+#define BASE_REG_CLASS GR_REGS
+#define INDEX_REG_CLASS GR_REGS
+
+#define FIRST_PSEUDO_REGISTER 17
+
+#define REGNO_REG_CLASS(REGNO) ((REGNO) < FIRST_PSEUDO_REGISTER \
+ ? GR_REGS : NO_REGS)
+
+#define STACK_POINTER_REGNUM 0
+#define FUNC_RETURN_REGNUM 1
+#define FRAME_POINTER_REGNUM 6
+#define ARG_POINTER_REGNUM 7
+#define STATIC_CHAIN_REGNUM 8
+#define TRAMPOLINE_TEMP_REGNUM 9
+#define STRUCT_VAL_REGNUM 15
+#define CC_REGNUM 16
+
+/* This is the register which will probably be used to hold the address of
+ the start of the small data area, if -msmall-data-limit is being used,
+ or the address of the constant data area if -mpid is being used. If both
+ features are in use then two consecutive registers will be used.
+
+ Note - these registers must not be call_used because otherwise library
+ functions that are compiled without -msmall-data-limit/-mpid support
+ might clobber them.
+
+   Note that the actual values used depend on other options; use
+ rx_gp_base_regnum() and rx_pid_base_regnum() instead. */
+#define GP_BASE_REGNUM 13
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM }, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ (OFFSET) = rx_initial_elimination_offset ((FROM), (TO))
+
+
+#define FUNCTION_ARG_REGNO_P(N) (((N) >= 1) && ((N) <= 4))
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == FUNC_RETURN_REGNUM)
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#define FIXED_REGISTERS \
+{ \
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 \
+}
+
+#define CALL_USED_REGISTERS \
+{ \
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1 \
+}
+
+#define LIBCALL_VALUE(MODE) \
+ gen_rtx_REG (((GET_MODE_CLASS (MODE) != MODE_INT \
+ || COMPLEX_MODE_P (MODE) \
+ || GET_MODE_SIZE (MODE) >= 4) \
+ ? (MODE) \
+ : SImode), \
+ FUNC_RETURN_REGNUM)
+
+/* Order of allocation of registers. */
+
+#define REG_ALLOC_ORDER \
+{ 7, 10, 11, 12, 13, 14, 4, 3, 2, 1, 9, 8, 6, 5, 15 \
+}
+
+#define REGNO_IN_RANGE(REGNO, MIN, MAX) \
+ (IN_RANGE ((REGNO), (MIN), (MAX)) \
+ || (reg_renumber != NULL \
+ && reg_renumber[(REGNO)] >= (MIN) \
+ && reg_renumber[(REGNO)] <= (MAX)))
+
+#ifdef REG_OK_STRICT
+#define REGNO_OK_FOR_BASE_P(regno) REGNO_IN_RANGE (regno, 0, 15)
+#else
+#define REGNO_OK_FOR_BASE_P(regno) 1
+#endif
+
+#define REGNO_OK_FOR_INDEX_P(regno) REGNO_OK_FOR_BASE_P (regno)
+
+#define RTX_OK_FOR_BASE(X, STRICT) \
+ ((STRICT) ? \
+ ( (REG_P (X) \
+ && REGNO_IN_RANGE (REGNO (X), 0, 15)) \
+ || (GET_CODE (X) == SUBREG \
+ && REG_P (SUBREG_REG (X)) \
+ && REGNO_IN_RANGE (REGNO (SUBREG_REG (X)), 0, 15))) \
+ : \
+ ( (REG_P (X) \
+ || (GET_CODE (X) == SUBREG \
+ && REG_P (SUBREG_REG (X))))))
+
+
+#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) \
+ ((COUNT) == 0 \
+ ? gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, arg_pointer_rtx, GEN_INT (-4))) \
+ : NULL_RTX)
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_MEM (Pmode, stack_pointer_rtx)
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+typedef unsigned int CUMULATIVE_ARGS;
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ (CUM) = 0
+
+
+#define TRAMPOLINE_SIZE (! TARGET_BIG_ENDIAN_DATA ? 14 : 20)
+#define TRAMPOLINE_ALIGNMENT 32
+
+#define NO_PROFILE_COUNTERS 1
+#define PROFILE_BEFORE_PROLOGUE 1
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tbsr\t__mcount\n");
+
+
+#define HARD_REGNO_NREGS(REGNO, MODE) CLASS_MAX_NREGS (0, MODE)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ REGNO_REG_CLASS (REGNO) == GR_REGS
+
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ( ( GET_MODE_CLASS (MODE1) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE1) == MODE_COMPLEX_FLOAT) \
+ == ( GET_MODE_CLASS (MODE2) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE2) == MODE_COMPLEX_FLOAT))
+
+
+#define REGISTER_NAMES \
+ { \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc" \
+ }
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ { "sp", STACK_POINTER_REGNUM } \
+ , { "fp", FRAME_POINTER_REGNUM } \
+ , { "arg", ARG_POINTER_REGNUM } \
+ , { "chain", STATIC_CHAIN_REGNUM } \
+}
+
+#define DATA_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION D,DATA" \
+ : "\t.section D,\"aw\",@progbits\n\t.p2align 2")
+
+#define SDATA_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION D_2,DATA,ALIGN=2" \
+ : "\t.section D_2,\"aw\",@progbits\n\t.p2align 1")
+
+#undef READONLY_DATA_SECTION_ASM_OP
+#define READONLY_DATA_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION C,ROMDATA,ALIGN=4" \
+ : "\t.section C,\"a\",@progbits\n\t.p2align 2")
+
+#define BSS_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION B,DATA,ALIGN=4" \
+ : "\t.section B,\"w\",@nobits\n\t.p2align 2")
+
+#define SBSS_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION B_2,DATA,ALIGN=2" \
+ : "\t.section B_2,\"w\",@nobits\n\t.p2align 1")
+
+/* The following definitions are conditional, depending upon whether the
+   compiler itself is being built or whether crtstuff.c is being compiled
+   by the newly built compiler. */
+#if defined CRT_BEGIN || defined CRT_END
+# ifdef __RX_AS100_SYNTAX
+# define TEXT_SECTION_ASM_OP "\t.SECTION P,CODE"
+# define CTORS_SECTION_ASM_OP "\t.SECTION init_array,CODE"
+# define DTORS_SECTION_ASM_OP "\t.SECTION fini_array,CODE"
+# define INIT_ARRAY_SECTION_ASM_OP "\t.SECTION init_array,CODE"
+# define FINI_ARRAY_SECTION_ASM_OP "\t.SECTION fini_array,CODE"
+# else
+# define TEXT_SECTION_ASM_OP "\t.section P,\"ax\""
+# define CTORS_SECTION_ASM_OP \
+ "\t.section\t.init_array,\"awx\",@init_array"
+# define DTORS_SECTION_ASM_OP \
+ "\t.section\t.fini_array,\"awx\",@fini_array"
+# define INIT_ARRAY_SECTION_ASM_OP \
+ "\t.section\t.init_array,\"awx\",@init_array"
+# define FINI_ARRAY_SECTION_ASM_OP \
+ "\t.section\t.fini_array,\"awx\",@fini_array"
+# endif
+#else
+# define TEXT_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION P,CODE" : "\t.section P,\"ax\"")
+
+# define CTORS_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION init_array,CODE" \
+ : "\t.section\t.init_array,\"awx\",@init_array")
+
+# define DTORS_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION fini_array,CODE" \
+ : "\t.section\t.fini_array,\"awx\",@fini_array")
+
+# define INIT_ARRAY_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION init_array,CODE" \
+ : "\t.section\t.init_array,\"awx\",@init_array")
+
+# define FINI_ARRAY_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION fini_array,CODE" \
+ : "\t.section\t.fini_array,\"awx\",@fini_array")
+#endif
+
+#define GLOBAL_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.GLB\t" : "\t.global\t")
+#define ASM_COMMENT_START " ;"
+#define ASM_APP_ON ""
+#define ASM_APP_OFF ""
+#define LOCAL_LABEL_PREFIX "L"
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+/* Compute the alignment needed for label X in various situations.
+ If the user has specified an alignment then honour that, otherwise
+ use rx_align_for_label. */
+#define JUMP_ALIGN(x) (align_jumps ? align_jumps : rx_align_for_label (x, 0))
+#define LABEL_ALIGN(x) (align_labels ? align_labels : rx_align_for_label (x, 3))
+#define LOOP_ALIGN(x) (align_loops ? align_loops : rx_align_for_label (x, 2))
+#define LABEL_ALIGN_AFTER_BARRIER(x) rx_align_for_label (x, 0)
+
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(STREAM, LOG, MAX_SKIP) \
+ do \
+ { \
+ if ((LOG) == 0 || (MAX_SKIP) == 0) \
+ break; \
+ if (TARGET_AS100_SYNTAX) \
+ { \
+ if ((LOG) >= 2) \
+ fprintf (STREAM, "\t.ALIGN 4\t; %d alignment actually requested\n", 1 << (LOG)); \
+ else \
+ fprintf (STREAM, "\t.ALIGN 2\n"); \
+ } \
+ else \
+ fprintf (STREAM, "\t.balign %d,3,%d\n", 1 << (LOG), (MAX_SKIP)); \
+ } \
+ while (0)
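+
+/* Note (an inference from the RX ISA, not stated in these sources): the
+   fill value 3 in the .balign directive above is the single-byte encoding
+   of the RX "nop" instruction, so padding bytes remain harmlessly
+   executable.  */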
+
+#define ASM_OUTPUT_ALIGN(STREAM, LOG) \
+ do \
+ { \
+ if ((LOG) == 0) \
+ break; \
+ if (TARGET_AS100_SYNTAX) \
+ { \
+ if ((LOG) >= 2) \
+ fprintf (STREAM, "\t.ALIGN 4\t; %d alignment actually requested\n", 1 << (LOG)); \
+ else \
+ fprintf (STREAM, "\t.ALIGN 2\n"); \
+ } \
+ else \
+ fprintf (STREAM, "\t.balign %d\n", 1 << (LOG)); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, TARGET_AS100_SYNTAX ? "\t.LWORD L%d\n" : "\t.long .L%d\n", \
+ VALUE)
+
+/* This is how to output an element of a case-vector that is relative.
+ Note: The local label referenced by the "1b" below is emitted by
+ the tablejump insn. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, TARGET_AS100_SYNTAX \
+ ? "\t.LWORD L%d - ?-\n" : "\t.long .L%d - 1b\n", VALUE)
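+
+/* As an illustration (assumed output): for VALUE == 42 the GAS branch
+   emits "\t.long .L42 - 1b", i.e. the offset of case label .L42 from
+   the "1:" label emitted by the tablejump insn.  */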
+
+#define CASE_VECTOR_PC_RELATIVE (TARGET_PID)
+
+#define ASM_OUTPUT_SIZE_DIRECTIVE(STREAM, NAME, SIZE) \
+ do \
+ { \
+ HOST_WIDE_INT size_ = (SIZE); \
+ \
+ /* The as100 assembler does not have an equivalent of the SVR4 \
+ .size pseudo-op. */ \
+ if (TARGET_AS100_SYNTAX) \
+ break; \
+ \
+ fputs (SIZE_ASM_OP, STREAM); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, ", " HOST_WIDE_INT_PRINT_DEC "\n", size_); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_MEASURED_SIZE(STREAM, NAME) \
+ do \
+ { \
+ /* The as100 assembler does not have an equivalent of the SVR4 \
+ .size pseudo-op. */ \
+ if (TARGET_AS100_SYNTAX) \
+ break; \
+ fputs (SIZE_ASM_OP, STREAM); \
+ assemble_name (STREAM, NAME); \
+ fputs (", .-", STREAM); \
+ assemble_name (STREAM, NAME); \
+ putc ('\n', STREAM); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_TYPE_DIRECTIVE(STREAM, NAME, TYPE) \
+ do \
+ { \
+ /* The as100 assembler does not have an equivalent of the SVR4 \
+	 .type pseudo-op. */ \
+ if (TARGET_AS100_SYNTAX) \
+ break; \
+ fputs (TYPE_ASM_OP, STREAM); \
+ assemble_name (STREAM, NAME); \
+ fputs (", ", STREAM); \
+ fprintf (STREAM, TYPE_OPERAND_FMT, TYPE); \
+ putc ('\n', STREAM); \
+ } \
+ while (0)
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \
+ do \
+ { \
+ sprintf (LABEL, TARGET_AS100_SYNTAX ? "*%s%u" : "*.%s%u", \
+ PREFIX, (unsigned) (NUM)); \
+ } \
+ while (0)
+
+#undef ASM_OUTPUT_EXTERNAL
+#define ASM_OUTPUT_EXTERNAL(FILE, DECL, NAME) \
+ do \
+ { \
+ if (TARGET_AS100_SYNTAX) \
+ targetm.asm_out.globalize_label (FILE, NAME); \
+ default_elf_asm_output_external (FILE, DECL, NAME); \
+ } \
+ while (0)
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if (TARGET_AS100_SYNTAX) \
+ { \
+ fprintf ((FILE), "\t.GLB\t"); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "\n"); \
+ assemble_name ((FILE), (NAME)); \
+ switch ((ALIGN) / BITS_PER_UNIT) \
+ { \
+ case 4: \
+ fprintf ((FILE), ":\t.BLKL\t"HOST_WIDE_INT_PRINT_UNSIGNED"\n",\
+ (SIZE) / 4); \
+ break; \
+ case 2: \
+ fprintf ((FILE), ":\t.BLKW\t"HOST_WIDE_INT_PRINT_UNSIGNED"\n",\
+ (SIZE) / 2); \
+ break; \
+ default: \
+ fprintf ((FILE), ":\t.BLKB\t"HOST_WIDE_INT_PRINT_UNSIGNED"\n",\
+ (SIZE)); \
+ break; \
+ } \
+ } \
+ else \
+ { \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", \
+ (SIZE), (ALIGN) / BITS_PER_UNIT); \
+ } \
+ } \
+ while (0)
+
+#undef SKIP_ASM_OP
+#define SKIP_ASM_OP (TARGET_AS100_SYNTAX ? "\t.BLKB\t" : "\t.zero\t")
+
+#undef ASM_OUTPUT_LIMITED_STRING
+#define ASM_OUTPUT_LIMITED_STRING(FILE, STR) \
+ do \
+ { \
+ const unsigned char *_limited_str = \
+ (const unsigned char *) (STR); \
+ unsigned ch; \
+ \
+ fprintf ((FILE), TARGET_AS100_SYNTAX \
+ ? "\t.BYTE\t\"" : "\t.string\t\""); \
+ \
+ for (; (ch = *_limited_str); _limited_str++) \
+ { \
+ int escape; \
+ \
+ switch (escape = ESCAPES[ch]) \
+ { \
+ case 0: \
+ putc (ch, (FILE)); \
+ break; \
+ case 1: \
+ fprintf ((FILE), "\\%03o", ch); \
+ break; \
+ default: \
+ putc ('\\', (FILE)); \
+ putc (escape, (FILE)); \
+ break; \
+ } \
+ } \
+ \
+ fprintf ((FILE), TARGET_AS100_SYNTAX ? "\"\n\t.BYTE\t0\n" : "\"\n");\
+ } \
+ while (0)
+
+/* For PIC put jump tables into the text section so that the offsets that
+ they contain are always computed between two same-section symbols. */
+#define JUMP_TABLES_IN_TEXT_SECTION (TARGET_PID || flag_pic)
+
+/* This is a version of REG_P that also returns TRUE for SUBREGs. */
+#define RX_REG_P(rtl) (REG_P (rtl) || GET_CODE (rtl) == SUBREG)
+
+/* Like REG_P except that this macro is true for SET expressions. */
+#define SET_P(rtl) (GET_CODE (rtl) == SET)
+
+/* The AS100 assembler does not support .leb128 and .uleb128, but
+ the compiler-build-time configure tests will have enabled their
+ use because GAS supports them. So default to generating STABS
+ debug information instead of DWARF2 when generating AS100
+ compatible output. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE (TARGET_AS100_SYNTAX \
+ ? DBX_DEBUG : DWARF2_DEBUG)
+
+#define INCOMING_FRAME_SP_OFFSET 4
+#define ARG_POINTER_CFA_OFFSET(FNDECL) 4
+#define FRAME_POINTER_CFA_OFFSET(FNDECL) 4
+
+#define TARGET_USE_FPU (! TARGET_NO_USE_FPU)
+
+/* This macro is used to decide when RX FPU instructions can be used. */
+#define ALLOW_RX_FPU_INSNS (TARGET_USE_FPU)
+
+#define BRANCH_COST(SPEED,PREDICT) 1
+#define REGISTER_MOVE_COST(MODE,FROM,TO) 2
+
+#define SELECT_CC_MODE(OP,X,Y) rx_select_cc_mode(OP, X, Y)
+
+#define ADJUST_INSN_LENGTH(INSN,LENGTH) \
+ do \
+ { \
+ (LENGTH) = rx_adjust_insn_length ((INSN), (LENGTH)); \
+ } \
+ while (0)
diff --git a/gcc-4.9/gcc/config/rx/rx.md b/gcc-4.9/gcc/config/rx/rx.md
new file mode 100644
index 000000000..ecdfc15b5
--- /dev/null
+++ b/gcc-4.9/gcc/config/rx/rx.md
@@ -0,0 +1,2641 @@
+;; Machine Description for Renesas RX processors
+;; Copyright (C) 2008-2014 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This mode iterator is used for sign- and zero-extensions.
+(define_mode_iterator small_int_modes [(HI "") (QI "")])
+
+;; This mode iterator is used for max and min operations.
+(define_mode_iterator int_modes [(SI "") (HI "") (QI "")])
+
+;; We do not handle DFmode here because either it is the same
+;; as SFmode, or, if -m64bit-doubles is active, all operations
+;; on doubles have to be handled by library functions.
+(define_mode_iterator register_modes
+ [(SF "") (SI "") (HI "") (QI "")])
+
+(define_constants
+ [
+ (SP_REG 0)
+ (CC_REG 16)
+
+ (UNSPEC_LOW_REG 0)
+ (UNSPEC_HIGH_REG 1)
+
+ (UNSPEC_RTE 10)
+ (UNSPEC_RTFI 11)
+ (UNSPEC_NAKED 12)
+ (UNSPEC_CONST 13)
+
+ (UNSPEC_MOVSTR 20)
+ (UNSPEC_MOVMEM 21)
+ (UNSPEC_SETMEM 22)
+ (UNSPEC_STRLEN 23)
+ (UNSPEC_CMPSTRN 24)
+
+ (UNSPEC_BUILTIN_BRK 30)
+ (UNSPEC_BUILTIN_CLRPSW 31)
+ (UNSPEC_BUILTIN_INT 32)
+ (UNSPEC_BUILTIN_MACHI 33)
+ (UNSPEC_BUILTIN_MACLO 34)
+ (UNSPEC_BUILTIN_MULHI 35)
+ (UNSPEC_BUILTIN_MULLO 36)
+ (UNSPEC_BUILTIN_MVFACHI 37)
+ (UNSPEC_BUILTIN_MVFACMI 38)
+ (UNSPEC_BUILTIN_MVFC 39)
+ (UNSPEC_BUILTIN_MVFCP 40)
+ (UNSPEC_BUILTIN_MVTACHI 41)
+ (UNSPEC_BUILTIN_MVTACLO 42)
+ (UNSPEC_BUILTIN_MVTC 43)
+ (UNSPEC_BUILTIN_MVTIPL 44)
+ (UNSPEC_BUILTIN_RACW 45)
+ (UNSPEC_BUILTIN_REVW 46)
+ (UNSPEC_BUILTIN_RMPA 47)
+ (UNSPEC_BUILTIN_ROUND 48)
+ (UNSPEC_BUILTIN_SAT 49)
+ (UNSPEC_BUILTIN_SETPSW 50)
+ (UNSPEC_BUILTIN_WAIT 51)
+
+ (UNSPEC_PID_ADDR 52)
+ ]
+)
+
+(define_attr "length" "" (const_int 8))
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; Pipeline description.
+
+;; The RX only has a single pipeline. It has five stages (fetch,
+;; decode, execute, memory access, writeback) each of which normally
+;; takes a single CPU clock cycle.
+
+;; The timings attribute consists of two numbers: the first is the
+;; throughput, which is the number of cycles the instruction takes
+;; to execute and generate a result.  The second is the latency,
+;; which is the effective number of cycles the instruction takes to
+;; execute if its result is used by the following instruction.  The
+;; latency is always greater than or equal to the throughput.
+;; These values were taken from tables 2.13 and 2.14 in section 2.8
+;; of the RX610 Group Hardware Manual v0.11.
+
+;; Note - it would be nice to use strings rather than integers for
+;; the possible values of this attribute, so that we can have the
+;; GCC build mechanism check for values that are not supported by
+;; the reservations below.  But this will not work because the code
+;; in rx_adjust_sched_cost() needs integers, not strings.
+
+(define_attr "timings" "" (const_int 11))
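+
+;; For example, a timings value of 12 denotes a throughput of one cycle
+;; and a latency of two cycles: the insn occupies the pipeline for one
+;; cycle, but an immediately dependent insn must wait a second cycle
+;; for the result.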
+
+(define_automaton "pipelining")
+(define_cpu_unit "throughput" "pipelining")
+
+(define_insn_reservation "throughput__1_latency__1" 1
+ (eq_attr "timings" "11") "throughput")
+(define_insn_reservation "throughput__1_latency__2" 2
+ (eq_attr "timings" "12") "throughput,nothing")
+(define_insn_reservation "throughput__2_latency__2" 1
+ (eq_attr "timings" "22") "throughput*2")
+(define_insn_reservation "throughput__3_latency__3" 1
+ (eq_attr "timings" "33") "throughput*3")
+(define_insn_reservation "throughput__3_latency__4" 2
+ (eq_attr "timings" "34") "throughput*3,nothing")
+(define_insn_reservation "throughput__4_latency__4" 1
+ (eq_attr "timings" "44") "throughput*4")
+(define_insn_reservation "throughput__4_latency__5" 2
+ (eq_attr "timings" "45") "throughput*4,nothing")
+(define_insn_reservation "throughput__5_latency__5" 1
+ (eq_attr "timings" "55") "throughput*5")
+(define_insn_reservation "throughput__5_latency__6" 2
+ (eq_attr "timings" "56") "throughput*5,nothing")
+(define_insn_reservation "throughput__6_latency__6" 1
+ (eq_attr "timings" "66") "throughput*6")
+(define_insn_reservation "throughput_10_latency_10" 1
+ (eq_attr "timings" "1010") "throughput*10")
+(define_insn_reservation "throughput_11_latency_11" 1
+ (eq_attr "timings" "1111") "throughput*11")
+(define_insn_reservation "throughput_16_latency_16" 1
+ (eq_attr "timings" "1616") "throughput*16")
+(define_insn_reservation "throughput_18_latency_18" 1
+ (eq_attr "timings" "1818") "throughput*18")
+
+;; ----------------------------------------------------------------------------
+
+;; Comparisons
+
+;; Note - we do not specify the two instructions necessary to perform
+;; a compare-and-branch in the cbranchsi4 pattern because that would
+;; allow the comparison to be moved away from the jump before the reload
+;; pass has completed.  That would be problematic because reload can
+;; generate ADDSI3 instructions, which would corrupt the PSW flags.
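+;;
+;; As an illustrative sketch (assumed output, not taken from a build),
+;; the post-reload split turns "if (r1 < 10) goto L5" into two insns:
+;;
+;;	cmp	#10, r1		; compare, setting the PSW flags
+;;	blt	.L5		; conditional branch on those flags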
+
+(define_expand "cbranchsi4"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "comparison_operator"
+ [(match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "rx_source_operand")])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ ""
+)
+
+(define_insn_and_split "*cbranchsi4"
+ [(set (pc)
+ (if_then_else
+ (match_operator 3 "comparison_operator"
+ [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "rx_source_operand" "riQ")])
+ (match_operand 2 "label_ref_operand" "")
+ (pc)))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ rx_split_cbranch (CCmode, GET_CODE (operands[3]),
+ operands[0], operands[1], operands[2]);
+ DONE;
+})
+
+(define_insn "*cmpsi"
+ [(set (reg:CC CC_REG)
+ (compare:CC (match_operand:SI 0 "register_operand" "r,r,r,r,r,r,r")
+ (match_operand:SI 1 "rx_source_operand" "r,Uint04,Int08,Sint16,Sint24,i,Q")))]
+ "reload_completed"
+ "cmp\t%Q1, %0"
+ [(set_attr "timings" "11,11,11,11,11,11,33")
+ (set_attr "length" "2,2,3,4,5,6,5")]
+)
+
+;; Canonical method for representing TST.
+(define_insn_and_split "*cbranchsi4_tst"
+ [(set (pc)
+ (if_then_else
+ (match_operator 3 "rx_zs_comparison_operator"
+ [(and:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "rx_source_operand" "riQ"))
+ (const_int 0)])
+ (match_operand 2 "label_ref_operand" "")
+ (pc)))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ rx_split_cbranch (CC_ZSmode, GET_CODE (operands[3]),
+ XEXP (operands[3], 0), XEXP (operands[3], 1),
+ operands[2]);
+ DONE;
+})
+
+;; Various other ways that GCC codes "var & const".
+(define_insn_and_split "*cbranchsi4_tst_ext"
+ [(set (pc)
+ (if_then_else
+ (match_operator 4 "rx_z_comparison_operator"
+ [(zero_extract:SI
+ (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "rx_constshift_operand" "")
+ (match_operand:SI 2 "rx_constshift_operand" ""))
+ (const_int 0)])
+ (match_operand 3 "label_ref_operand" "")
+ (pc)))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ HOST_WIDE_INT mask;
+ rtx x;
+
+ mask = 1;
+ mask <<= INTVAL (operands[1]);
+ mask -= 1;
+ mask <<= INTVAL (operands[2]);
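+  /* For example, a width (operands[1]) of 3 and a position
+     (operands[2]) of 4 give mask = ((1 << 3) - 1) << 4 = 0x70.  */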
+ x = gen_rtx_AND (SImode, operands[0], gen_int_mode (mask, SImode));
+
+ rx_split_cbranch (CC_ZSmode, GET_CODE (operands[4]),
+ x, const0_rtx, operands[3]);
+ DONE;
+})
+
+(define_insn "*tstsi"
+ [(set (reg:CC_ZS CC_REG)
+ (compare:CC_ZS
+ (and:SI (match_operand:SI 0 "register_operand" "r,r,r")
+ (match_operand:SI 1 "rx_source_operand" "r,i,Q"))
+ (const_int 0)))]
+ "reload_completed"
+ "tst\t%Q1, %0"
+ [(set_attr "timings" "11,11,33")
+ (set_attr "length" "3,7,6")]
+)
+
+(define_expand "cbranchsf4"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "rx_fp_comparison_operator"
+ [(match_operand:SF 1 "register_operand")
+ (match_operand:SF 2 "rx_source_operand")])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ "ALLOW_RX_FPU_INSNS"
+)
+
+(define_insn_and_split "*cbranchsf4"
+ [(set (pc)
+ (if_then_else
+ (match_operator 3 "rx_fp_comparison_operator"
+ [(match_operand:SF 0 "register_operand" "r")
+ (match_operand:SF 1 "rx_source_operand" "rFQ")])
+ (match_operand 2 "label_ref_operand" "")
+ (pc)))]
+ "ALLOW_RX_FPU_INSNS"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rx_split_cbranch (CC_Fmode, GET_CODE (operands[3]),
+ operands[0], operands[1], operands[2]);
+ DONE;
+})
+
+(define_insn "*cmpsf"
+ [(set (reg:CC_F CC_REG)
+ (compare:CC_F
+ (match_operand:SF 0 "register_operand" "r,r,r")
+ (match_operand:SF 1 "rx_source_operand" "r,F,Q")))]
+ "ALLOW_RX_FPU_INSNS && reload_completed"
+ "fcmp\t%1, %0"
+ [(set_attr "timings" "11,11,33")
+ (set_attr "length" "3,7,5")]
+)
+
+;; Flow Control Instructions:
+
+(define_insn "*conditional_branch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "comparison_operator"
+ [(reg CC_REG) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "reload_completed"
+ "b%B1\t%0"
+ [(set_attr "length" "8") ;; This length is wrong, but it is
+ ;; too hard to compute statically.
+ (set_attr "timings" "33")] ;; The timing assumes that the branch is taken.
+)
+
+;; ----------------------------------------------------------------------------
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "bra\t%0"
+ [(set_attr "length" "4")
+ (set_attr "timings" "33")]
+)
+
+(define_insn "indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "jmp\t%0"
+ [(set_attr "length" "2")
+ (set_attr "timings" "33")]
+)
+
+(define_insn "tablejump"
+ [(set (pc)
+ (match_operand:SI 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ { return TARGET_PID ? (TARGET_AS100_SYNTAX ? "\n?:\tbra\t%0"
+ : "\n1:\tbra\t%0")
+ : "\n1:jmp\t%0";
+ }
+ [(set_attr "timings" "33")
+ (set_attr "length" "2")]
+)
+
+(define_expand "return"
+ [(return)]
+ "rx_can_use_simple_return ()"
+ "rx_expand_epilogue (false); DONE;"
+)
+
+(define_insn "simple_return"
+ [(simple_return)]
+ ""
+ "rts"
+ [(set_attr "length" "1")
+ (set_attr "timings" "55")]
+)
+
+;; An unspec is used so that the constant will not be invalid
+;; if -mmax-constant-size has been specified.
+(define_insn "deallocate_and_return"
+ [(set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (const:SI (unspec:SI [(match_operand 0 "const_int_operand" "n")] UNSPEC_CONST))))
+ (return)]
+ ""
+ "rtsd\t%0"
+ [(set_attr "length" "2")
+ (set_attr "timings" "55")]
+)
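+
+;; For example (a sketch): "rtsd #16" adds 16 to the stack pointer and
+;; then returns, folding the frame deallocation into the return insn.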
+
+(define_insn "pop_and_return"
+ [(match_parallel 1 "rx_rtsd_vector"
+ [(set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 0 "const_int_operand" "n")))])
+ (return)]
+ "reload_completed"
+ {
+ rx_emit_stack_popm (operands, false);
+ return "";
+ }
+ [(set_attr "length" "3")
+ (set_attr "timings" "56")]
+)
+
+(define_insn "fast_interrupt_return"
+ [(unspec_volatile [(return)] UNSPEC_RTFI) ]
+ ""
+ "rtfi"
+ [(set_attr "length" "2")
+ (set_attr "timings" "33")]
+)
+
+(define_insn "exception_return"
+ [(unspec_volatile [(return)] UNSPEC_RTE) ]
+ ""
+ "rte"
+ [(set_attr "length" "2")
+ (set_attr "timings" "66")]
+)
+
+(define_insn "naked_return"
+ [(unspec_volatile [(return)] UNSPEC_NAKED) ]
+ ""
+ "; Naked function: epilogue provided by programmer."
+)
+
+
+;; Note - the following set of patterns do not use the "memory_operand"
+;; predicate or an "m" constraint because we do not allow symbol_refs
+;; or label_refs as legitimate memory addresses. This matches the
+;; behaviour of most of the RX instructions. Only the call/branch
+;; instructions are allowed to refer to symbols/labels directly.
+;; The call operands are in QImode because that is the value of
+;; FUNCTION_MODE.
+
+(define_expand "call"
+ [(call (match_operand:QI 0 "general_operand")
+ (match_operand:SI 1 "general_operand"))]
+ ""
+ {
+ rtx dest = XEXP (operands[0], 0);
+
+ if (! rx_call_operand (dest, Pmode))
+ dest = force_reg (Pmode, dest);
+ emit_call_insn (gen_call_internal (dest));
+ DONE;
+ }
+)
+
+(define_insn "call_internal"
+ [(call (mem:QI (match_operand:SI 0 "rx_call_operand" "r,Symbol"))
+ (const_int 0))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ jsr\t%0
+ bsr\t%A0"
+ [(set_attr "length" "2,4")
+ (set_attr "timings" "33")]
+)
+
+(define_expand "call_value"
+ [(set (match_operand 0 "register_operand")
+ (call (match_operand:QI 1 "general_operand")
+ (match_operand:SI 2 "general_operand")))]
+ ""
+ {
+ rtx dest = XEXP (operands[1], 0);
+
+ if (! rx_call_operand (dest, Pmode))
+ dest = force_reg (Pmode, dest);
+ emit_call_insn (gen_call_value_internal (operands[0], dest));
+ DONE;
+ }
+)
+
+(define_insn "call_value_internal"
+ [(set (match_operand 0 "register_operand" "=r,r")
+ (call (mem:QI (match_operand:SI 1 "rx_call_operand" "r,Symbol"))
+ (const_int 0)))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ jsr\t%1
+ bsr\t%A1"
+ [(set_attr "length" "2,4")
+ (set_attr "timings" "33")]
+)
+
+;; Note - we do not allow indirect sibcalls (with the address
+;; held in a register) because we cannot guarantee that the register
+;; chosen will be a call-used one. If it is a call-saved register,
+;; then the epilogue code would corrupt it by popping the saved value
+;; off the stack.
+(define_expand "sibcall"
+ [(parallel
+ [(call (mem:QI (match_operand:SI 0 "rx_symbolic_call_operand"))
+ (match_operand:SI 1 "general_operand"))
+ (return)])]
+ ""
+ {
+ if (MEM_P (operands[0]))
+ operands[0] = XEXP (operands[0], 0);
+ emit_call_insn (gen_sibcall_internal (operands[0]));
+ DONE;
+ }
+)
+
+(define_insn "sibcall_internal"
+ [(call (mem:QI (match_operand:SI 0 "rx_symbolic_call_operand" "Symbol"))
+ (const_int 0))
+ (return)]
+ ""
+ "bra\t%A0"
+ [(set_attr "length" "4")
+ (set_attr "timings" "33")]
+)
+
+(define_expand "sibcall_value"
+ [(parallel
+ [(set (match_operand 0 "register_operand")
+ (call (mem:QI (match_operand:SI 1 "rx_symbolic_call_operand"))
+ (match_operand:SI 2 "general_operand")))
+ (return)])]
+ ""
+ {
+ if (MEM_P (operands[1]))
+ operands[1] = XEXP (operands[1], 0);
+ emit_call_insn (gen_sibcall_value_internal (operands[0], operands[1]));
+ DONE;
+ }
+)
+
+(define_insn "sibcall_value_internal"
+ [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:QI (match_operand:SI 1 "rx_symbolic_call_operand" "Symbol"))
+ (const_int 0)))
+ (return)]
+ ""
+ "bra\t%A1"
+ [(set_attr "length" "4")
+ (set_attr "timings" "33")]
+)
+
+;; Function Prologue/Epilogue Instructions
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "rx_expand_prologue (); DONE;"
+)
+
+(define_expand "epilogue"
+ [(return)]
+ ""
+ "rx_expand_epilogue (false); DONE;"
+)
+
+(define_expand "sibcall_epilogue"
+ [(return)]
+ ""
+ "rx_expand_epilogue (true); DONE;"
+)
+
+;; Move Instructions
+
+;; Note - we do not allow memory-to-memory moves, even though the ISA
+;; supports them.  The reason is that the conditions on such moves are
+;; too restrictive: specifically, the source addressing mode is limited
+;; by the destination addressing mode and vice versa.  (For example, it
+;; is not possible to use indexed register indirect addressing for one
+;; of the operands if the other operand is anything other than a register,
+;; but it is possible to use register relative addressing when the other
+;; operand also uses register relative or register indirect addressing.)
+;;
+;; GCC does not support computing legitimate addresses based on the
+;; nature of other operands involved in the instruction, and reload is
+;; not smart enough to cope with a whole variety of different memory
+;; addressing constraints, so it is simpler and safer to just refuse
+;; to support memory-to-memory moves.
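+;;
+;; A minimal illustration (hypothetical source): for "*p = *q;" the
+;; expander below first loads *q into a scratch register and then
+;; stores that register to *p, instead of a memory-to-memory move.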
+
+(define_expand "mov<register_modes:mode>"
+ [(set (match_operand:register_modes 0 "general_operand")
+ (match_operand:register_modes 1 "general_operand"))]
+ ""
+ {
+ if (MEM_P (operands[0]) && MEM_P (operands[1]))
+ operands[1] = copy_to_mode_reg (<register_modes:MODE>mode, operands[1]);
+ operands[0] = rx_maybe_pidify_operand (operands[0], 0);
+ operands[1] = rx_maybe_pidify_operand (operands[1], 0);
+ if (GET_CODE (operands[0]) != REG
+ && GET_CODE (operands[1]) == PLUS)
+ operands[1] = copy_to_mode_reg (<register_modes:MODE>mode, operands[1]);
+ if (GET_CODE (operands[1]) == PLUS && GET_MODE (operands[1]) == SImode)
+ {
+ emit_insn (gen_addsi3 (operands[0], XEXP (operands[1], 0), XEXP (operands[1], 1)));
+ DONE;
+ }
+ if (CONST_INT_P (operand1)
+ && ! rx_is_legitimate_constant (<register_modes:MODE>mode, operand1))
+ FAIL;
+ }
+)
+
+(define_insn "*mov<register_modes:mode>_internal"
+ [(set (match_operand:register_modes
+ 0 "nonimmediate_operand" "=r,r,r,r,r,r,m,Q,Q,Q,Q,r")
+ (match_operand:register_modes
+ 1 "general_operand" "Int08,Sint16,Sint24,i,r,m,r,Int08,Sint16,Sint24,i,RpdaRpid"))]
+ ""
+ { return rx_gen_move_template (operands, false); }
+ [(set_attr "length" "3,4,5,6,2,4,6,5,6,7,8,8")
+ (set_attr "timings" "11,11,11,11,11,12,11,11,11,11,11,11")]
+)
+
+(define_insn "extend<small_int_modes:mode>si2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (sign_extend:SI (match_operand:small_int_modes
+ 1 "nonimmediate_operand" "r,m")))]
+ ""
+ { return rx_gen_move_template (operands, false); }
+ [(set_attr "length" "2,6")
+ (set_attr "timings" "11,12")]
+)
+
+(define_insn "zero_extend<small_int_modes:mode>si2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:small_int_modes
+ 1 "nonimmediate_operand" "r,m")))]
+ ""
+ { return rx_gen_move_template (operands, true); }
+ [(set_attr "length" "2,4")
+ (set_attr "timings" "11,12")]
+)
+
+(define_insn "stack_push"
+ [(set (reg:SI SP_REG)
+ (minus:SI (reg:SI SP_REG)
+ (const_int 4)))
+ (set (mem:SI (reg:SI SP_REG))
+ (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "push.l\t%0"
+ [(set_attr "length" "2")]
+)
+
+(define_insn "stack_pushm"
+ [(match_parallel 1 "rx_store_multiple_vector"
+ [(set (reg:SI SP_REG)
+ (minus:SI (reg:SI SP_REG)
+ (match_operand:SI 0 "const_int_operand" "n")))])]
+ "reload_completed"
+ {
+ rx_emit_stack_pushm (operands);
+ return "";
+ }
+ [(set_attr "length" "2")
+  (set_attr "timings" "44")]	;; The timing is a guesstimated average.
+)
+
+(define_insn "stack_pop"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (const_int 4)))]
+ ""
+ "pop\t%0"
+ [(set_attr "length" "2")
+ (set_attr "timings" "12")]
+)
+
+(define_insn "stack_popm"
+ [(match_parallel 1 "rx_load_multiple_vector"
+ [(set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 0 "const_int_operand" "n")))])]
+ "reload_completed"
+ {
+ rx_emit_stack_popm (operands, true);
+ return "";
+ }
+ [(set_attr "length" "2")
+  (set_attr "timings" "45")]	;; The timing is a guesstimated average.
+)
+
+(define_insn_and_split "cstoresi4"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 1 "comparison_operator"
+ [(match_operand:SI 2 "register_operand" "r")
+ (match_operand:SI 3 "rx_source_operand" "riQ")]))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ rtx flags, x;
+
+ flags = gen_rtx_REG (CCmode, CC_REG);
+ x = gen_rtx_COMPARE (CCmode, operands[2], operands[3]);
+ x = gen_rtx_SET (VOIDmode, flags, x);
+ emit_insn (x);
+
+ x = gen_rtx_fmt_ee (GET_CODE (operands[1]), SImode, flags, const0_rtx);
+ x = gen_rtx_SET (VOIDmode, operands[0], x);
+ emit_insn (x);
+ DONE;
+})
+
+(define_insn "*sccc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 1 "comparison_operator"
+ [(reg CC_REG) (const_int 0)]))]
+ "reload_completed"
+ "sc%B1.L\t%0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn_and_split "cstoresf4"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 1 "rx_fp_comparison_operator"
+ [(match_operand:SF 2 "register_operand" "r")
+ (match_operand:SF 3 "rx_source_operand" "rFQ")]))]
+ "ALLOW_RX_FPU_INSNS"
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ rtx flags, x;
+
+ flags = gen_rtx_REG (CC_Fmode, CC_REG);
+ x = gen_rtx_COMPARE (CC_Fmode, operands[2], operands[3]);
+ x = gen_rtx_SET (VOIDmode, flags, x);
+ emit_insn (x);
+
+ x = gen_rtx_fmt_ee (GET_CODE (operands[1]), SImode, flags, const0_rtx);
+ x = gen_rtx_SET (VOIDmode, operands[0], x);
+ emit_insn (x);
+ DONE;
+})
+
+(define_expand "movsicc"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand")
+ (if_then_else:SI (match_operand:SI 1 "comparison_operator")
+ (match_operand:SI 2 "nonmemory_operand")
+ (match_operand:SI 3 "nonmemory_operand")))
+ (clobber (reg:CC CC_REG))])]
+ ""
+{
+  /* One operand must be a constant or a register; the other must be a register.  */
+ if ( ! CONSTANT_P (operands[2])
+ && ! CONSTANT_P (operands[3])
+ && ! (REG_P (operands[2]) && REG_P (operands[3])))
+ FAIL;
+})
+
+(define_insn_and_split "*movsicc"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "register_operand" "r,r,r")
+ (match_operand:SI 4 "rx_source_operand" "riQ,riQ,riQ")])
+ (match_operand:SI 1 "nonmemory_operand" "i,ri,r")
+ (match_operand:SI 2 "nonmemory_operand" "ri,i,r")))
+ (clobber (reg:CC CC_REG))]
+ "(CONSTANT_P (operands[1]) || CONSTANT_P (operands[2]))
+ || (REG_P (operands[1]) && REG_P (operands[2]))"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rtx x, flags, op0, op1, op2;
+ enum rtx_code cmp_code;
+
+ flags = gen_rtx_REG (CCmode, CC_REG);
+ x = gen_rtx_COMPARE (CCmode, operands[3], operands[4]);
+ emit_insn (gen_rtx_SET (VOIDmode, flags, x));
+
+ cmp_code = GET_CODE (operands[5]);
+ op0 = operands[0];
+ op1 = operands[1];
+ op2 = operands[2];
+
+ /* If OP2 is the constant, reverse the sense of the move.
+ Likewise if both operands are registers but OP1 == OP0. */
+ if ((! CONSTANT_P (operands[1]) && CONSTANT_P (operands[2]))
+ || (REG_P (operands[1]) && REG_P (operands[2])
+ && rtx_equal_p (op0, op1)))
+ {
+ x = op1, op1 = op2, op2 = x;
+ cmp_code = reverse_condition (cmp_code);
+ }
+
+ /* If OP2 does not match the output, copy it into place. We have allowed
+ these alternatives so that the destination can legitimately be one of
+ the comparison operands without increasing register pressure. */
+ if (! rtx_equal_p (op0, op2))
+ emit_move_insn (op0, op2);
+
+ x = gen_rtx_fmt_ee (cmp_code, VOIDmode, flags, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (SImode, x, op1, op0);
+ emit_insn (gen_rtx_SET (VOIDmode, op0, x));
+ DONE;
+})
+
+(define_insn "*stcc"
+ [(set (match_operand:SI 0 "register_operand" "+r,r,r,r")
+ (if_then_else:SI
+ (match_operator 2 "rx_z_comparison_operator"
+ [(reg CC_REG) (const_int 0)])
+ (match_operand:SI 1 "immediate_operand" "Sint08,Sint16,Sint24,i")
+ (match_dup 0)))]
+ "reload_completed
+ && ((GET_CODE (operands[2]) == EQ) || (GET_CODE (operands[2]) == NE))"
+ {
+ if (GET_CODE (operands[2]) == EQ)
+ return "stz\t%1, %0";
+ else
+ return "stnz\t%1, %0";
+ }
+ [(set_attr "length" "4,5,6,7")]
+)
+
+(define_insn "*stcc_reg"
+ [(set (match_operand:SI 0 "register_operand" "+r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 2 "comparison_operator"
+ [(reg CC_REG) (const_int 0)])
+ (match_operand:SI 1 "nonmemory_operand"
+ "r,Uint04,Sint08,Sint16,Sint24,i")
+ (match_dup 0)))]
+ "reload_completed"
+ {
+ PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
+ return "b%B2 1f\n\tmov %1, %0\n1:";
+ }
+ [(set_attr "length" "3,3,4,5,6,7")]
+)
+
+;; Arithmetic Instructions
+
+(define_insn "abssi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (abs:SI (match_operand:SI 1 "register_operand" "0,r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ abs\t%0
+ abs\t%1, %0"
+ [(set_attr "length" "2,3")]
+)
+
+(define_insn "*abssi2_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (abs:SI (match_operand:SI 1 "register_operand" "0,r")))
+ (set (reg CC_REG)
+ (compare (abs:SI (match_dup 1))
+ (const_int 0)))]
+ ;; Note - although the ABS instruction does set the O bit in the processor
+  ;; status word, it does not do so in a way that is comparable with that
+  ;; of the CMP instruction.  Hence we use CC_ZSmode rather than CC_ZSOmode.
+ "reload_completed && rx_match_ccmode (insn, CC_ZSmode)"
+ "@
+ abs\t%0
+ abs\t%1, %0"
+ [(set_attr "length" "2,3")]
+)
+
+(define_expand "addsi3"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "rx_source_operand" "")))
+ (clobber (reg:CC CC_REG))])]
+ ""
+ "
+ operands[0] = rx_maybe_pidify_operand (operands[0], 1);
+ operands[1] = rx_maybe_pidify_operand (operands[1], 1);
+ operands[2] = rx_maybe_pidify_operand (operands[2], 1);
+ "
+)
+
+(define_insn "addsi3_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r,r,r,r,r,r,r,r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0,0,r,r,r,r,r,r,0")
+ (match_operand:SI 2 "rx_source_operand" "r,Uint04,NEGint4,Sint08,Sint16,Sint24,i,0,r,Sint08,Sint16,Sint24,i,Q")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ add\t%2, %0
+ add\t%2, %0
+ sub\t%N2, %0
+ add\t%2, %0
+ add\t%2, %0
+ add\t%2, %0
+ add\t%2, %0
+ add\t%1, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%Q2, %0"
+ [(set_attr "timings" "11,11,11,11,11,11,11,11,11,11,11,11,11,33")
+ (set_attr "length" "2,2,2,3,4,5,6,2,3,3,4,5,6,5")]
+)
+
+(define_insn "*addsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r,r,r,r,r,r,r,r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0,0,r,r,r,r,r,r,0")
+ (match_operand:SI 2 "rx_source_operand" "r,Uint04,NEGint4,Sint08,Sint16,Sint24,i,0,r,Sint08,Sint16,Sint24,i,Q")))
+ (set (reg CC_REG)
+ (compare (plus:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSCmode)"
+ "@
+ add\t%2, %0
+ add\t%2, %0
+ sub\t%N2, %0
+ add\t%2, %0
+ add\t%2, %0
+ add\t%2, %0
+ add\t%2, %0
+ add\t%1, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%Q2, %0"
+ [(set_attr "timings" "11,11,11,11,11,11,11,11,11,11,11,11,11,33")
+ (set_attr "length" "2,2,2,3,4,5,6,2,3,3,4,5,6,5")]
+)
+
+;; A helper to expand the above with the CC_MODE filled in.
+(define_expand "addsi3_flags"
+ [(parallel [(set (match_operand:SI 0 "register_operand")
+ (plus:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "rx_source_operand")))
+ (set (reg:CC_ZSC CC_REG)
+ (compare:CC_ZSC (plus:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))])]
+)
+
+(define_insn "adc_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (plus:SI
+ (plus:SI
+ (ltu:SI (reg:CC CC_REG) (const_int 0))
+ (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0"))
+ (match_operand:SI 2 "rx_source_operand" "r,Sint08,Sint16,Sint24,i,Q")))
+ (clobber (reg:CC CC_REG))]
+ "reload_completed"
+ "adc\t%2, %0"
+ [(set_attr "timings" "11,11,11,11,11,33")
+ (set_attr "length" "3,4,5,6,7,6")]
+)
+
+(define_insn "*adc_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (plus:SI
+ (plus:SI
+ (ltu:SI (reg:CC CC_REG) (const_int 0))
+ (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0"))
+ (match_operand:SI 2 "rx_source_operand" "r,Sint08,Sint16,Sint24,i,Q")))
+ (set (reg CC_REG)
+ (compare
+ (plus:SI
+ (plus:SI
+ (ltu:SI (reg:CC CC_REG) (const_int 0))
+ (match_dup 1))
+ (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSCmode)"
+ "adc\t%2, %0"
+ [(set_attr "timings" "11,11,11,11,11,33")
+ (set_attr "length" "3,4,5,6,7,6")]
+)
+
+;; Peepholes to match:
+;; (set (reg A) (reg B))
+;; (set (CC) (compare:CC (reg A/reg B) (const_int 0)))
+;; and replace them with the addsi3_flags pattern, using an add
+;; of zero to copy the register and set the condition code bits.
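+;;
+;; An illustrative sketch (assumed): the pair "mov r1, r2" followed by
+;; "cmp #0, r2" collapses into the single flag-setting instruction
+;; "add #0, r1, r2".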
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operand:SI 1 "register_operand"))
+ (set (reg:CC CC_REG)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ ""
+ [(parallel [(set (match_dup 0)
+ (plus:SI (match_dup 1) (const_int 0)))
+ (set (reg:CC_ZSC CC_REG)
+ (compare:CC_ZSC (plus:SI (match_dup 1) (const_int 0))
+ (const_int 0)))])]
+)
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operand:SI 1 "register_operand"))
+ (set (reg:CC CC_REG)
+ (compare:CC (match_dup 1)
+ (const_int 0)))]
+ ""
+ [(parallel [(set (match_dup 0)
+ (plus:SI (match_dup 1) (const_int 0)))
+ (set (reg:CC_ZSC CC_REG)
+ (compare:CC_ZSC (plus:SI (match_dup 1) (const_int 0))
+ (const_int 0)))])]
+)
+
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "register_operand")
+ (plus:DI (match_operand:DI 1 "register_operand")
+ (match_operand:DI 2 "rx_source_operand")))]
+ ""
+{
+ rtx op0l, op0h, op1l, op1h, op2l, op2h;
+
+ op0l = gen_lowpart (SImode, operands[0]);
+ op1l = gen_lowpart (SImode, operands[1]);
+ op2l = gen_lowpart (SImode, operands[2]);
+ op0h = gen_highpart (SImode, operands[0]);
+ op1h = gen_highpart (SImode, operands[1]);
+ op2h = gen_highpart_mode (SImode, DImode, operands[2]);
+
+ emit_insn (gen_adddi3_internal (op0l, op0h, op1l, op2l, op1h, op2h));
+ DONE;
+})
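+
+;; In effect (a sketch of the expansion): a 64-bit add A = B + C becomes
+;;
+;;	add	C.lo, B.lo, A.lo	; sets the carry flag
+;;	adc	C.hi, A.hi		; add the high words plus carry
+;;
+;; with adddi3_internal resolving any register overlaps first.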
+
+(define_insn_and_split "adddi3_internal"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (plus:SI (match_operand:SI 2 "register_operand" "r")
+ (match_operand:SI 3 "rx_source_operand" "riQ")))
+ (set (match_operand:SI 1 "register_operand" "=r")
+ (plus:SI
+ (plus:SI
+ (ltu:SI (plus:SI (match_dup 2) (match_dup 3)) (match_dup 2))
+ (match_operand:SI 4 "register_operand" "%1"))
+ (match_operand:SI 5 "rx_source_operand" "riQ")))
+ (clobber (match_scratch:SI 6 "=&r"))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ rtx op0l = operands[0];
+ rtx op0h = operands[1];
+ rtx op1l = operands[2];
+ rtx op2l = operands[3];
+ rtx op1h = operands[4];
+ rtx op2h = operands[5];
+ rtx scratch = operands[6];
+ rtx x;
+
+ if (reg_overlap_mentioned_p (op0l, op1h))
+ {
+ emit_move_insn (scratch, op0l);
+ op1h = scratch;
+ if (reg_overlap_mentioned_p (op0l, op2h))
+ op2h = scratch;
+ }
+ else if (reg_overlap_mentioned_p (op0l, op2h))
+ {
+ emit_move_insn (scratch, op0l);
+ op2h = scratch;
+ }
+
+ if (rtx_equal_p (op0l, op1l))
+ ;
+ /* It is preferable that op0l == op1l... */
+ else if (rtx_equal_p (op0l, op2l))
+ x = op1l, op1l = op2l, op2l = x;
+ /* ... but it is only a requirement if op2l == MEM. */
+ else if (MEM_P (op2l))
+ {
+ /* Let's hope that we still have a scratch register free. */
+ gcc_assert (op1h != scratch);
+ emit_move_insn (scratch, op2l);
+ op2l = scratch;
+ }
+
+ emit_insn (gen_addsi3_flags (op0l, op1l, op2l));
+
+ if (rtx_equal_p (op0h, op1h))
+ ;
+ else if (rtx_equal_p (op0h, op2h))
+ x = op1h, op1h = op2h, op2h = x;
+ else
+ {
+ emit_move_insn (op0h, op1h);
+ op1h = op0h;
+ }
+ emit_insn (gen_adc_internal (op0h, op1h, op2h));
+ DONE;
+})
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r,r,r")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0,r,r,0")
+ (match_operand:SI 2 "rx_source_operand" "r,Uint04,Sint08,Sint16,Sint24,i,0,r,Q")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%1, %0
+ and\t%2, %1, %0
+ and\t%Q2, %0"
+ [(set_attr "timings" "11,11,11,11,11,11,11,11,33")
+ (set_attr "length" "2,2,3,4,5,6,2,5,5")]
+)
+
+(define_insn "*andsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r,r,r")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0,r,r,0")
+ (match_operand:SI 2 "rx_source_operand" "r,Uint04,Sint08,Sint16,Sint24,i,0,r,Q")))
+ (set (reg CC_REG)
+ (compare (and:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSmode)"
+ "@
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%1, %0
+ and\t%2, %1, %0
+ and\t%Q2, %0"
+ [(set_attr "timings" "11,11,11,11,11,11,11,11,33")
+ (set_attr "length" "2,2,3,4,5,6,2,5,5")]
+)
+
+;; Byte swap (single 32-bit value).
+(define_insn "bswapsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (bswap:SI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "revl\t%1, %0"
+ [(set_attr "length" "3")]
+)
+
+;; Byte swap (single 16-bit value). Note - we ignore the swapping of the high 16-bits.
+(define_insn "bswaphi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (bswap:HI (match_operand:HI 1 "register_operand" "r")))]
+ ""
+ "revw\t%1, %0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (div:SI (match_operand:SI 1 "register_operand" "0,0,0,0,0,0")
+ (match_operand:SI 2 "rx_source_operand" "r,Sint08,Sint16,Sint24,i,Q")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "div\t%Q2, %0"
+  [(set_attr "timings" "1111")	;; Strictly speaking the timing should be
+				;; 2222, but that is a worst-case scenario.
+ (set_attr "length" "3,4,5,6,7,6")]
+)
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (udiv:SI (match_operand:SI 1 "register_operand" "0,0,0,0,0,0")
+ (match_operand:SI 2 "rx_source_operand" "r,Sint08,Sint16,Sint24,i,Q")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "divu\t%Q2, %0"
+  [(set_attr "timings" "1010")	;; Strictly speaking the timing should be
+				;; 2020, but that is a worst-case scenario.
+ (set_attr "length" "3,4,5,6,7,6")]
+)
+
+;; Note - these patterns are suppressed in big-endian mode because they
+;; generate a little-endian result, i.e. the most significant word of the
+;; result is placed in the higher-numbered register of the destination
+;; register pair.
+
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r")
+ (mult:DI (sign_extend:DI (match_operand:SI
+ 1 "register_operand" "%0,0,0,0,0,0"))
+ (sign_extend:DI (match_operand:SI
+ 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q"))))]
+ "! TARGET_BIG_ENDIAN_DATA"
+ "emul\t%Q2, %0"
+ [(set_attr "length" "3,4,5,6,7,6")
+ (set_attr "timings" "22,22,22,22,22,44")]
+)
+
+;; See comment for mulsidi3.
+;; Note - the zero_extends are to distinguish this pattern from the
+;; mulsidi3 pattern.  Immediate addressing is not supported
+;; because GCC cannot handle the expression (zero_extend (const_int)).
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "%0,0"))
+ (zero_extend:DI (match_operand:SI 2 "rx_compare_operand" "r,Q"))))]
+ "! TARGET_BIG_ENDIAN_DATA"
+ "emulu\t%Q2, %0"
+ [(set_attr "length" "3,6")
+ (set_attr "timings" "22,44")]
+)
+
+(define_insn "smaxsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (smax:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0")
+ (match_operand:SI 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q")))]
+ ""
+ "max\t%Q2, %0"
+ [(set_attr "length" "3,4,5,6,7,6")
+ (set_attr "timings" "11,11,11,11,11,33")]
+)
+
+(define_insn "sminsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (smin:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0")
+ (match_operand:SI 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q")))]
+ ""
+ "min\t%Q2, %0"
+ [(set_attr "length" "3,4,5,6,7,6")
+ (set_attr "timings" "11,11,11,11,11,33")]
+)
+
+(define_insn "umax<small_int_modes:mode>3_u"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (smax:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0")
+ (zero_extend:SI (match_operand:small_int_modes 2 "rx_minmaxex_operand"
+ "r,Sint08,Sint16,Sint24,i,Q"))))]
+ ""
+ "max\t%R2, %0"
+ [(set_attr "length" "3,4,5,6,7,6")
+ (set_attr "timings" "11,11,11,11,11,33")]
+)
+
+(define_insn "umin<small_int_modes:mode>3_ur"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (smin:SI (zero_extend:SI (match_operand:small_int_modes 2 "rx_minmaxex_operand"
+ "r,Sint08,Sint16,Sint24,i,Q"))
+ (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0")))]
+ ""
+ "min\t%R2, %0"
+ [(set_attr "length" "3,4,5,6,7,6")
+ (set_attr "timings" "11,11,11,11,11,33")]
+)
+
+(define_insn "umax<small_int_modes:mode>3_ur"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (smax:SI (zero_extend:SI (match_operand:small_int_modes 2 "rx_minmaxex_operand"
+ "r,Sint08,Sint16,Sint24,i,Q"))
+ (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0")))]
+ ""
+ "max\t%R2, %0"
+ [(set_attr "length" "3,4,5,6,7,6")
+ (set_attr "timings" "11,11,11,11,11,33")]
+)
+
+(define_expand "umax<small_int_modes:mode>3"
+ [(set (match_dup 4)
+ (zero_extend:SI (match_operand:small_int_modes 1 "register_operand" "%0,0,0,0,0,0")))
+ (set (match_dup 3)
+ (smax:SI (match_dup 4)
+ (match_operand:small_int_modes 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q")))
+ (set (match_operand:small_int_modes 0 "register_operand" "=r,r,r,r,r,r")
+ (match_dup 6))
+ ]
+ ""
+ "operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+ operands[5] = gen_reg_rtx (SImode);
+ operands[6] = gen_rtx_SUBREG (GET_MODE (operands[0]), operands[3],
+ TARGET_BIG_ENDIAN_DATA ? (GET_MODE (operands[0]) == HImode ? 2 : 3) : 0);
+ if (GET_CODE (operands[2]) != CONST_INT)
+ {
+ emit_move_insn (operands[5], gen_rtx_ZERO_EXTEND (SImode, operands[2]));
+ operands[2] = operands[5];
+ }
+ "
+)
+
+(define_expand "umin<small_int_modes:mode>3"
+ [(set (match_dup 4)
+ (zero_extend:SI (match_operand:small_int_modes 1 "register_operand" "%0,0,0,0,0,0")))
+ (set (match_dup 3)
+ (smin:SI (match_dup 4)
+ (match_operand:small_int_modes 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q")))
+ (set (match_operand:small_int_modes 0 "register_operand" "=r,r,r,r,r,r")
+ (match_dup 6))
+ ]
+ ""
+ "operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+ operands[5] = gen_reg_rtx (SImode);
+ operands[6] = gen_rtx_SUBREG (GET_MODE (operands[0]), operands[3],
+ TARGET_BIG_ENDIAN_DATA ? (GET_MODE (operands[0]) == HImode ? 2 : 3) : 0);
+ if (GET_CODE (operands[2]) != CONST_INT)
+ {
+ emit_move_insn (operands[5], gen_rtx_ZERO_EXTEND (SImode, operands[2]));
+ operands[2] = operands[5];
+ }
+ "
+)
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r,r,r")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0,0,r,r")
+ (match_operand:SI 2 "rx_source_operand"
+ "r,Uint04,Sint08,Sint16,Sint24,i,Q,0,r")))]
+ ""
+ "@
+ mul\t%2, %0
+ mul\t%2, %0
+ mul\t%2, %0
+ mul\t%2, %0
+ mul\t%2, %0
+ mul\t%Q2, %0
+ mul\t%Q2, %0
+ mul\t%1, %0
+ mul\t%2, %1, %0"
+ [(set_attr "length" "2,2,3,4,5,6,5,2,3")
+ (set_attr "timings" "11,11,11,11,11,11,33,11,11")]
+)
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (neg:SI (match_operand:SI 1 "register_operand" "0,r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ neg\t%0
+ neg\t%1, %0"
+ [(set_attr "length" "2,3")]
+)
+
+;; Note that the O and C flags are not set as per a normal compare,
+;; and thus are unusable in that context.
+(define_insn "*negsi2_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (neg:SI (match_operand:SI 1 "register_operand" "0,r")))
+ (set (reg CC_REG)
+ (compare (neg:SI (match_dup 1))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSmode)"
+ "@
+ neg\t%0
+ neg\t%1, %0"
+ [(set_attr "length" "2,3")]
+)
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (not:SI (match_operand:SI 1 "register_operand" "0,r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ not\t%0
+ not\t%1, %0"
+ [(set_attr "length" "2,3")]
+)
+
+(define_insn "*one_cmplsi2_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (not:SI (match_operand:SI 1 "register_operand" "0,r")))
+ (set (reg CC_REG)
+ (compare (not:SI (match_dup 1))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSmode)"
+ "@
+ not\t%0
+ not\t%1, %0"
+ [(set_attr "length" "2,3")]
+)
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r,r,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0,r,r,0")
+ (match_operand:SI 2 "rx_source_operand" "r,Uint04,Sint08,Sint16,Sint24,i,0,r,Q")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ or\t%2, %0
+ or\t%2, %0
+ or\t%2, %0
+ or\t%2, %0
+ or\t%2, %0
+ or\t%Q2, %0
+ or\t%1, %0
+ or\t%2, %1, %0
+ or\t%Q2, %0"
+ [(set_attr "timings" "11,11,11,11,11,11,11,11,33")
+ (set_attr "length" "2,2,3,4,5,6,2,3,5")]
+)
+
+(define_insn "*iorsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r,r,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0,r,r,0")
+ (match_operand:SI 2 "rx_source_operand" "r,Uint04,Sint08,Sint16,Sint24,i,0,r,Q")))
+ (set (reg CC_REG)
+ (compare (ior:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSmode)"
+ "@
+ or\t%2, %0
+ or\t%2, %0
+ or\t%2, %0
+ or\t%2, %0
+ or\t%2, %0
+ or\t%Q2, %0
+ or\t%1, %0
+ or\t%2, %1, %0
+ or\t%Q2, %0"
+ [(set_attr "timings" "11,11,11,11,11,11,11,11,33")
+ (set_attr "length" "2,2,3,4,5,6,2,3,5")]
+)
+
+(define_insn "rotlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "rx_shift_operand" "rn")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "rotl\t%2, %0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn "*rotlsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "rx_shift_operand" "rn")))
+ (set (reg CC_REG)
+ (compare (rotate:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSmode)"
+ "rotl\t%2, %0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "rx_shift_operand" "rn")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "rotr\t%2, %0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn "*rotrsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "rx_shift_operand" "rn")))
+ (set (reg CC_REG)
+ (compare (rotatert:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSmode)"
+ "rotr\t%2, %0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "rx_shift_operand" "r,n,n")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ shar\t%2, %0
+ shar\t%2, %0
+ shar\t%2, %1, %0"
+ [(set_attr "length" "3,2,3")]
+)
+
+(define_insn "*ashrsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "rx_shift_operand" "r,n,n")))
+ (set (reg CC_REG)
+ (compare (ashiftrt:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSmode)"
+ "@
+ shar\t%2, %0
+ shar\t%2, %0
+ shar\t%2, %1, %0"
+ [(set_attr "length" "3,2,3")]
+)
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "rx_shift_operand" "r,n,n")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ shlr\t%2, %0
+ shlr\t%2, %0
+ shlr\t%2, %1, %0"
+ [(set_attr "length" "3,2,3")]
+)
+
+(define_insn "*lshrsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "rx_shift_operand" "r,n,n")))
+ (set (reg CC_REG)
+ (compare (lshiftrt:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSmode)"
+ "@
+ shlr\t%2, %0
+ shlr\t%2, %0
+ shlr\t%2, %1, %0"
+ [(set_attr "length" "3,2,3")]
+)
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "rx_shift_operand" "r,n,n")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ shll\t%2, %0
+ shll\t%2, %0
+ shll\t%2, %1, %0"
+ [(set_attr "length" "3,2,3")]
+)
+
+(define_insn "*ashlsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "rx_shift_operand" "r,n,n")))
+ (set (reg CC_REG)
+ (compare (ashift:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSmode)"
+ "@
+ shll\t%2, %0
+ shll\t%2, %0
+ shll\t%2, %1, %0"
+ [(set_attr "length" "3,2,3")]
+)
+
+;; Saturate to 32-bits
+(define_insn_and_split "ssaddsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ss_plus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "rx_source_operand" "riQ")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ "reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (match_dup 1) (match_dup 2)))
+ (set (reg:CC_ZSC CC_REG)
+ (compare:CC_ZSC
+ (plus:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))])
+ (set (match_dup 0)
+ (unspec:SI [(match_dup 0) (reg:CC CC_REG)]
+ UNSPEC_BUILTIN_SAT))]
+ ""
+)
+
+(define_insn "*sat"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "0")
+ (reg:CC CC_REG)]
+ UNSPEC_BUILTIN_SAT))]
+ "reload_completed"
+ "sat\t%0"
+ [(set_attr "length" "2")]
+)
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0")
+ (match_operand:SI 2 "rx_source_operand" "r,Uint04,n,r,Q")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "@
+ sub\t%2, %0
+ sub\t%2, %0
+ add\t%N2, %0
+ sub\t%2, %1, %0
+ sub\t%Q2, %0"
+ [(set_attr "timings" "11,11,11,11,33")
+ (set_attr "length" "2,2,6,3,5")]
+)
+
+;; Note that the O flag is set as if for (compare op1 op2), not for
+;; what is described here, (compare op0 0).
+(define_insn "*subsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0")
+ (match_operand:SI 2 "rx_source_operand" "r,Uint04,n,r,Q")))
+ (set (reg CC_REG)
+ (compare (minus:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSCmode)"
+ "@
+ sub\t%2, %0
+ sub\t%2, %0
+ add\t%N2, %0
+ sub\t%2, %1, %0
+ sub\t%Q2, %0"
+ [(set_attr "timings" "11,11,11,11,33")
+ (set_attr "length" "2,2,6,3,5")]
+)
+
+;; A helper to expand the above with the CC_MODE filled in.
+(define_expand "subsi3_flags"
+ [(parallel [(set (match_operand:SI 0 "register_operand")
+ (minus:SI (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "rx_source_operand")))
+ (set (reg:CC_ZSC CC_REG)
+ (compare:CC_ZSC (minus:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))])]
+)
+
+(define_insn "sbb_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (minus:SI
+ (minus:SI
+ (match_operand:SI 1 "register_operand" " 0,0")
+ (match_operand:SI 2 "rx_compare_operand" " r,Q"))
+ (geu:SI (reg:CC CC_REG) (const_int 0))))
+ (clobber (reg:CC CC_REG))]
+ "reload_completed"
+ "sbb\t%2, %0"
+ [(set_attr "timings" "11,33")
+ (set_attr "length" "3,6")]
+)
+
+(define_insn "*sbb_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (minus:SI
+ (minus:SI
+ (match_operand:SI 1 "register_operand" " 0,0")
+ (match_operand:SI 2 "rx_compare_operand" " r,Q"))
+ (geu:SI (reg:CC CC_REG) (const_int 0))))
+ (set (reg CC_REG)
+ (compare
+ (minus:SI
+ (minus:SI (match_dup 1) (match_dup 2))
+ (geu:SI (reg:CC CC_REG) (const_int 0)))
+ (const_int 0)))]
+ "reload_completed"
+ "sbb\t%2, %0"
+ [(set_attr "timings" "11,33")
+ (set_attr "length" "3,6")]
+)
+
+(define_expand "subdi3"
+ [(set (match_operand:DI 0 "register_operand")
+ (minus:DI (match_operand:DI 1 "register_operand")
+ (match_operand:DI 2 "register_operand")))]
+ ""
+{
+ rtx op0l, op0h, op1l, op1h, op2l, op2h;
+
+ op0l = gen_lowpart (SImode, operands[0]);
+ op1l = gen_lowpart (SImode, operands[1]);
+ op2l = gen_lowpart (SImode, operands[2]);
+ op0h = gen_highpart (SImode, operands[0]);
+ op1h = gen_highpart (SImode, operands[1]);
+ op2h = gen_highpart_mode (SImode, DImode, operands[2]);
+
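+  /* The low halves are subtracted first with a flag-setting SUB; the
+     subdi3_internal splitter below then emits an SBB for the high
+     halves, consuming the borrow recorded in the carry flag.  */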
+ emit_insn (gen_subdi3_internal (op0l, op0h, op1l, op2l, op1h, op2h));
+ DONE;
+})
+
+(define_insn_and_split "subdi3_internal"
+ [(set (match_operand:SI 0 "register_operand" "=&r,&r")
+ (minus:SI (match_operand:SI 2 "register_operand" " 0, r")
+ (match_operand:SI 3 "rx_compare_operand" "rQ, r")))
+ (set (match_operand:SI 1 "register_operand" "= r, r")
+ (minus:SI
+ (minus:SI
+ (match_operand:SI 4 "register_operand" " 1, 1")
+ (match_operand:SI 5 "rx_compare_operand" " rQ,rQ"))
+ (geu:SI (match_dup 2) (match_dup 3))))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ emit_insn (gen_subsi3_flags (operands[0], operands[2], operands[3]));
+ emit_insn (gen_sbb_internal (operands[1], operands[4], operands[5]));
+ DONE;
+})
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0")
+ (match_operand:SI 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "xor\t%Q2, %0"
+ [(set_attr "timings" "11,11,11,11,11,33")
+ (set_attr "length" "3,4,5,6,7,6")]
+)
+
+(define_insn "*xorsi3_flags"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0")
+ (match_operand:SI 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q")))
+ (set (reg CC_REG)
+ (compare (xor:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "reload_completed && rx_match_ccmode (insn, CC_ZSmode)"
+ "xor\t%Q2, %0"
+ [(set_attr "timings" "11,11,11,11,11,33")
+ (set_attr "length" "3,4,5,6,7,6")]
+)
+
+;; A set of peepholes to catch extending loads followed by arithmetic operations.
+;; We use iterators where possible to reduce the amount of typing and hence the
+;; possibilities for typos.
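+;;
+;; As an illustrative sketch (not from the sources), code such as:
+;;
+;;   int add_mem_byte (int x, signed char *p) { return x + *p; }
+;;
+;; first loads *p with an extending move; when the loaded register is
+;; dead afterwards, and we are at optimization levels below -O3 or are
+;; optimizing for size, the peepholes below fold the pair into a single
+;; arithmetic insn with an extending memory operand.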
+
+(define_code_iterator extend_types [(zero_extend "") (sign_extend "")])
+(define_code_attr letter [(zero_extend "R") (sign_extend "Q")])
+
+(define_code_iterator memex_commutative [(plus "") (and "") (ior "") (xor "")])
+(define_code_iterator memex_noncomm [(div "") (udiv "") (minus "")])
+(define_code_iterator memex_nocc [(smax "") (smin "") (mult "")])
+
+(define_code_attr op [(plus "add") (and "and") (div "div") (udiv "divu") (smax "max") (smin "min") (mult "mul") (ior "or") (minus "sub") (xor "xor")])
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (extend_types:SI (match_operand:small_int_modes 1 "rx_restricted_mem_operand")))
+ (parallel [(set (match_operand:SI 2 "register_operand")
+ (memex_commutative:SI (match_dup 0)
+ (match_dup 2)))
+ (clobber (reg:CC CC_REG))])]
+ "peep2_regno_dead_p (2, REGNO (operands[0])) && (optimize < 3 || optimize_size)"
+ [(parallel [(set:SI (match_dup 2)
+ (memex_commutative:SI (match_dup 2)
+ (extend_types:SI (match_dup 1))))
+ (clobber (reg:CC CC_REG))])]
+)
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (extend_types:SI (match_operand:small_int_modes 1 "rx_restricted_mem_operand")))
+ (parallel [(set (match_operand:SI 2 "register_operand")
+ (memex_commutative:SI (match_dup 2)
+ (match_dup 0)))
+ (clobber (reg:CC CC_REG))])]
+ "peep2_regno_dead_p (2, REGNO (operands[0])) && (optimize < 3 || optimize_size)"
+ [(parallel [(set:SI (match_dup 2)
+ (memex_commutative:SI (match_dup 2)
+ (extend_types:SI (match_dup 1))))
+ (clobber (reg:CC CC_REG))])]
+)
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (extend_types:SI (match_operand:small_int_modes 1 "rx_restricted_mem_operand")))
+ (parallel [(set (match_operand:SI 2 "register_operand")
+ (memex_noncomm:SI (match_dup 2)
+ (match_dup 0)))
+ (clobber (reg:CC CC_REG))])]
+ "peep2_regno_dead_p (2, REGNO (operands[0])) && (optimize < 3 || optimize_size)"
+ [(parallel [(set:SI (match_dup 2)
+ (memex_noncomm:SI (match_dup 2)
+ (extend_types:SI (match_dup 1))))
+ (clobber (reg:CC CC_REG))])]
+)
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (extend_types:SI (match_operand:small_int_modes 1 "rx_restricted_mem_operand")))
+ (set (match_operand:SI 2 "register_operand")
+ (memex_nocc:SI (match_dup 0)
+ (match_dup 2)))]
+ "peep2_regno_dead_p (2, REGNO (operands[0])) && (optimize < 3 || optimize_size)"
+ [(set:SI (match_dup 2)
+ (memex_nocc:SI (match_dup 2)
+ (extend_types:SI (match_dup 1))))]
+)
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (extend_types:SI (match_operand:small_int_modes 1 "rx_restricted_mem_operand")))
+ (set (match_operand:SI 2 "register_operand")
+ (memex_nocc:SI (match_dup 2)
+ (match_dup 0)))]
+ "peep2_regno_dead_p (2, REGNO (operands[0])) && (optimize < 3 || optimize_size)"
+ [(set:SI (match_dup 2)
+ (memex_nocc:SI (match_dup 2)
+ (extend_types:SI (match_dup 1))))]
+)
+
+(define_insn "<memex_commutative:code>si3_<extend_types:code><small_int_modes:mode>"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (memex_commutative:SI (match_operand:SI 1 "register_operand" "%0")
+ (extend_types:SI (match_operand:small_int_modes 2 "rx_restricted_mem_operand" "Q"))))
+ (clobber (reg:CC CC_REG))]
+ "(optimize < 3 || optimize_size)"
+ "<memex_commutative:op>\t%<extend_types:letter>2, %0"
+ [(set_attr "timings" "33")
+ (set_attr "length" "5")] ;; This length is corrected in rx_adjust_insn_length
+)
+
+(define_insn "<memex_noncomm:code>si3_<extend_types:code><small_int_modes:mode>"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (memex_noncomm:SI (match_operand:SI 1 "register_operand" "0")
+ (extend_types:SI (match_operand:small_int_modes 2 "rx_restricted_mem_operand" "Q"))))
+ (clobber (reg:CC CC_REG))]
+ "(optimize < 3 || optimize_size)"
+ "<memex_noncomm:op>\t%<extend_types:letter>2, %0"
+ [(set_attr "timings" "33")
+ (set_attr "length" "5")] ;; This length is corrected in rx_adjust_insn_length
+)
+
+(define_insn "<memex_nocc:code>si3_<extend_types:code><small_int_modes:mode>"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (memex_nocc:SI (match_operand:SI 1 "register_operand" "%0")
+ (extend_types:SI (match_operand:small_int_modes 2 "rx_restricted_mem_operand" "Q"))))]
+ "(optimize < 3 || optimize_size)"
+ "<memex_nocc:op>\t%<extend_types:letter>2, %0"
+ [(set_attr "timings" "33")
+ (set_attr "length" "5")] ;; This length is corrected in rx_adjust_insn_length
+)
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (extend_types:SI (match_operand:small_int_modes 1 "rx_restricted_mem_operand")))
+ (set (reg:CC CC_REG)
+ (compare:CC (match_operand:SI 2 "register_operand")
+ (match_dup 0)))]
+ "peep2_regno_dead_p (2, REGNO (operands[0])) && (optimize < 3 || optimize_size)"
+ [(set (reg:CC CC_REG)
+ (compare:CC (match_dup 2)
+ (extend_types:SI (match_dup 1))))]
+)
+
+;; Convert:
+;; (set (reg1) (sign_extend (mem))
+;; (set (reg2) (zero_extend (reg1))
+;; into
+;; (set (reg2) (zero_extend (mem)))
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (sign_extend:SI (match_operand:small_int_modes 1 "memory_operand")))
+ (set (match_operand:SI 2 "register_operand")
+ (zero_extend:SI (match_operand:small_int_modes 3 "register_operand")))]
+ "REGNO (operands[0]) == REGNO (operands[3])
+ && (REGNO (operands[0]) == REGNO (operands[2])
+ || peep2_regno_dead_p (2, REGNO (operands[0])))"
+ [(set (match_dup 2)
+ (zero_extend:SI (match_dup 1)))]
+)
+
+;; Remove the redundant sign extension from:
+;; (set (reg) (extend (mem)))
+;; (set (reg) (extend (reg)))
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (extend_types:SI (match_operand:small_int_modes 1 "memory_operand")))
+ (set (match_dup 0)
+ (extend_types:SI (match_operand:small_int_modes 2 "register_operand")))]
+ "REGNO (operands[0]) == REGNO (operands[2])"
+ [(set (match_dup 0) (extend_types:SI (match_dup 1)))]
+)
+
+(define_insn "comparesi3_<extend_types:code><small_int_modes:mode>"
+ [(set (reg:CC CC_REG)
+ (compare:CC (match_operand:SI 0 "register_operand" "r")
+ (extend_types:SI (match_operand:small_int_modes 1 "rx_restricted_mem_operand" "Q"))))]
+ "(optimize < 3 || optimize_size)"
+ "cmp\t%<extend_types:letter>1, %0"
+ [(set_attr "timings" "33")
+ (set_attr "length" "5")] ;; This length is corrected in rx_adjust_insn_length
+)
+
+;; Floating Point Instructions
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,r,r")
+ (plus:SF (match_operand:SF 1 "register_operand" "%0,0,0")
+ (match_operand:SF 2 "rx_source_operand" "r,F,Q")))
+ (clobber (reg:CC CC_REG))]
+ "ALLOW_RX_FPU_INSNS"
+ "fadd\t%2, %0"
+ [(set_attr "timings" "44,44,66")
+ (set_attr "length" "3,7,5")]
+)
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,r,r")
+ (div:SF (match_operand:SF 1 "register_operand" "0,0,0")
+ (match_operand:SF 2 "rx_source_operand" "r,F,Q")))
+ (clobber (reg:CC CC_REG))]
+ "ALLOW_RX_FPU_INSNS"
+ "fdiv\t%2, %0"
+ [(set_attr "timings" "1616,1616,1818")
+ (set_attr "length" "3,7,5")]
+)
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,r,r")
+ (mult:SF (match_operand:SF 1 "register_operand" "%0,0,0")
+ (match_operand:SF 2 "rx_source_operand" "r,F,Q")))
+ (clobber (reg:CC CC_REG))]
+ "ALLOW_RX_FPU_INSNS"
+ "fmul\t%2, %0"
+ [(set_attr "timings" "33,33,55")
+ (set_attr "length" "3,7,5")]
+)
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,r,r")
+ (minus:SF (match_operand:SF 1 "register_operand" "0,0,0")
+ (match_operand:SF 2 "rx_source_operand" "r,F,Q")))
+ (clobber (reg:CC CC_REG))]
+ "ALLOW_RX_FPU_INSNS"
+ "fsub\t%Q2, %0"
+ [(set_attr "timings" "44,44,66")
+ (set_attr "length" "3,7,5")]
+)
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (fix:SI (match_operand:SF 1 "rx_compare_operand" "r,Q")))
+ (clobber (reg:CC CC_REG))]
+ "ALLOW_RX_FPU_INSNS"
+ "ftoi\t%Q1, %0"
+ [(set_attr "timings" "22,44")
+ (set_attr "length" "3,5")]
+)
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=r,r")
+ (float:SF (match_operand:SI 1 "rx_compare_operand" "r,Q")))
+ (clobber (reg:CC CC_REG))]
+ "ALLOW_RX_FPU_INSNS"
+ "itof\t%Q1, %0"
+ [(set_attr "timings" "22,44")
+ (set_attr "length" "3,6")]
+)
+
+;; Bit manipulation instructions.
+
+;; ??? The *_in_memory patterns will not be matched without further help.
+;; At one time we had the insv expander generate them, but I suspect that
+;; in general we get better performance by exposing the register load to
+;; the optimizers.
+;;
+;; An alternate solution would be to re-organize these patterns so
+;; that they allow both register and memory operands.  This would let
+;; the register allocator spill the operand rather than load it.  This
+;; would be possible only for operations for which we have a constant
+;; bit offset, so that we can adjust the address by ofs/8 and replace
+;; the offset in the insn by ofs%8.
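+;;
+;; As an illustrative sketch (not from the sources), the register form
+;; below matches code such as:
+;;
+;;   int set_bit (int x, int n) { return x | (1 << n); }
+;;
+;; which can be emitted as a single "bset" instruction.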
+
+(define_insn "*bitset"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ior:SI (ashift:SI (const_int 1)
+ (match_operand:SI 1 "rx_shift_operand" "ri"))
+ (match_operand:SI 2 "register_operand" "0")))]
+ ""
+ "bset\t%1, %0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn "*bitset_in_memory"
+ [(set (match_operand:QI 0 "rx_restricted_mem_operand" "+Q")
+ (ior:QI (ashift:QI (const_int 1)
+ (match_operand:QI 1 "nonmemory_operand" "ri"))
+ (match_dup 0)))]
+ ""
+ "bset\t%1, %0.B"
+ [(set_attr "length" "3")
+ (set_attr "timings" "33")]
+)
+
+(define_insn "*bitinvert"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (xor:SI (ashift:SI (const_int 1)
+ (match_operand:SI 1 "rx_shift_operand" "ri"))
+ (match_operand:SI 2 "register_operand" "0")))]
+ ""
+ "bnot\t%1, %0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn "*bitinvert_in_memory"
+ [(set (match_operand:QI 0 "rx_restricted_mem_operand" "+Q")
+ (xor:QI (ashift:QI (const_int 1)
+ (match_operand:QI 1 "nonmemory_operand" "ri"))
+ (match_dup 0)))]
+ ""
+ "bnot\t%1, %0.B"
+ [(set_attr "length" "5")
+ (set_attr "timings" "33")]
+)
+
+(define_insn "*bitclr"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (and:SI (not:SI
+ (ashift:SI
+ (const_int 1)
+ (match_operand:SI 1 "rx_shift_operand" "ri")))
+ (match_operand:SI 2 "register_operand" "0")))]
+ ""
+ "bclr\t%1, %0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn "*bitclr_in_memory"
+ [(set (match_operand:QI 0 "rx_restricted_mem_operand" "+Q")
+ (and:QI (not:QI
+ (ashift:QI
+ (const_int 1)
+ (match_operand:QI 1 "nonmemory_operand" "ri")))
+ (match_dup 0)))]
+ ""
+ "bclr\t%1, %0.B"
+ [(set_attr "length" "3")
+ (set_attr "timings" "33")]
+)
+
+(define_insn "*insv_imm"
+ [(set (zero_extract:SI
+ (match_operand:SI 0 "register_operand" "+r")
+ (const_int 1)
+ (match_operand:SI 1 "rx_shift_operand" "ri"))
+ (match_operand:SI 2 "const_int_operand" ""))]
+ ""
+{
+ if (INTVAL (operands[2]) & 1)
+ return "bset\t%1, %0";
+ else
+ return "bclr\t%1, %0";
+}
+ [(set_attr "length" "3")]
+)
+
+(define_insn_and_split "rx_insv_reg"
+ [(set (zero_extract:SI
+ (match_operand:SI 0 "register_operand" "+r")
+ (const_int 1)
+ (match_operand:SI 1 "const_int_operand" ""))
+ (match_operand:SI 2 "register_operand" "r"))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1))
+ (match_dup 3))]
+{
+ rtx flags, x;
+
+ /* Emit tst #1, op2. */
+ flags = gen_rtx_REG (CC_ZSmode, CC_REG);
+ x = gen_rtx_AND (SImode, operands[2], const1_rtx);
+ x = gen_rtx_COMPARE (CC_ZSmode, x, const0_rtx);
+ x = gen_rtx_SET (VOIDmode, flags, x);
+ emit_insn (x);
+
+ /* Emit bmne. */
+ operands[3] = gen_rtx_NE (SImode, flags, const0_rtx);
+})
+
+(define_insn_and_split "*insv_cond"
+ [(set (zero_extract:SI
+ (match_operand:SI 0 "register_operand" "+r")
+ (const_int 1)
+ (match_operand:SI 1 "const_int_operand" ""))
+ (match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "register_operand" "r")
+ (match_operand:SI 3 "rx_source_operand" "riQ")]))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1))
+ (match_dup 4))]
+{
+ rtx flags, x;
+
+ flags = gen_rtx_REG (CCmode, CC_REG);
+ x = gen_rtx_COMPARE (CCmode, operands[2], operands[3]);
+ x = gen_rtx_SET (VOIDmode, flags, x);
+ emit_insn (x);
+
+ operands[4] = gen_rtx_fmt_ee (GET_CODE (operands[4]), SImode,
+ flags, const0_rtx);
+})
+
+(define_insn "*bmcc"
+ [(set (zero_extract:SI
+ (match_operand:SI 0 "register_operand" "+r")
+ (const_int 1)
+ (match_operand:SI 1 "const_int_operand" ""))
+ (match_operator:SI 2 "comparison_operator"
+ [(reg CC_REG) (const_int 0)]))]
+ "reload_completed"
+ "bm%B2\t%1, %0"
+ [(set_attr "length" "3")]
+)
+
+;; Work around the fact that X=Y<0 is preferentially expanded as a shift.
+(define_insn_and_split "*insv_cond_lt"
+ [(set (zero_extract:SI
+ (match_operand:SI 0 "register_operand" "+r")
+ (const_int 1)
+ (match_operand:SI 1 "const_int_operand" ""))
+ (match_operator:SI 3 "rshift_operator"
+ [(match_operand:SI 2 "register_operand" "r")
+ (const_int 31)]))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(parallel [(set (zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1))
+ (lt:SI (match_dup 2) (const_int 0)))
+ (clobber (reg:CC CC_REG))])]
+ ""
+)
+
+(define_expand "insv"
+ [(set (zero_extract:SI
+ (match_operand:SI 0 "register_operand") ;; Destination
+ (match_operand:SI 1 "const_int_operand") ;; # of bits to set
+ (match_operand:SI 2 "nonmemory_operand")) ;; Starting bit
+ (match_operand:SI 3 "nonmemory_operand"))] ;; Bits to insert
+ ""
+{
+ /* We only handle single-bit inserts. */
+ if (!CONST_INT_P (operands[1]) || INTVAL (operands[1]) != 1)
+ FAIL;
+
+ /* Either the bit to insert or the position must be constant. */
+ if (CONST_INT_P (operands[3]))
+ operands[3] = GEN_INT (INTVAL (operands[3]) & 1);
+ else if (CONST_INT_P (operands[2]))
+ {
+ emit_insn (gen_rx_insv_reg (operands[0], operands[2], operands[3]));
+ DONE;
+ }
+ else
+ FAIL;
+})
+
+;; Atomic exchange operation.
+
+(define_insn "sync_lock_test_and_setsi"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (match_operand:SI 1 "rx_compare_operand" "=r,Q"))
+ (set (match_dup 1)
+ (match_operand:SI 2 "register_operand" "0,0"))]
+ ""
+ "xchg\t%1, %0"
+ [(set_attr "length" "3,6")
+ (set_attr "timings" "22")]
+)
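+
+;; The pattern above is reached through GCC's __sync_lock_test_and_set
+;; builtin; an illustrative sketch:
+;;
+;;   int try_lock (int *lock) { return __sync_lock_test_and_set (lock, 1); }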
+
+;; Block move functions.
+
+(define_expand "movstr"
+ [(set (match_operand:BLK 1 "memory_operand") ;; Dest
+ (match_operand:BLK 2 "memory_operand")) ;; Source
+ (use (match_operand:SI 0 "register_operand")) ;; Updated Dest
+ ]
+ ""
+ {
+ rtx addr1 = gen_rtx_REG (SImode, 1);
+ rtx addr2 = gen_rtx_REG (SImode, 2);
+ rtx len = gen_rtx_REG (SImode, 3);
+ rtx dest_copy = gen_reg_rtx (SImode);
+
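+    /* SMOVU copies bytes from [r2] to [r1] until a NUL has been copied
+       or r3 bytes have been moved; r3 = -1 effectively removes the
+       length limit.  rx_strend then rescans the result with SUNTIL.B
+       to compute the updated destination pointer for operand 0.  */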
+ emit_move_insn (len, GEN_INT (-1));
+ emit_move_insn (addr1, force_operand (XEXP (operands[1], 0), NULL_RTX));
+ emit_move_insn (addr2, force_operand (XEXP (operands[2], 0), NULL_RTX));
+ operands[1] = replace_equiv_address_nv (operands[1], addr1);
+ operands[2] = replace_equiv_address_nv (operands[2], addr2);
+ emit_move_insn (dest_copy, addr1);
+ emit_insn (gen_rx_movstr ());
+ emit_move_insn (len, GEN_INT (-1));
+ emit_insn (gen_rx_strend (operands[0], dest_copy));
+ DONE;
+ }
+)
+
+(define_insn "rx_movstr"
+ [(set (mem:BLK (reg:SI 1))
+ (mem:BLK (reg:SI 2)))
+ (unspec_volatile:BLK [(reg:SI 1) (reg:SI 2) (reg:SI 3)] UNSPEC_MOVSTR)
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))]
+ ""
+ "smovu"
+ [(set_attr "length" "2")
+ (set_attr "timings" "1111")] ;; The timing is a guesstimate.
+)
+
+(define_insn "rx_strend"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
+ (reg:SI 3)] UNSPEC_STRLEN))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))
+ (clobber (reg:CC CC_REG))
+ ]
+ ""
+ "mov\t%1, r1\n\tmov\t#0, r2\n\tsuntil.b\n\tmov\tr1, %0\n\tsub\t#1, %0"
+ [(set_attr "length" "10")
+ (set_attr "timings" "1111")] ;; The timing is a guesstimate.
+)
+
+(define_expand "movmemsi"
+ [(parallel
+ [(set (match_operand:BLK 0 "memory_operand") ;; Dest
+ (match_operand:BLK 1 "memory_operand")) ;; Source
+ (use (match_operand:SI 2 "register_operand")) ;; Length in bytes
+ (match_operand 3 "immediate_operand") ;; Align
+ (unspec_volatile:BLK [(reg:SI 1) (reg:SI 2) (reg:SI 3)] UNSPEC_MOVMEM)]
+ )]
+ ""
+ {
+ rtx addr1 = gen_rtx_REG (SImode, 1);
+ rtx addr2 = gen_rtx_REG (SImode, 2);
+ rtx len = gen_rtx_REG (SImode, 3);
+
+    /* Do not use when the source or destination is volatile - the SMOVF
+ instruction will read and write in word sized blocks, which may be
+ outside of the valid address range. */
+ if (MEM_P (operands[0]) && MEM_VOLATILE_P (operands[0]))
+ FAIL;
+ if (MEM_P (operands[1]) && MEM_VOLATILE_P (operands[1]))
+ FAIL;
+
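+    /* The set-up moves below load r1 (dest), r2 (src) and r3 (length)
+       in that order; bail out if an operand already lives in one of
+       these registers in a position where an earlier move would
+       clobber it.  */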
+ if (REG_P (operands[0]) && (REGNO (operands[0]) == 2
+ || REGNO (operands[0]) == 3))
+ FAIL;
+ if (REG_P (operands[1]) && (REGNO (operands[1]) == 1
+ || REGNO (operands[1]) == 3))
+ FAIL;
+ if (REG_P (operands[2]) && (REGNO (operands[2]) == 1
+ || REGNO (operands[2]) == 2))
+ FAIL;
+
+ emit_move_insn (addr1, force_operand (XEXP (operands[0], 0), NULL_RTX));
+ emit_move_insn (addr2, force_operand (XEXP (operands[1], 0), NULL_RTX));
+ emit_move_insn (len, force_operand (operands[2], NULL_RTX));
+ operands[0] = replace_equiv_address_nv (operands[0], addr1);
+ operands[1] = replace_equiv_address_nv (operands[1], addr2);
+ emit_insn (gen_rx_movmem ());
+ DONE;
+ }
+)
+
+(define_insn "rx_movmem"
+ [(set (mem:BLK (reg:SI 1))
+ (mem:BLK (reg:SI 2)))
+ (use (reg:SI 3))
+ (unspec_volatile:BLK [(reg:SI 1) (reg:SI 2) (reg:SI 3)] UNSPEC_MOVMEM)
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))]
+ ""
+ "smovf"
+ [(set_attr "length" "2")
+ (set_attr "timings" "1111")] ;; The timing is a guesstimate.
+)
+
+(define_expand "setmemsi"
+ [(set (match_operand:BLK 0 "memory_operand") ;; Dest
+ (match_operand:QI 2 "nonmemory_operand")) ;; Value
+ (use (match_operand:SI 1 "nonmemory_operand")) ;; Length
+ (match_operand 3 "immediate_operand") ;; Align
+ (unspec_volatile:BLK [(reg:SI 1) (reg:SI 2) (reg:SI 3)] UNSPEC_SETMEM)]
+ ""
+ {
+ rtx addr = gen_rtx_REG (SImode, 1);
+ rtx val = gen_rtx_REG (QImode, 2);
+ rtx len = gen_rtx_REG (SImode, 3);
+
+ emit_move_insn (addr, force_operand (XEXP (operands[0], 0), NULL_RTX));
+ emit_move_insn (len, force_operand (operands[1], NULL_RTX));
+ emit_move_insn (val, operands[2]);
+ emit_insn (gen_rx_setmem ());
+ DONE;
+ }
+)
+
+(define_insn "rx_setmem"
+ [(set (mem:BLK (reg:SI 1))
+ (unspec_volatile:BLK [(reg:SI 1) (reg:SI 2) (reg:SI 3)] UNSPEC_SETMEM))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 3))]
+ ""
+ "sstr.b"
+ [(set_attr "length" "2")
+ (set_attr "timings" "1111")] ;; The timing is a guesstimate.
+)
+
+(define_expand "cmpstrnsi"
+ [(set (match_operand:SI 0 "register_operand") ;; Result
+ (unspec_volatile:SI [(match_operand:BLK 1 "memory_operand") ;; String1
+ (match_operand:BLK 2 "memory_operand")] ;; String2
+ UNSPEC_CMPSTRN))
+ (use (match_operand:SI 3 "register_operand")) ;; Max Length
+ (match_operand:SI 4 "immediate_operand")] ;; Known Align
+ ""
+ {
+ rtx str1 = gen_rtx_REG (SImode, 1);
+ rtx str2 = gen_rtx_REG (SImode, 2);
+ rtx len = gen_rtx_REG (SImode, 3);
+
+ emit_move_insn (str1, force_operand (XEXP (operands[1], 0), NULL_RTX));
+ emit_move_insn (str2, force_operand (XEXP (operands[2], 0), NULL_RTX));
+ emit_move_insn (len, force_operand (operands[3], NULL_RTX));
+
+ emit_insn (gen_rx_cmpstrn (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+)
+
+(define_expand "cmpstrsi"
+ [(set (match_operand:SI 0 "register_operand") ;; Result
+ (unspec_volatile:SI [(match_operand:BLK 1 "memory_operand") ;; String1
+ (match_operand:BLK 2 "memory_operand")] ;; String2
+ UNSPEC_CMPSTRN))
+ (match_operand:SI 3 "immediate_operand")] ;; Known Align
+ ""
+ {
+ rtx str1 = gen_rtx_REG (SImode, 1);
+ rtx str2 = gen_rtx_REG (SImode, 2);
+ rtx len = gen_rtx_REG (SImode, 3);
+
+ emit_move_insn (str1, force_reg (SImode, XEXP (operands[1], 0)));
+ emit_move_insn (str2, force_reg (SImode, XEXP (operands[2], 0)));
+ emit_move_insn (len, GEN_INT (-1));
+
+ emit_insn (gen_rx_cmpstrn (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+)
+
+(define_insn "rx_cmpstrn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(reg:SI 1) (reg:SI 2) (reg:SI 3)]
+ UNSPEC_CMPSTRN))
+ (use (match_operand:BLK 1 "memory_operand" "m"))
+ (use (match_operand:BLK 2 "memory_operand" "m"))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "scmpu ; Perform the string comparison
+ mov #-1, %0 ; Set up -1 result (which cannot be created
+ ; by the SC insn)
+ bnc ?+ ; If Carry is not set skip over
+ scne.L %0 ; Set result based on Z flag
+?:
+"
+ [(set_attr "length" "9")
+ (set_attr "timings" "1111")] ;; The timing is a guesstimate.
+)
+
+;; Builtin Functions
+;;
+;; GCC cannot generate the following instructions on its own, so they
+;; are provided as builtins instead.  To use them from a program,
+;; invoke them as __builtin_rx_<insn_name>.  For example:
+;;
+;; int short_byte_swap (int arg) { return __builtin_rx_revw (arg); }
+
+;;---------- Accumulator Support ------------------------
+
+;; Multiply & Accumulate (high)
+(define_insn "machi"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_BUILTIN_MACHI)]
+ ""
+ "machi\t%0, %1"
+ [(set_attr "length" "3")]
+)
+
+;; Multiply & Accumulate (low)
+(define_insn "maclo"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_BUILTIN_MACLO)]
+ ""
+ "maclo\t%0, %1"
+ [(set_attr "length" "3")]
+)
+
+;; Multiply (high)
+(define_insn "mulhi"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_BUILTIN_MULHI)]
+ ""
+ "mulhi\t%0, %1"
+ [(set_attr "length" "3")]
+)
+
+;; Multiply (low)
+(define_insn "mullo"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_BUILTIN_MULLO)]
+ ""
+ "mullo\t%0, %1"
+ [(set_attr "length" "3")]
+)
+
+;; Move from Accumulator (high)
+(define_insn "mvfachi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const_int 0)]
+ UNSPEC_BUILTIN_MVFACHI))]
+ ""
+ "mvfachi\t%0"
+ [(set_attr "length" "3")]
+)
+
+;; Move from Accumulator (middle)
+(define_insn "mvfacmi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const_int 0)]
+ UNSPEC_BUILTIN_MVFACMI))]
+ ""
+ "mvfacmi\t%0"
+ [(set_attr "length" "3")]
+)
+
+;; Move to Accumulator (high)
+(define_insn "mvtachi"
+ [(unspec_volatile:SI [(match_operand:SI 0 "register_operand" "r")]
+ UNSPEC_BUILTIN_MVTACHI)]
+ ""
+ "mvtachi\t%0"
+ [(set_attr "length" "3")]
+)
+
+;; Move to Accumulator (low)
+(define_insn "mvtaclo"
+ [(unspec_volatile:SI [(match_operand:SI 0 "register_operand" "r")]
+ UNSPEC_BUILTIN_MVTACLO)]
+ ""
+ "mvtaclo\t%0"
+ [(set_attr "length" "3")]
+)
+
+;; Round Accumulator
+(define_insn "racw"
+ [(unspec_volatile:SI [(match_operand:SI 0 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_RACW)]
+ ""
+ "racw\t%0"
+ [(set_attr "length" "3")]
+)
+
+;; Repeat multiply and accumulate
+(define_insn "rmpa"
+ [(unspec:SI [(const_int 0) (reg:SI 1) (reg:SI 2) (reg:SI 3)
+ (reg:SI 4) (reg:SI 5) (reg:SI 6)]
+ UNSPEC_BUILTIN_RMPA)
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))]
+ ""
+ "rmpa"
+ [(set_attr "length" "2")
+ (set_attr "timings" "1010")]
+)
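+
+;; A hedged usage sketch for the accumulator builtins above (the
+;; __builtin_rx_* names are GCC's documented RX builtins; the
+;; fixed-point positioning shown is illustrative only):
+;;
+;;   int mac2 (short a, short b, short c, short d)
+;;   {
+;;     __builtin_rx_mvtachi (0);               /* Clear accumulator high.  */
+;;     __builtin_rx_mvtaclo (0);               /* Clear accumulator low.   */
+;;     __builtin_rx_machi (a << 16, b << 16);  /* acc += a * b.            */
+;;     __builtin_rx_machi (c << 16, d << 16);  /* acc += c * d.            */
+;;     return __builtin_rx_mvfachi ();         /* Top 32 bits of acc.      */
+;;   }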
+
+;;---------- Arithmetic ------------------------
+
+;; Byte swap (two 16-bit values).
+(define_insn "revw"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_BUILTIN_REVW))]
+ ""
+ "revw\t%1, %0"
+ [(set_attr "length" "3")]
+)
+
+;; Round to integer.
+(define_insn "lrintsf2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (unspec:SI [(match_operand:SF 1 "rx_compare_operand" "r,Q")]
+ UNSPEC_BUILTIN_ROUND))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "round\t%1, %0"
+ [(set_attr "timings" "22,44")
+ (set_attr "length" "3,5")]
+)
+
+;;---------- Control Registers ------------------------
+
+;; Clear Processor Status Word
+(define_insn "clrpsw"
+ [(unspec_volatile:SI [(match_operand:SI 0 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_CLRPSW)
+ (clobber (reg:CC CC_REG))]
+ ""
+ "clrpsw\t%F0"
+ [(set_attr "length" "2")]
+)
+
+;; Set Processor Status Word
+(define_insn "setpsw"
+ [(unspec_volatile:SI [(match_operand:SI 0 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_SETPSW)
+ (clobber (reg:CC CC_REG))]
+ ""
+ "setpsw\t%F0"
+ [(set_attr "length" "2")]
+)
+
+;; Move from control register
+(define_insn "mvfc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(match_operand:SI 1 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_MVFC))]
+ ""
+ "mvfc\t%C1, %0"
+ [(set_attr "length" "3")]
+)
+
+;; Move to control register
+(define_insn "mvtc"
+ [(unspec_volatile:SI [(match_operand:SI 0 "immediate_operand" "i,i")
+ (match_operand:SI 1 "nonmemory_operand" "r,i")]
+ UNSPEC_BUILTIN_MVTC)]
+ ""
+ "mvtc\t%1, %C0"
+ [(set_attr "length" "3,7")]
+ ;; Ignore possible clobbering of the comparison flags in the
+ ;; PSW register. This is a cc0 target so any cc0 setting
+ ;; instruction will always be paired with a cc0 user, without
+ ;; the possibility of this instruction being placed in between
+ ;; them.
+)
+
+;; Move to interrupt priority level
+(define_insn "mvtipl"
+ [(unspec_volatile:SI [(match_operand:SI 0 "immediate_operand" "Uint04")]
+ UNSPEC_BUILTIN_MVTIPL)]
+ ""
+ "mvtipl\t%0"
+ [(set_attr "length" "3")]
+)
+
+;;---------- Interrupts ------------------------
+
+;; Break
+(define_insn "brk"
+ [(unspec_volatile [(const_int 0)]
+ UNSPEC_BUILTIN_BRK)]
+ ""
+ "brk"
+ [(set_attr "length" "1")
+ (set_attr "timings" "66")]
+)
+
+;; Interrupt
+(define_insn "int"
+ [(unspec_volatile:SI [(match_operand:SI 0 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_INT)]
+ ""
+ "int\t%0"
+ [(set_attr "length" "3")]
+)
+
+;; Wait
+(define_insn "wait"
+ [(unspec_volatile [(const_int 0)]
+ UNSPEC_BUILTIN_WAIT)]
+ ""
+ "wait"
+ [(set_attr "length" "2")]
+)
+
+;;---------- CoProcessor Support ------------------------
+
+;; FIXME: The instructions are currently commented out because
+;; the bit patterns have not been finalized, so the assembler
+;; does not support them. Once they are decided and the assembler
+;; supports them, enable the instructions here.
+
+;; Move from co-processor register
+(define_insn "mvfcp"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "immediate_operand" "i")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_MVFCP))]
+ ""
+ "; mvfcp\t%1, %0, %2"
+ [(set_attr "length" "5")]
+)
+
+;;---------- Misc ------------------------
+
+;; Required by cfglayout.c...
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "length" "1")]
+)
+
+(define_expand "pid_addr"
+ [(plus:SI (match_operand:SI 0)
+ (const:SI (unspec:SI [(match_operand:SI 1)] UNSPEC_PID_ADDR)))]
+ ""
+ ""
+)
+
+(define_insn "movdi"
+ [(set:DI (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (match_operand:DI 1 "general_operand" "rmi"))]
+ "TARGET_ENABLE_LRA"
+ { return rx_gen_move_template (operands, false); }
+ [(set_attr "length" "16")
+ (set_attr "timings" "22")]
+)
+
+(define_insn "movdf"
+ [(set:DF (match_operand:DF 0 "nonimmediate_operand" "=rm")
+ (match_operand:DF 1 "general_operand" "rmi"))]
+ "TARGET_ENABLE_LRA"
+ { return rx_gen_move_template (operands, false); }
+ [(set_attr "length" "16")
+ (set_attr "timings" "22")]
+)
diff --git a/gcc-4.9/gcc/config/rx/rx.opt b/gcc-4.9/gcc/config/rx/rx.opt
new file mode 100644
index 000000000..53e572987
--- /dev/null
+++ b/gcc-4.9/gcc/config/rx/rx.opt
@@ -0,0 +1,141 @@
+; Command line options for the Renesas RX port of GCC.
+; Copyright (C) 2008-2014 Free Software Foundation, Inc.
+; Contributed by Red Hat.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+;---------------------------------------------------
+
+HeaderInclude
+config/rx/rx-opts.h
+
+; The default is -fpu -m32bit-doubles.
+
+m64bit-doubles
+Target RejectNegative Mask(64BIT_DOUBLES) Report
+Store doubles in 64 bits.
+
+m32bit-doubles
+Target RejectNegative InverseMask(64BIT_DOUBLES) Report
+Store doubles in 32 bits. This is the default.
+
+nofpu
+Target RejectNegative Alias(mnofpu)
+Disable the use of RX FPU instructions.
+
+mnofpu
+Target RejectNegative Mask(NO_USE_FPU) Report Undocumented
+
+fpu
+Target RejectNegative InverseMask(NO_USE_FPU) Report
+Enable the use of RX FPU instructions. This is the default.
+
+;---------------------------------------------------
+
+mcpu=
+Target RejectNegative Joined Var(rx_cpu_type) Report ToLower Enum(rx_cpu_types) Init(RX600)
+Specify the target RX cpu type.
+
+Enum
+Name(rx_cpu_types) Type(enum rx_cpu_types)
+
+EnumValue
+Enum(rx_cpu_types) String(rx610) Value(RX610)
+
+EnumValue
+Enum(rx_cpu_types) String(rx200) Value(RX200)
+
+EnumValue
+Enum(rx_cpu_types) String(rx600) Value(RX600)
+
+EnumValue
+Enum(rx_cpu_types) String(rx100) Value(RX100)
+
+;---------------------------------------------------
+
+mbig-endian-data
+Target RejectNegative Mask(BIG_ENDIAN_DATA) Report
+Data is stored in big-endian format.
+
+mlittle-endian-data
+Target RejectNegative InverseMask(BIG_ENDIAN_DATA) Report
+Data is stored in little-endian format. This is the default.
+
+;---------------------------------------------------
+
+msmall-data-limit=
+Target RejectNegative Joined UInteger Var(rx_small_data_limit) Init(0)
+Maximum size of global and static variables which can be placed into the small data area.
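+; (Illustration: -msmall-data-limit=64 would allow objects of 64 bytes
+; or smaller to be placed in the small data area.)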
+
+;---------------------------------------------------
+
+msim
+Target
+Use the simulator runtime.
+
+;---------------------------------------------------
+
+mas100-syntax
+Target Mask(AS100_SYNTAX) Report
+Generate assembler output that is compatible with the Renesas AS100 assembler. This may restrict some of the compiler's capabilities. The default is to generate GAS-compatible syntax.
+
+;---------------------------------------------------
+
+mrelax
+Target
+Enable linker relaxation.
+
+;---------------------------------------------------
+
+mmax-constant-size=
+Target RejectNegative Joined UInteger Var(rx_max_constant_size) Init(0)
+Maximum size in bytes of constant values allowed as operands.
+
+;---------------------------------------------------
+
+mint-register=
+Target RejectNegative Joined UInteger Var(rx_deferred_options) Defer
+Specify the number of registers to reserve for interrupt handlers.
+
+;---------------------------------------------------
+
+msave-acc-in-interrupts
+Target Mask(SAVE_ACC_REGISTER)
+Specify whether interrupt functions should save and restore the accumulator register.
+
+;---------------------------------------------------
+
+mpid
+Target Mask(PID)
+Enable Position-Independent-Data (PID) mode.
+
+;---------------------------------------------------
+
+mwarn-multiple-fast-interrupts
+Target Report Var(rx_warn_multiple_fast_interrupts) Init(1) Warning
+Warn when multiple, different fast interrupt handlers are in the compilation unit.
+
+mgcc-abi
+Target RejectNegative Report Mask(GCC_ABI)
+Enable the use of the old, broken ABI, where all stacked function arguments are aligned to 32 bits.
+
+mrx-abi
+Target RejectNegative Report InverseMask(GCC_ABI)
+Enable the use of the standard RX ABI, where all stacked function arguments are naturally aligned. This is the default.
+
+mlra
+Target Report Mask(ENABLE_LRA)
+Enable the use of the LRA register allocator.
diff --git a/gcc-4.9/gcc/config/rx/t-rx b/gcc-4.9/gcc/config/rx/t-rx
new file mode 100644
index 000000000..e7f6e82e1
--- /dev/null
+++ b/gcc-4.9/gcc/config/rx/t-rx
@@ -0,0 +1,34 @@
+# Makefile fragment for building GCC for the Renesas RX target.
+# Copyright (C) 2008-2014 Free Software Foundation, Inc.
+# Contributed by Red Hat.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published
+# by the Free Software Foundation; either version 3, or (at your
+# option) any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Enable multilibs:
+
+MULTILIB_OPTIONS = m64bit-doubles nofpu mbig-endian-data mpid
+MULTILIB_DIRNAMES = 64-bit-double no-fpu-libs big-endian-data pid
+
+# If necessary uncomment the next two lines to generate multilibs
+# using the old, broken, ABI.
+# MULTILIB_OPTIONS += mgcc-abi
+# MULTILIB_DIRNAMES += gcc-abi
+
+MULTILIB_MATCHES = nofpu=mnofpu nofpu=mcpu?rx200 nofpu=mcpu?rx100
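+# ("?" stands in for "=" inside MULTILIB_MATCHES entries, so
+# "nofpu=mcpu?rx200" reuses the nofpu multilib for -mcpu=rx200, which
+# has no FPU; likewise for -mnofpu and -mcpu=rx100.)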
+
+MULTILIB_EXCEPTIONS =
+MULTILIB_EXTRA_OPTS =