aboutsummaryrefslogtreecommitdiffstats
path: root/gcc-4.9/gcc/config/rl78
diff options
context:
space:
mode:
Diffstat (limited to 'gcc-4.9/gcc/config/rl78')
-rw-r--r--gcc-4.9/gcc/config/rl78/constraints.md386
-rw-r--r--gcc-4.9/gcc/config/rl78/predicates.md71
-rw-r--r--gcc-4.9/gcc/config/rl78/rl78-c.c34
-rw-r--r--gcc-4.9/gcc/config/rl78/rl78-expand.md306
-rw-r--r--gcc-4.9/gcc/config/rl78/rl78-opts.h30
-rw-r--r--gcc-4.9/gcc/config/rl78/rl78-protos.h47
-rw-r--r--gcc-4.9/gcc/config/rl78/rl78-real.md561
-rw-r--r--gcc-4.9/gcc/config/rl78/rl78-virt.md416
-rw-r--r--gcc-4.9/gcc/config/rl78/rl78.c3748
-rw-r--r--gcc-4.9/gcc/config/rl78/rl78.h473
-rw-r--r--gcc-4.9/gcc/config/rl78/rl78.md443
-rw-r--r--gcc-4.9/gcc/config/rl78/rl78.opt55
-rw-r--r--gcc-4.9/gcc/config/rl78/t-rl7827
13 files changed, 6597 insertions, 0 deletions
diff --git a/gcc-4.9/gcc/config/rl78/constraints.md b/gcc-4.9/gcc/config/rl78/constraints.md
new file mode 100644
index 000000000..3f5e49085
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/constraints.md
@@ -0,0 +1,386 @@
+;; Machine Description for Renesas RL78 processors
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+; Constraints in use:
+
+; core:
+; V X g i m n o p r s < >
+; 0..9
+; I..Q - integers
+; Int8 = 0..255
+; Int3 = 1..7
+; J = -255..0
+; K = 1
+; L = -1
+; M = 0
+; N = 2
+; O = -2
+; P = 1..15
+
+; E..H - float constants
+
+; RL78-specific
+; a x b c d e h l w - 8-bit regs
+; A B D T S - 16-bit regs
+; R = all regular registers (A-L)
+; Y - any valid memory
+; Wxx - various memory addressing modes
+; Qxx - conditionals
+; U = usual memory references mov-able to/from AX
+; v = virtual registers
+; Zxx = specific virtual registers
+
+(define_constraint "Int8"
+ "Integer constant in the range 0 @dots{} 255."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 255)")))
+
+(define_constraint "Int3"
+ "Integer constant in the range 1 @dots{} 7."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 1, 7)")))
+
+(define_constraint "Iv08"
+ "@internal
+ Integer constant equal to 8."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 8, 8)")))
+
+(define_constraint "Iv16"
+ "@internal
+ Integer constant equal to 16."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 16, 16)")))
+
+(define_constraint "Iv24"
+ "@internal
+ Integer constant equal to 24."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 24, 24)")))
+
+(define_constraint "Is09"
+ "@internal
+ Integer constant in the range 9 @dots{} 15 (for shifts)."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 9, 15)")))
+
+(define_constraint "Is17"
+ "@internal
+ Integer constant in the range 17 @dots{} 23 (for shifts)."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 17, 23)")))
+
+(define_constraint "Is25"
+ "@internal
+ Integer constant in the range 25 @dots{} 31 (for shifts)."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 25, 31)")))
+
+(define_constraint "ISsi"
+ "@internal
+ Integer constant with bit 31 set."
+ (and (match_code "const_int")
+ (match_test "(ival & 0x80000000) != 0")))
+
+(define_constraint "IShi"
+ "@internal
+ Integer constant with bit 15 set."
+ (and (match_code "const_int")
+ (match_test "(ival & 0x8000) != 0")))
+
+(define_constraint "ISqi"
+ "@internal
+ Integer constant with bit 7 set."
+ (and (match_code "const_int")
+ (match_test "(ival & 0x80) != 0")))
+
+(define_constraint "J"
+ "Integer constant in the range -255 @dots{} 0"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, -255, 0)")))
+
+(define_constraint "K"
+ "Integer constant 1."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 1, 1)")))
+
+(define_constraint "L"
+ "Integer constant -1."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, -1, -1)")))
+
+(define_constraint "M"
+ "Integer constant 0."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 0)")))
+
+(define_constraint "N"
+ "Integer constant 2."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 2, 2)")))
+
+(define_constraint "O"
+ "Integer constant -2."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, -2, -2)")))
+
+(define_constraint "P"
+ "Integer constant 1..15"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 1, 15)")))
+
+(define_register_constraint "R" "QI_REGS"
+ "@code{A} through @code{L} registers.")
+
+(define_register_constraint "a" "AREG"
+ "The @code{A} register.")
+
+(define_register_constraint "x" "XREG"
+ "The @code{X} register.")
+
+(define_register_constraint "b" "BREG"
+ "The @code{B} register.")
+
+(define_register_constraint "c" "CREG"
+ "The @code{C} register.")
+
+(define_register_constraint "d" "DREG"
+ "The @code{D} register.")
+
+(define_register_constraint "e" "EREG"
+ "The @code{E} register.")
+
+(define_register_constraint "h" "HREG"
+ "The @code{H} register.")
+
+(define_register_constraint "l" "LREG"
+ "The @code{L} register.")
+
+(define_register_constraint "w" "PSWREG"
+ "The @code{PSW} register.")
+
+(define_register_constraint "A" "AXREG"
+ "The @code{AX} register.")
+
+(define_register_constraint "B" "BCREG"
+ "The @code{BC} register.")
+
+(define_register_constraint "D" "DEREG"
+ "The @code{DE} register.")
+
+; because H + L = T, assuming A=1.
+(define_register_constraint "T" "HLREG"
+ "The @code{HL} register.")
+
+(define_register_constraint "S" "SPREG"
+ "The @code{SP} register.")
+
+(define_register_constraint "v" "V_REGS"
+ "The virtual registers.")
+
+(define_register_constraint "Z08W" "R8W_REGS"
+ "The R8 register, HImode.")
+
+(define_register_constraint "Z10W" "R10W_REGS"
+ "The R10 register, HImode.")
+
+(define_register_constraint "Zint" "INT_REGS"
+ "The interrupt registers.")
+
+; All the memory addressing schemes the RL78 supports
+; of the form W {register} {bytes of offset}
+; or W {register} {register}
+; Additionally, the Cxx forms are the same as the Wxx forms, but without
+; the ES: override.
+
+; absolute address
+(define_memory_constraint "Cab"
+ "[addr]"
+ (and (match_code "mem")
+ (ior (match_test "CONSTANT_P (XEXP (op, 0))")
+ (match_test "GET_CODE (XEXP (op, 0)) == PLUS && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF"))
+ )
+ )
+(define_memory_constraint "Wab"
+ "es:[addr]"
+ (match_test "(rl78_es_addr (op) && satisfies_constraint_Cab (rl78_es_base (op)))
+ || satisfies_constraint_Cab (op)")
+ )
+
+(define_memory_constraint "Cbc"
+ "word16[BC]"
+ (and (match_code "mem")
+ (ior
+ (and (match_code "reg" "0")
+ (match_test "REGNO (XEXP (op, 0)) == BC_REG"))
+ (and (match_code "plus" "0")
+ (and (and (match_code "reg" "00")
+ (match_test "REGNO (XEXP (XEXP (op, 0), 0)) == BC_REG"))
+ (match_test "uword_operand (XEXP (XEXP (op, 0), 1), VOIDmode)"))))
+ )
+ )
+(define_memory_constraint "Wbc"
+ "es:word16[BC]"
+ (match_test "(rl78_es_addr (op) && satisfies_constraint_Cbc (rl78_es_base (op)))
+ || satisfies_constraint_Cbc (op)")
+ )
+
+(define_memory_constraint "Cde"
+ "[DE]"
+ (and (match_code "mem")
+ (and (match_code "reg" "0")
+ (match_test "REGNO (XEXP (op, 0)) == DE_REG")))
+ )
+(define_memory_constraint "Wde"
+ "es:[DE]"
+ (match_test "(rl78_es_addr (op) && satisfies_constraint_Cde (rl78_es_base (op)))
+ || satisfies_constraint_Cde (op)")
+ )
+
+(define_memory_constraint "Cca"
+ "[AX..HL] for calls"
+ (and (match_code "mem")
+ (and (match_code "reg" "0")
+ (match_test "REGNO (XEXP (op, 0)) <= HL_REG")))
+ )
+(define_memory_constraint "Wca"
+ "es:[AX..HL] for calls"
+ (match_test "(rl78_es_addr (op) && satisfies_constraint_Cca (rl78_es_base (op)))
+ || satisfies_constraint_Cca (op)")
+ )
+
+(define_memory_constraint "Ccv"
+ "[AX..HL,r8-r31] for calls"
+ (and (match_code "mem")
+ (and (match_code "reg" "0")
+ (match_test "REGNO (XEXP (op, 0)) < 32")))
+ )
+(define_memory_constraint "Wcv"
+ "es:[AX..HL,r8-r31] for calls"
+ (match_test "(rl78_es_addr (op) && satisfies_constraint_Ccv (rl78_es_base (op)))
+ || satisfies_constraint_Ccv (op)")
+ )
+
+(define_memory_constraint "Cd2"
+ "word16[DE]"
+ (and (match_code "mem")
+ (ior
+ (and (match_code "reg" "0")
+ (match_test "REGNO (XEXP (op, 0)) == DE_REG"))
+ (and (match_code "plus" "0")
+ (and (and (match_code "reg" "00")
+ (match_test "REGNO (XEXP (XEXP (op, 0), 0)) == DE_REG"))
+ (match_test "uword_operand (XEXP (XEXP (op, 0), 1), VOIDmode)"))))
+ )
+ )
+(define_memory_constraint "Wd2"
+ "es:word16[DE]"
+ (match_test "(rl78_es_addr (op) && satisfies_constraint_Cd2 (rl78_es_base (op)))
+ || satisfies_constraint_Cd2 (op)")
+ )
+
+(define_memory_constraint "Chl"
+ "[HL]"
+ (and (match_code "mem")
+ (and (match_code "reg" "0")
+ (match_test "REGNO (XEXP (op, 0)) == HL_REG")))
+ )
+(define_memory_constraint "Whl"
+ "es:[HL]"
+ (match_test "(rl78_es_addr (op) && satisfies_constraint_Chl (rl78_es_base (op)))
+ || satisfies_constraint_Chl (op)")
+ )
+
+(define_memory_constraint "Ch1"
+ "byte8[HL]"
+ (and (match_code "mem")
+ (and (match_code "plus" "0")
+ (and (and (match_code "reg" "00")
+ (match_test "REGNO (XEXP (XEXP (op, 0), 0)) == HL_REG"))
+ (match_test "ubyte_operand (XEXP (XEXP (op, 0), 1), VOIDmode)"))))
+ )
+(define_memory_constraint "Wh1"
+ "es:byte8[HL]"
+ (match_test "(rl78_es_addr (op) && satisfies_constraint_Ch1 (rl78_es_base (op)))
+ || satisfies_constraint_Ch1 (op)")
+ )
+
+(define_memory_constraint "Chb"
+ "[HL+B]"
+ (and (match_code "mem")
+ (match_test "rl78_hl_b_c_addr_p (XEXP (op, 0))"))
+ )
+(define_memory_constraint "Whb"
+ "es:[HL+B]"
+ (match_test "(rl78_es_addr (op) && satisfies_constraint_Chb (rl78_es_base (op)))
+ || satisfies_constraint_Chb (op)")
+ )
+
+(define_memory_constraint "Cs1"
+ "word8[SP]"
+ (and (match_code "mem")
+ (ior
+ (and (match_code "reg" "0")
+ (match_test "REGNO (XEXP (op, 0)) == SP_REG"))
+ (and (match_code "plus" "0")
+ (and (and (match_code "reg" "00")
+ (match_test "REGNO (XEXP (XEXP (op, 0), 0)) == SP_REG"))
+ (match_test "ubyte_operand (XEXP (XEXP (op, 0), 1), VOIDmode)"))))
+ )
+ )
+(define_memory_constraint "Ws1"
+ "es:word8[SP]"
+ (match_test "(rl78_es_addr (op) && satisfies_constraint_Cs1 (rl78_es_base (op)))
+ || satisfies_constraint_Cs1 (op)")
+ )
+
+(define_memory_constraint "Wfr"
+ "ES/CS far pointer"
+ (and (match_code "mem")
+ (match_test "rl78_far_p (op)"))
+ )
+
+(define_memory_constraint "Y"
+ "any near legitimate memory access"
+ (and (match_code "mem")
+ (match_test "!rl78_far_p (op) && rl78_as_legitimate_address (VOIDmode, XEXP (op, 0), true, ADDR_SPACE_GENERIC)"))
+)
+
+(define_memory_constraint "U"
+ "memory references valid with mov to/from a/ax"
+ (and (match_code "mem")
+ (match_test "rl78_virt_insns_ok ()
+|| satisfies_constraint_Wab (op)
+|| satisfies_constraint_Wbc (op)
+|| satisfies_constraint_Wde (op)
+|| satisfies_constraint_Wd2 (op)
+|| satisfies_constraint_Whl (op)
+|| satisfies_constraint_Wh1 (op)
+|| satisfies_constraint_Whb (op)
+|| satisfies_constraint_Ws1 (op)
+|| satisfies_constraint_Wfr (op) ")))
+
+(define_memory_constraint "Qbi"
+ "built-in compare types"
+ (match_code "eq,ne,gtu,ltu,geu,leu"))
+
+(define_memory_constraint "Qsc"
+ "synthetic compares"
+ (match_code "gt,lt,ge,le"))
diff --git a/gcc-4.9/gcc/config/rl78/predicates.md b/gcc-4.9/gcc/config/rl78/predicates.md
new file mode 100644
index 000000000..e564f4369
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/predicates.md
@@ -0,0 +1,71 @@
+;; Machine Description for Renesas RL78 processors
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_predicate "rl78_any_operand"
+ (ior (match_operand 0 "general_operand")
+ (match_code "mem,const_int,const_double,reg"))
+)
+
+(define_predicate "rl78_nonfar_operand"
+ (and (match_operand 0 "general_operand")
+ (not (match_test "rl78_far_p (op)")))
+)
+
+(define_predicate "rl78_nonfar_nonimm_operand"
+ (and (match_operand 0 "nonimmediate_operand")
+ (not (match_test "rl78_far_p (op)")))
+)
+
+(define_predicate "ubyte_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 255)")))
+
+(define_predicate "rl78_24_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) == 2 || INTVAL (op) == 4")))
+
+(define_predicate "uword_operand"
+ (ior (match_code "const")
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 65536)"))))
+
+(define_predicate "rl78_cmp_operator_signed"
+ (match_code "gt,ge,lt,le"))
+(define_predicate "rl78_cmp_operator_real"
+ (match_code "eq,ne,gtu,ltu,geu,leu"))
+(define_predicate "rl78_cmp_operator"
+ (match_code "eq,ne,gtu,ltu,geu,leu,gt,lt,ge,le"))
+
+(define_predicate "rl78_ax_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == AX_REG || REGNO (op) >= FIRST_PSEUDO_REGISTER")))
+
+(define_predicate "rl78_addw_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == AX_REG || REGNO (op) == SP_REG || REGNO (op) >= FIRST_PSEUDO_REGISTER")))
+
+(define_predicate "rl78_stack_based_mem"
+ (and (match_code "mem")
+ (ior (and (match_code "reg" "0")
+ (match_test "REGNO (XEXP (op, 0)) == SP_REG"))
+ (and (match_code "plus" "0")
+ (and (match_code "reg" "00")
+ (match_test "REGNO (XEXP (XEXP (op, 0), 0)) == SP_REG")
+ (match_code "const_int" "01"))))))
diff --git a/gcc-4.9/gcc/config/rl78/rl78-c.c b/gcc-4.9/gcc/config/rl78/rl78-c.c
new file mode 100644
index 000000000..81e84b9df
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/rl78-c.c
@@ -0,0 +1,34 @@
+/* RL78 C-specific support
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Red Hat, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "c-family/c-common.h"
+#include "rl78-protos.h"
+
+/* Implements REGISTER_TARGET_PRAGMAS. */
+void
+rl78_register_pragmas (void)
+{
+ c_register_addr_space ("__far", ADDR_SPACE_FAR);
+}
diff --git a/gcc-4.9/gcc/config/rl78/rl78-expand.md b/gcc-4.9/gcc/config/rl78/rl78-expand.md
new file mode 100644
index 000000000..f794d7cb1
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/rl78-expand.md
@@ -0,0 +1,306 @@
+;; Machine Description for Renesas RL78 processors
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;;---------- Moving ------------------------
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "nonimmediate_operand")
+ (match_operand:QI 1 "general_operand"))]
+ ""
+ {
+ if (MEM_P (operands[0]) && MEM_P (operands[1]))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ if (rl78_far_p (operands[0]) && rl78_far_p (operands[1]))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+
+ /* FIXME: Not sure how GCC can generate (SUBREG (SYMBOL_REF)),
+ but it does. Since this makes no sense, reject it here. */
+ if (GET_CODE (operands[1]) == SUBREG
+ && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF)
+ FAIL;
+ /* Similarly for (SUBREG (CONST (PLUS (SYMBOL_REF)))).
+ cf. g++.dg/abi/packed.C. */
+ if (GET_CODE (operands[1]) == SUBREG
+ && GET_CODE (XEXP (operands[1], 0)) == CONST
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (XEXP (operands[1], 0), 0), 0)) == SYMBOL_REF)
+ FAIL;
+
+ if (CONST_INT_P (operands[1]) && ! IN_RANGE (INTVAL (operands[1]), (-1 << 8) + 1, (1 << 8) - 1))
+ FAIL;
+ }
+)
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "nonimmediate_operand")
+ (match_operand:HI 1 "general_operand"))]
+ ""
+ {
+ if (MEM_P (operands[0]) && MEM_P (operands[1]))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ if (rl78_far_p (operands[0]) && rl78_far_p (operands[1]))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+
+ /* FIXME: Not sure how GCC can generate (SUBREG (SYMBOL_REF)),
+ but it does. Since this makes no sense, reject it here. */
+ if (GET_CODE (operands[1]) == SUBREG
+ && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF)
+ FAIL;
+ /* Similarly for (SUBREG (CONST (PLUS (SYMBOL_REF)))). */
+ if (GET_CODE (operands[1]) == SUBREG
+ && GET_CODE (XEXP (operands[1], 0)) == CONST
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (XEXP (operands[1], 0), 0), 0)) == SYMBOL_REF)
+ FAIL;
+ }
+)
+
+(define_insn_and_split "movsi"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=vYS,v,Wfr")
+ (match_operand:SI 1 "general_operand" "viYS,Wfr,v"))]
+ ""
+ "#"
+ ""
+ [(set (match_operand:HI 2 "nonimmediate_operand")
+ (match_operand:HI 4 "general_operand"))
+ (set (match_operand:HI 3 "nonimmediate_operand")
+ (match_operand:HI 5 "general_operand"))]
+ "rl78_split_movsi (operands);"
+ [(set_attr "valloc" "op1")]
+)
+
+;;---------- Conversions ------------------------
+
+(define_expand "zero_extendqihi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand")
+ (zero_extend:HI (match_operand:QI 1 "general_operand")))]
+ ""
+ "if (rl78_force_nonfar_2 (operands, gen_zero_extendqihi2))
+ DONE;"
+ )
+
+(define_expand "extendqihi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand")
+ (sign_extend:HI (match_operand:QI 1 "general_operand")))]
+ ""
+ "if (rl78_force_nonfar_2 (operands, gen_extendqihi2))
+ DONE;"
+ )
+
+;;---------- Arithmetic ------------------------
+
+(define_expand "add<mode>3"
+ [(set (match_operand:QHI 0 "nonimmediate_operand")
+ (plus:QHI (match_operand:QHI 1 "general_operand")
+ (match_operand:QHI 2 "general_operand")))
+ ]
+ ""
+ "if (rl78_force_nonfar_3 (operands, gen_add<mode>3))
+ DONE;"
+)
+
+(define_expand "sub<mode>3"
+ [(set (match_operand:QHI 0 "nonimmediate_operand")
+ (minus:QHI (match_operand:QHI 1 "general_operand")
+ (match_operand:QHI 2 "general_operand")))
+ ]
+ ""
+ "if (rl78_force_nonfar_3 (operands, gen_sub<mode>3))
+ DONE;"
+)
+
+(define_expand "neg<mode>2"
+ [(set (match_operand:QHI 0 "nonimmediate_operand")
+ (minus:QHI (const_int 0)
+ (match_operand:QHI 1 "general_operand")))
+ ]
+ ""
+ "if (rl78_force_nonfar_2 (operands, gen_neg<mode>2))
+ DONE;"
+)
+
+(define_expand "umulqihi3"
+ [(set (match_operand:HI 0 "register_operand")
+ (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand"))
+ (zero_extend:HI (match_operand:QI 2 "register_operand"))))]
+ ""
+ ""
+)
+
+(define_expand "andqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand")
+ (and:QI (match_operand:QI 1 "general_operand")
+ (match_operand:QI 2 "general_operand")))
+ ]
+ ""
+ "if (rl78_force_nonfar_3 (operands, gen_andqi3))
+ DONE;"
+)
+
+(define_expand "iorqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand")
+ (ior:QI (match_operand:QI 1 "general_operand")
+ (match_operand:QI 2 "general_operand")))
+ ]
+ ""
+ "if (rl78_force_nonfar_3 (operands, gen_iorqi3))
+ DONE;"
+)
+
+(define_expand "xorqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand")
+ (xor:QI (match_operand:QI 1 "general_operand")
+ (match_operand:QI 2 "general_operand")))
+ ]
+ ""
+ "if (rl78_force_nonfar_3 (operands, gen_xorqi3))
+ DONE;"
+)
+
+(define_expand "one_cmplqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand")
+ (xor:QI (match_operand:QI 1 "general_operand")
+ (const_int -1)))
+ ]
+ ""
+ "if (rl78_force_nonfar_2 (operands, gen_one_cmplqi2))
+ DONE;"
+)
+
+;;---------- Shifts ------------------------
+
+(define_expand "ashl<mode>3"
+ [(set (match_operand:QHI 0 "nonimmediate_operand")
+ (ashift:QHI (match_operand:QHI 1 "general_operand")
+ (match_operand:QI 2 "general_operand")))
+ ]
+ ""
+ "if (rl78_force_nonfar_3 (operands, gen_ashl<mode>3))
+ DONE;"
+)
+
+(define_expand "ashr<mode>3"
+ [(set (match_operand:QHI 0 "nonimmediate_operand")
+ (ashiftrt:QHI (match_operand:QHI 1 "general_operand")
+ (match_operand:QI 2 "general_operand")))
+ ]
+ ""
+ "if (rl78_force_nonfar_3 (operands, gen_ashr<mode>3))
+ DONE;"
+)
+
+(define_expand "lshr<mode>3"
+ [(set (match_operand:QHI 0 "nonimmediate_operand")
+ (lshiftrt:QHI (match_operand:QHI 1 "general_operand")
+ (match_operand:QI 2 "general_operand")))
+ ]
+ ""
+ "if (rl78_force_nonfar_3 (operands, gen_lshr<mode>3))
+ DONE;"
+)
+
+(define_expand "ashrsi3"
+ [(parallel [(set (match_operand:SI 0 "nonimmediate_operand")
+ (ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand")
+ (match_operand:SI 2 "nonmemory_operand")))
+ (clobber (reg:HI X_REG))])
+ ]
+ ""
+ ""
+)
+
+(define_expand "lshrsi3"
+ [(parallel [(set (match_operand:SI 0 "nonimmediate_operand")
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand")
+ (match_operand:SI 2 "nonmemory_operand")))
+ (clobber (reg:HI X_REG))])
+ ]
+ ""
+ ""
+)
+
+(define_expand "ashlsi3"
+ [(parallel [(set (match_operand:SI 0 "nonimmediate_operand")
+ (ashift:SI (match_operand:SI 1 "nonimmediate_operand")
+ (match_operand:SI 2 "nonmemory_operand")))
+ (clobber (reg:HI X_REG))])
+ ]
+ ""
+ ""
+)
+
+;;---------- Branching ------------------------
+
+(define_expand "indirect_jump"
+ [(set (pc)
+ (match_operand:HI 0 "nonimmediate_operand"))]
+ ""
+ ""
+)
+
+(define_expand "call"
+ [(call (match_operand:HI 0 "memory_operand")
+ (match_operand 1 ""))]
+ ""
+ ""
+)
+
+(define_expand "call_value"
+ [(set (match_operand 0 "register_operand")
+ (call (match_operand:HI 1 "memory_operand")
+ (match_operand 2 "")))]
+ ""
+ ""
+)
+
+(define_expand "cbranchqi4"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator"
+ [(match_operand:QI 1 "general_operand")
+ (match_operand:QI 2 "general_operand")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "rl78_expand_compare (operands);"
+)
+
+(define_expand "cbranchhi4"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator"
+ [(match_operand:HI 1 "general_operand")
+ (match_operand:HI 2 "general_operand")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "rl78_expand_compare (operands);"
+)
+
+(define_expand "cbranchsi4"
+ [(parallel [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator"
+ [(match_operand:SI 1 "general_operand")
+ (match_operand:SI 2 "nonmemory_operand")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (reg:HI AX_REG))
+ ])]
+ "1"
+ "rl78_expand_compare (operands);"
+)
diff --git a/gcc-4.9/gcc/config/rl78/rl78-opts.h b/gcc-4.9/gcc/config/rl78/rl78-opts.h
new file mode 100644
index 000000000..95c5278dd
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/rl78-opts.h
@@ -0,0 +1,30 @@
+/* GCC option-handling definitions for the Renesas RL78 processor.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RL78_OPTS_H
+#define RL78_OPTS_H
+
+enum rl78_mul_types
+{
+ MUL_NONE,
+ MUL_RL78,
+ MUL_G13
+};
+
+#endif
diff --git a/gcc-4.9/gcc/config/rl78/rl78-protos.h b/gcc-4.9/gcc/config/rl78/rl78-protos.h
new file mode 100644
index 000000000..a74bda187
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/rl78-protos.h
@@ -0,0 +1,47 @@
+/* Prototypes for Renesas RL78 processors
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+void rl78_emit_eh_epilogue (rtx);
+void rl78_expand_compare (rtx *);
+void rl78_expand_movsi (rtx *);
+void rl78_split_movsi (rtx *);
+int rl78_force_nonfar_2 (rtx *, rtx (*gen)(rtx,rtx));
+int rl78_force_nonfar_3 (rtx *, rtx (*gen)(rtx,rtx,rtx));
+void rl78_expand_eh_epilogue (rtx);
+void rl78_expand_epilogue (void);
+void rl78_expand_prologue (void);
+int rl78_far_p (rtx x);
+int rl78_hard_regno_mode_ok (int, enum machine_mode);
+int rl78_hard_regno_nregs (int, enum machine_mode);
+bool rl78_hl_b_c_addr_p (rtx);
+int rl78_initial_elimination_offset (int, int);
+bool rl78_as_legitimate_address (enum machine_mode, rtx,
+ bool, addr_space_t);
+int rl78_legitimize_reload_address (rtx *, enum machine_mode, int,int, int);
+enum reg_class rl78_mode_code_base_reg_class (enum machine_mode, addr_space_t, int, int);
+bool rl78_peep_movhi_p (rtx *);
+bool rl78_real_insns_ok (void);
+void rl78_register_pragmas (void);
+bool rl78_regno_mode_code_ok_for_base_p (int, enum machine_mode, addr_space_t, int, int);
+void rl78_setup_peep_movhi (rtx *);
+bool rl78_virt_insns_ok (void);
+
+bool rl78_es_addr (rtx);
+rtx rl78_es_base (rtx);
diff --git a/gcc-4.9/gcc/config/rl78/rl78-real.md b/gcc-4.9/gcc/config/rl78/rl78-real.md
new file mode 100644
index 000000000..27ff60fa9
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/rl78-real.md
@@ -0,0 +1,561 @@
+;; Machine Description for Renesas RL78 processors
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; The insns in this file correspond to the actual opcodes the RL78
+;; can issue with real registers. All insns in here should be
+;; conditional on rl78_real_insns_ok() returning true, and should
+;; allow virtual registers in their predicates - the reorg pass that
+;; allocates physical registers uses the constraints to select
+;; registers, but insns with virtual registers MUST match one of these
+;; patterns - other than the constraints - so that the operand info is
+;; properly set up for the alloc pass.
+
+;;---------- Moving ------------------------
+
+(define_insn "movqi_es"
+ [(set (reg:QI ES_REG)
+ (match_operand:QI 0 "register_operand" "a"))]
+ ""
+ "mov\tes, %0"
+)
+
+(define_insn "movqi_cs"
+ [(set (reg:QI CS_REG)
+ (match_operand:QI 0 "register_operand" "a"))]
+ ""
+ "mov\tcs, %0"
+)
+
+(define_insn "*movqi_real"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=g,RaxbcWab,RaxbcWab,a, bcx,R, WabWd2WhlWh1WhbWbcWs1v, bcx")
+ (match_operand 1 "general_operand" "0,K, M, RInt8sJvWabWdeWd2WhlWh1WhbWbcWs1,Wab,aInt8J,a, R"))]
+ "rl78_real_insns_ok ()"
+ "@
+ ; mov\t%0, %1
+ oneb\t%0
+ clrb\t%0
+ mov\t%0, %1
+ mov\t%0, %1
+ mov\t%0, %1
+ mov\t%0, %1
+ mov\t%0, %S1"
+)
+
+(define_insn "*movhi_real"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=g,AB,AB,RSv,A,BDTvSWabWd2WdeWhlWh1WbcWs1, BDT,ABDT,v")
+ (match_operand:HI 1 "general_operand" " 0,K, M, i, BDTvSWabWd2WdeWh1WhlWbcWs1,A, BDT,vS, ABDT"))]
+ "rl78_real_insns_ok ()"
+ "@
+ ; movw\t%0, %1
+ onew\t%0
+ clrw\t%0
+ movw\t%0, %1
+ movw\t%0, %1
+ movw\t%0, %1
+ movw\t%0, %S1
+ movw\t%0, %1
+ movw\t%0, %1"
+)
+
+;;---------- Conversions ------------------------
+
+(define_insn "*zero_extendqihi2_real"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rv,A")
+ (zero_extend:HI (match_operand:QI 1 "general_operand" "0,a")))]
+ "rl78_real_insns_ok ()"
+ "@
+ mov\t%Q0, #0
+ mov\tx, a \;mov\ta, #0"
+ )
+
+(define_insn "*extendqihi2_real"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=A,A")
+ (sign_extend:HI (match_operand:QI 1 "general_operand" "x,a")))]
+ "rl78_real_insns_ok ()"
+ "@
+ shlw\t%0, 8 \;sarw\t%0, 8
+ sarw\t%0, 8"
+ )
+
+;;---------- Arithmetic ------------------------
+
+(define_insn "*addqi3_real"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=rvWabWhlWh1,rvWabWhlWh1,a,*bcdehl")
+ (plus:QI (match_operand:QI 1 "general_operand" "%0,0,0,0")
+ (match_operand:QI 2 "general_operand" "K,L,RWhlWh1i,a")))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ inc\t%0
+ dec\t%0
+ add\t%0, %2
+ add\t%0, %2"
+)
+
+(define_insn "*addhi3_real"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=vABDTWh1Wab,vABDTWh1Wab,v,v,A,S,S,A")
+ (plus:HI (match_operand:HI 1 "general_operand" "%0,0,0,0,0,0,0,S")
+ (match_operand:HI 2 "general_operand" "K,L,N,O,RWh1WhlWabiv,Int8,J,Ri")))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ incw\t%0
+ decw\t%0
+ incw\t%0 \;incw\t%0
+ decw\t%0 \;decw\t%0
+ addw\t%0, %p2
+ addw\t%0, %2
+ subw\t%0, %m2
+ movw\t%0, %1 \;addw\t%0, %2"
+)
+
+(define_insn "*addqihi3a_real"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (plus:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:HI 2 "register_operand" "0")))
+ ]
+ "rl78_real_insns_ok ()"
+ "add\t%q0, %q1 \;addc\t%Q0, #0"
+)
+
+(define_insn "*subqi3_real"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=a,R,v")
+ (minus:QI (match_operand:QI 1 "general_operand" "0,0,0")
+ (match_operand:QI 2 "general_operand" "RiWabWhbWh1Whl,a,i")))
+ ]
+ "rl78_real_insns_ok ()"
+ "sub\t%0, %2"
+)
+
+(define_insn "*subhi3_real"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=A,S")
+ (minus:HI (match_operand:HI 1 "general_operand" "0,0")
+ (match_operand:HI 2 "general_operand" "iBDTWabWh1v,i")))
+ ]
+ "rl78_real_insns_ok ()"
+ "subw\t%0, %2"
+)
+
+(define_insn "*umulhi3_shift_real"
+ [(set (match_operand:HI 0 "register_operand" "=A,A")
+ (mult:HI (match_operand:HI 1 "rl78_nonfar_operand" "0,0")
+ (match_operand:HI 2 "rl78_24_operand" "N,i")))]
+ "rl78_real_insns_ok ()"
+ "@
+ shlw\t%0, 1
+ shlw\t%0, 2"
+)
+
+(define_insn "*umulqihi3_real"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=A")
+ (mult:HI (zero_extend:HI (match_operand:QI 1 "general_operand" "%a"))
+ (zero_extend:HI (match_operand:QI 2 "general_operand" "x"))))]
+ "rl78_real_insns_ok ()"
+ "mulu\t%2"
+)
+
+(define_insn "*andqi3_real"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=A,R,v")
+ (and:QI (match_operand:QI 1 "general_operand" "%0,0,0")
+ (match_operand:QI 2 "general_operand" "iRvWabWhbWh1Whl,A,i")))
+ ]
+ "rl78_real_insns_ok ()"
+ "and\t%0, %2"
+)
+
+(define_insn "*iorqi3_real"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=A,R,v")
+ (ior:QI (match_operand:QI 1 "general_operand" "%0,0,0")
+ (match_operand:QI 2 "general_operand" "iRvWabWhbWh1Whl,A,i")))
+ ]
+ "rl78_real_insns_ok ()"
+ "or\t%0, %2"
+)
+
+(define_insn "*xorqi3_real"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=A,R,v")
+ (xor:QI (match_operand:QI 1 "general_operand" "%0,0,0")
+ (match_operand 2 "general_operand" "iRvWabWhbWh1Whl,A,i")))
+ ]
+ "rl78_real_insns_ok ()"
+ "xor\t%0, %2"
+)
+
+;;---------- Shifts ------------------------
+
+(define_insn "*ashlqi3_real"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=abc,a,a")
+ (ashift:QI (match_operand:QI 1 "general_operand" "0,0,0")
+ (match_operand:QI 2 "general_operand" "Int3,bc,dehl")))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ shl\t%0, %u2
+ cmp0 %2\; bz $2f\; 1: shl\t%0, 1 \;dec %2 \;bnz $1b\;2:
+ inc %2\;dec %2\;bz $2f\;1: shl\t%0, 1 \;dec %2 \;bnz $1b\;2:"
+)
+
+(define_insn "*ashlhi3_real"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=AB,A,A")
+ (ashift:HI (match_operand:HI 1 "general_operand" "0,0,0")
+ (match_operand:QI 2 "general_operand" "P,bc,dehl")))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ shlw\t%0, %u2
+ cmp0 %2\; bz $2f\; 1: shlw\t%0, 1 \;dec %2 \;bnz $1b\;2:
+ inc %2\;dec %2\;bz $2f\;1: shlw\t%0, 1 \;dec %2 \;bnz $1b\;2:"
+)
+
+;;----------
+
+(define_insn "*ashrqi3_real"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=abc,a,a")
+ (ashiftrt:QI (match_operand:QI 1 "general_operand" "0,0,0")
+ (match_operand:QI 2 "general_operand" "Int3,bc,dehl")))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ sar\t%0, %u2
+ cmp0 %2\; bz $2f\; 1: sar\t%0, 1 \;dec %2 \;bnz $1b\;2:
+ inc %2\;dec %2\;bz $2f\;1: sar\t%0, 1\;dec %2 \;bnz $1b\;2:"
+)
+
+(define_insn "*ashrhi3_real"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=AB,A,A")
+ (ashiftrt:HI (match_operand:HI 1 "general_operand" "0,0,0")
+ (match_operand:QI 2 "general_operand" "P,bc,dehl")))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ sarw\t%0, %u2
+ cmp0 %2\; bz $2f\; 1: sarw\t%0, 1 \;dec %2 \;bnz $1b\;2:
+ inc %2\;dec %2\;bz $2f\;1: sarw\t%0, 1\;dec %2\;bnz $1b\;2:"
+)
+
+;;----------
+
+(define_insn "*lshrqi3_real"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=abc,a,a")
+ (lshiftrt:QI (match_operand:QI 1 "general_operand" "0,0,0")
+ (match_operand:QI 2 "general_operand" "Int3,bc,dehl")))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ shr\t%0, %u2
+ cmp0 %2\; bz $2f\; 1: shr\t%0, 1 \;dec %2 \;bnz $1b\;2:
+ inc %2\;dec %2\;bz $2f\;1: shr\t%0, 1\;dec %2\;bnz $1b\;2:"
+)
+
+(define_insn "*lshrhi3_real"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=AB,A,A")
+ (lshiftrt:HI (match_operand:HI 1 "general_operand" "0,0,0")
+ (match_operand:QI 2 "general_operand" "P,bc,dehl")))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ shrw\t%0, %u2
+ cmp0 %2\; bz $2f\; 1: shrw\t%0, 1 \;dec %2 \;bnz $1b\;2:
+ inc %2\;dec %2\;bz $2f\;1: shrw\t%0, 1\;dec %2\;bnz $1b\;2:"
+)
+
+;;---------- Branching ------------------------
+
+(define_insn "*indirect_jump_real"
+ [(set (pc)
+ (match_operand:HI 0 "nonimmediate_operand" "A"))]
+ "rl78_real_insns_ok ()"
+ "br\t%0"
+)
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ ;; $rel8, $!rel16, !abs16, !!abs20
+ "br\t!!%0"
+)
+
+(define_insn "*call_real"
+ [(call (match_operand:HI 0 "memory_operand" "Wab,Wca")
+ (match_operand 1 "" ""))]
+ "rl78_real_insns_ok ()"
+ "@
+ call\t!!%A0
+ call\t%A0"
+ )
+
+(define_insn "*call_value_real"
+ [(set (match_operand 0 "register_operand" "=v,v")
+ (call (match_operand:HI 1 "memory_operand" "Wab,Wca")
+ (match_operand 2 "" "")))]
+ "rl78_real_insns_ok ()"
+ "@
+ call\t!!%A1
+ call\t%A1"
+ )
+
+(define_insn "*cbranchqi4_real_signed"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator_signed"
+ [(match_operand:QI 1 "general_operand" "A,A,A")
+ (match_operand:QI 2 "general_operand" "ISqi,i,v")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "rl78_real_insns_ok ()"
+ "@
+ cmp\t%1, %2 \;xor1 CY,%1.7\;not1 CY\;sk%C0 \;br\t!!%3
+ cmp\t%1, %2 \;xor1 CY,%1.7\;sk%C0 \;br\t!!%3
+ cmp\t%1, %2 \;xor1 CY,%1.7\;xor1 CY,%2.7\;sk%C0 \;br\t!!%3"
+ )
+
+(define_insn "*cbranchqi4_real"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator_real"
+ [(match_operand:QI 1 "general_operand" "Wabvaxbc,a, v,bcdehl")
+ (match_operand:QI 2 "general_operand" "M, irvWabWhlWh1Whb,i,a")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "rl78_real_insns_ok ()"
+ "@
+ cmp0\t%1 \;sk%C0 \;br\t!!%3
+ cmp\t%1, %2 \;sk%C0 \;br\t!!%3
+ cmp\t%1, %2 \;sk%C0 \;br\t!!%3
+ cmp\t%1, %2 \;sk%C0 \;br\t!!%3"
+ )
+
+(define_insn "*cbranchhi4_real_signed"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator_signed"
+ [(match_operand:HI 1 "general_operand" "A,A,A,vR")
+ (match_operand:HI 2 "general_operand" "IShi,i,v,1")])
+ (label_ref (match_operand 3))
+ (pc)))]
+ "rl78_real_insns_ok ()"
+ "@
+ cmpw\t%1, %2 \;xor1 CY,%Q1.7\;not1 CY\;sk%C0 \;br\t!!%3
+ cmpw\t%1, %2 \;xor1 CY,%Q1.7\;sk%C0 \;br\t!!%3
+ cmpw\t%1, %2 \;xor1 CY,%Q1.7\;xor1 CY,%Q2.7\;sk%C0 \;br\t!!%3
+ %z0\t!!%3"
+ )
+
+(define_insn "cbranchhi4_real"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator_real"
+ [(match_operand:HI 1 "general_operand" "A,vR")
+ (match_operand:HI 2 "general_operand" "iBDTvWabWhlWh1,1")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "rl78_real_insns_ok ()"
+ "@
+ cmpw\t%1, %2 \;sk%C0 \;br\t!!%3
+ %z0\t!!%3"
+ )
+
+(define_insn "cbranchhi4_real_inverted"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator_real"
+ [(match_operand:HI 1 "general_operand" "A")
+ (match_operand:HI 2 "general_operand" "iBDTvWabWhlWh1")])
+ (pc)
+ (label_ref (match_operand 3 "" ""))))]
+ "rl78_real_insns_ok ()"
+ "cmpw\t%1, %2 \;sk%C0 \;br\t!!%3"
+ )
+
+(define_insn "*cbranchsi4_real_lt"
+ [(set (pc) (if_then_else
+ (lt (match_operand:SI 0 "general_operand" "U,vWabWhlWh1")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (clobber (reg:HI AX_REG))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ mov a, %E0 \;mov1 CY,a.7 \;sknc \;br\t!!%1
+ mov1 CY,%E0.7 \;sknc \;br\t!!%1"
+ )
+
+(define_insn "*cbranchsi4_real_ge"
+ [(set (pc) (if_then_else
+ (ge (match_operand:SI 0 "general_operand" "U,vWabWhlWh1")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (clobber (reg:HI AX_REG))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ mov a, %E0 \;mov1 CY,a.7 \;skc \;br\t!!%1
+ mov1 CY,%E0.7 \;skc \;br\t!!%1"
+ )
+
+(define_insn "*cbranchsi4_real_signed"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator_signed"
+ [(match_operand:SI 1 "general_operand" "vU,vU,vU,i,i")
+ (match_operand:SI 2 "nonmemory_operand" "ISsi,i,v,S,v")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (reg:HI AX_REG))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ movw ax,%H1 \;cmpw ax, %H2 \;xor1 CY,a.7\;not1 CY\; movw ax,%h1 \;sknz \;cmpw ax, %h2 \;sk%C0 \;br\t!!%3
+ movw ax,%H1 \;cmpw ax, %H2 \;xor1 CY,a.7\; movw ax,%h1 \;sknz \;cmpw ax, %h2 \;sk%C0 \;br\t!!%3
+ movw ax,%H1 \;cmpw ax, %H2 \;xor1 CY,a.7\;xor1 CY,%E2.7\;movw ax,%h1 \;sknz \;cmpw ax, %h2 \;sk%C0 \;br\t!!%3
+   movw ax, %H1\; cmpw ax, %H2\; xor1 CY, a.7\; not1 CY\; movw ax, %h1 \;sknz\; cmpw ax, %h2 \;sk%C0 \;br\t!!%3
+   movw ax, %H1\; cmpw ax, %H2\; xor1 CY, a.7\; movw ax, %h1\; sknz\; cmpw ax, %h2\; sk%C0\; br\t!!%3"
+ )
+
+(define_insn "*cbranchsi4_real"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator_real"
+ [(match_operand:SI 1 "general_operand" "vUi")
+ (match_operand:SI 2 "general_operand" "iWhlWh1v")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (reg:HI AX_REG))
+ ]
+ "rl78_real_insns_ok ()"
+ "movw ax,%H1 \;cmpw ax, %H2 \;movw ax,%h1 \;sknz \;cmpw ax, %h2 \;sk%C0 \;br\t!!%3"
+ )
+
+;; Peephole to match:
+;;
+;; (set (mem (sp)) (ax))
+;; (set (ax) (mem (sp)))
+;; or:
+;; (set (mem (plus (sp) (const)) (ax))
+;; (set (ax) (mem (plus (sp) (const))))
+;;
+;; which can be generated as the last instruction of the conversion
+;; of one virtual insn into a real insn and the first instruction of
+;; the conversion of the following virtual insn.
+
+(define_peephole2
+ [(set (match_operand:HI 0 "rl78_stack_based_mem")
+ (reg:HI AX_REG))
+ (set (reg:HI AX_REG)
+ (match_dup 0))]
+ ""
+ [(set (match_dup 0) (reg:HI AX_REG))]
+ )
+
+;; Bit test and branch insns.
+
+;; NOTE: These patterns will work for bits in other places, not just A.
+
+(define_insn "bf"
+ [(set (pc)
+ (if_then_else (eq (and (reg:QI A_REG)
+ (match_operand 0 "immediate_operand" "n"))
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "bf\tA.%B0, $%1"
+)
+
+(define_insn "bt"
+ [(set (pc)
+ (if_then_else (ne (and (reg:QI A_REG)
+ (match_operand 0 "immediate_operand" "n"))
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "bt\tA.%B0, $%1"
+)
+
+;; NOTE: These peepholes are fragile. They rely upon GCC generating
+;; a specific sequence on insns, based upon examination of test code.
+;; Improvements to GCC or using code other than the test code can result
+;; in the peephole not matching and the optimization being missed.
+
+(define_peephole2
+ [(set (match_operand:QI 0 "register_operand") (reg:QI A_REG))
+ (set (match_dup 0) (and:QI (match_dup 0) (match_operand 1 "immediate_operand")))
+ (set (pc) (if_then_else (eq (match_dup 0) (const_int 0))
+ (label_ref (match_operand 2 ""))
+ (pc)))]
+ "peep2_regno_dead_p (3, REGNO (operands[0]))
+ && exact_log2 (INTVAL (operands[1])) >= 0"
+ [(set (pc) (if_then_else (eq (and (reg:QI A_REG) (match_dup 1)) (const_int 0))
+ (label_ref (match_dup 2))
+ (pc)))]
+ )
+
+(define_peephole2
+ [(set (match_operand:QI 0 "register_operand") (reg:QI A_REG))
+ (set (match_dup 0) (and:QI (match_dup 0) (match_operand 1 "immediate_operand")))
+ (set (pc) (if_then_else (ne (match_dup 0) (const_int 0))
+ (label_ref (match_operand 2 ""))
+ (pc)))]
+ "peep2_regno_dead_p (3, REGNO (operands[0]))
+ && exact_log2 (INTVAL (operands[1])) >= 0"
+ [(set (pc) (if_then_else (ne (and (reg:QI A_REG) (match_dup 1)) (const_int 0))
+ (label_ref (match_dup 2))
+ (pc)))]
+ )
+
+;; Eliminate needless register copies.
+(define_peephole2
+ [(set (match_operand:HI 0 "register_operand") (match_operand:HI 1 "register_operand"))
+ (set (match_operand:HI 2 "register_operand") (match_dup 0))]
+ "peep2_regno_dead_p (2, REGNO (operands[0]))
+ && (REGNO (operands[1]) < 8 || REGNO (operands[2]) < 8)"
+ [(set (match_dup 2) (match_dup 1))]
+ )
+
+;; Eliminate needless register copying when performing bit manipulations.
+(define_peephole2
+ [(set (match_operand:QI 0 "register_operand") (reg:QI A_REG))
+ (set (match_dup 0) (ior:QI (match_dup 0) (match_operand 1 "immediate_operand")))
+ (set (reg:QI A_REG) (match_dup 0))]
+ "peep2_regno_dead_p (3, REGNO (operands[0]))"
+ [(set (reg:QI A_REG) (ior:QI (reg:QI A_REG) (match_dup 1)))]
+ )
+
+(define_peephole2
+ [(set (match_operand:QI 0 "register_operand") (reg:QI A_REG))
+ (set (match_dup 0) (xor:QI (match_dup 0) (match_operand 1 "immediate_operand")))
+ (set (reg:QI A_REG) (match_dup 0))]
+ "peep2_regno_dead_p (3, REGNO (operands[0]))"
+ [(set (reg:QI A_REG) (xor:QI (reg:QI A_REG) (match_dup 1)))]
+ )
+
+(define_peephole2
+ [(set (match_operand:QI 0 "register_operand") (reg:QI A_REG))
+ (set (match_dup 0) (and:QI (match_dup 0) (match_operand 1 "immediate_operand")))
+ (set (reg:QI A_REG) (match_dup 0))]
+ "peep2_regno_dead_p (3, REGNO (operands[0]))"
+ [(set (reg:QI A_REG) (and:QI (reg:QI A_REG) (match_dup 1)))]
+ )
+
+(define_insn "*negandhi3_real"
+ [(set (match_operand:HI 0 "register_operand" "=A")
+ (and:HI (neg:HI (match_operand:HI 1 "register_operand" "0"))
+ (match_operand:HI 2 "immediate_operand" "n")))
+ ]
+ "rl78_real_insns_ok ()"
+ "xor a, #0xff @ xch a, x @ xor a, #0xff @ xch a, x @ addw ax, #1 @ and a, %Q2 @ xch a, x @ and a, %q2 @ xch a, x"
+)
diff --git a/gcc-4.9/gcc/config/rl78/rl78-virt.md b/gcc-4.9/gcc/config/rl78/rl78-virt.md
new file mode 100644
index 000000000..1db37512f
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/rl78-virt.md
@@ -0,0 +1,416 @@
+;; Machine Description for Renesas RL78 processors
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; In this MD file, we define those insn patterns that involve
+;; registers, where such registers are virtual until allocated to a
+;; physical register. All of these insns need to be conditional on
+;; rl78_virt_insns_ok () being true.
+
+;; This tells the physical register allocator what method to use to
+;; allocate registers. Basically, this defines the template of the
+;; instruction - op1 is of the form "a = op(b)", op2 is "a = b op c"
+;; etc.
+
+(define_attr "valloc" "op1,op2,ro1,cmp,umul,macax"
+ (const_string "op2"))
+
+;;---------- Moving ------------------------
+
+(define_insn "*movqi_virt"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=vY,v,Wfr")
+ (match_operand 1 "general_operand" "vInt8JY,Wfr,vInt8J"))]
+ "rl78_virt_insns_ok ()"
+ "v.mov %0, %1"
+ [(set_attr "valloc" "op1")]
+)
+
+(define_insn "*movhi_virt"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=vYS,v,Wfr")
+ (match_operand:HI 1 "general_operand" "viYS,Wfr,v"))]
+ "rl78_virt_insns_ok ()"
+ "v.movw %0, %1"
+ [(set_attr "valloc" "op1")]
+)
+
+;;---------- Conversions ------------------------
+
+(define_insn "*zero_extendqihi2_virt"
+ [(set (match_operand:HI 0 "rl78_nonfar_nonimm_operand" "=vm")
+ (zero_extend:HI (match_operand:QI 1 "general_operand" "vim")))]
+ "rl78_virt_insns_ok ()"
+ "v.zero_extend\t%0, %1"
+ [(set_attr "valloc" "op1")]
+ )
+
+(define_insn "*extendqihi2_virt"
+ [(set (match_operand:HI 0 "rl78_nonfar_nonimm_operand" "=vm")
+ (sign_extend:HI (match_operand:QI 1 "general_operand" "vim")))]
+ "rl78_virt_insns_ok ()"
+ "v.sign_extend\t%0, %1"
+ [(set_attr "valloc" "op1")]
+ )
+
+;;---------- Arithmetic ------------------------
+
+(define_insn "*add<mode>3_virt"
+ [(set (match_operand:QHI 0 "rl78_nonfar_nonimm_operand" "=vY,S")
+ (plus:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "viY,0")
+ (match_operand:QHI 2 "general_operand" "vim,i")))
+ ]
+ "rl78_virt_insns_ok ()"
+ "v.add\t%0, %1, %2"
+)
+
+(define_insn "*sub<mode>3_virt"
+ [(set (match_operand:QHI 0 "rl78_nonfar_nonimm_operand" "=vm,S")
+ (minus:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim,0")
+ (match_operand:QHI 2 "general_operand" "vim,i")))
+ ]
+ "rl78_virt_insns_ok ()"
+ "v.sub\t%0, %1, %2"
+)
+
+(define_insn "*umulhi3_shift_virt"
+ [(set (match_operand:HI 0 "register_operand" "=vm")
+ (mult:HI (match_operand:HI 1 "rl78_nonfar_operand" "%vim")
+ (match_operand:HI 2 "rl78_24_operand" "Ni")))]
+ "rl78_virt_insns_ok ()"
+ "v.mulu\t%0, %1, %2"
+ [(set_attr "valloc" "umul")]
+)
+
+(define_insn "*umulqihi3_virt"
+ [(set (match_operand:HI 0 "register_operand" "=vm")
+ (mult:HI (zero_extend:HI (match_operand:QI 1 "rl78_nonfar_operand" "%vim"))
+ (zero_extend:HI (match_operand:QI 2 "general_operand" "vim"))))]
+ "rl78_virt_insns_ok ()"
+ "v.mulu\t%0, %2"
+ [(set_attr "valloc" "umul")]
+)
+
+(define_insn "*andqi3_virt"
+ [(set (match_operand:QI 0 "rl78_nonfar_nonimm_operand" "=vm")
+ (and:QI (match_operand:QI 1 "rl78_nonfar_operand" "vim")
+ (match_operand:QI 2 "general_operand" "vim")))
+ ]
+ "rl78_virt_insns_ok ()"
+ "v.and\t%0, %1, %2"
+)
+
+(define_insn "*iorqi3_virt"
+ [(set (match_operand:QI 0 "rl78_nonfar_nonimm_operand" "=vm")
+ (ior:QI (match_operand:QI 1 "rl78_nonfar_operand" "vim")
+ (match_operand:QI 2 "general_operand" "vim")))
+ ]
+ "rl78_virt_insns_ok ()"
+ "v.or\t%0, %1, %2"
+)
+
+(define_insn "*xor3_virt"
+ [(set (match_operand:QI 0 "rl78_nonfar_nonimm_operand" "=v,vm,m")
+ (xor:QI (match_operand:QI 1 "rl78_nonfar_operand" "%0,vm,vm")
+ (match_operand 2 "general_operand" "i,vm,vim")))
+ ]
+ "rl78_virt_insns_ok ()"
+ "v.xor\t%0, %1, %2"
+)
+
+;;---------- Shifts ------------------------
+
+(define_insn "*ashl<mode>3_virt"
+ [(set (match_operand:QHI 0 "rl78_nonfar_nonimm_operand" "=vm")
+ (ashift:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
+ (match_operand:QI 2 "general_operand" "vim")))
+ ]
+ "rl78_virt_insns_ok ()"
+ "v.shl\t%0, %1, %2"
+)
+
+(define_insn "*ashr<mode>3_virt"
+ [(set (match_operand:QHI 0 "rl78_nonfar_nonimm_operand" "=vm")
+ (ashiftrt:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
+ (match_operand:QI 2 "general_operand" "vim")))
+ ]
+ "rl78_virt_insns_ok ()"
+ "v.sar\t%0, %1, %2"
+)
+
+(define_insn "*lshr<mode>3_virt"
+ [(set (match_operand:QHI 0 "rl78_nonfar_nonimm_operand" "=vm")
+ (lshiftrt:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
+ (match_operand:QI 2 "general_operand" "vim")))
+ ]
+ "rl78_virt_insns_ok ()"
+ "v.shr\t%0, %1, %2"
+)
+
+;; This is complex mostly because the RL78 has no SImode operations,
+;; and very limited HImode operations, and no variable shifts. This
+;; pattern is optimized for each constant shift count and operand
+;; types, so as to use a hand-optimized pattern. For readability, the
+;; usual \t\; syntax is not used here. Also, there's no easy way to
+;; constrain to avoid partial overlaps, hence the duplication.
+(define_insn "ashrsi3_virt" ;; 0 1 2-7 8 9-15 16 17-23 24 25-31 var
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=v,vU,&vU,v, &vU, &vU, v, &vU, v, &vU, &vU, vU, v,&vU, vU, vU, vU")
+ (ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0, 0, vU,0, vWab, U, 0, vU, 0, vWab,U, vU, 0, vU, vU, vU, 0")
+ (match_operand:SI 2 "nonmemory_operand" "M, K, K, Int3,Int3,Int3,Iv08,Iv08,Is09,Is09,Is09,Iv16,Is17,Is17,Iv24,Is25, iv")))
+ (clobber (reg:HI X_REG))
+ ]
+ ""
+ "@
+ ; ashrsi %0, 0
+
+ movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
+ movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
+
+ movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
+ movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
+ movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
+
+ mov x,%Q1 \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; sarw ax,8 \; movw %H0,ax
+ mov a,%Q1 \; mov x, a \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; sarw ax,8 \; movw %H0,ax
+
+ mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
+ mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
+ mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
+
+ movw ax,%H1 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax
+
+ movw ax,%H1 \; sarw ax,%S2 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax
+ movw ax,%H1 \; sarw ax,%S2 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax
+
+ movw ax,%H1 \; mov %0,a \; sarw ax,15 \; movw %H0,ax \; mov %Q0,a
+
+ movw ax,%H1 \; sar a,%s2 \; mov %0,a \; sarw ax,15 \; movw %H0,ax \; mov %Q0,a
+
+ mov b,%2 \; cmp0 b \; bz $2f \; 1: \; movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a \; dec b \; bnz $1b \; 2:"
+ [(set_attr "valloc" "macax")]
+)
+
+;; Likewise.
+(define_insn "lshrsi3_virt" ;; 0 1 2-7 8 9-15 16 17-23 24 25-31 var
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=v,vU,&vU,v, &vU, &vU, v, &vU, v, &vU, &vU, vU, v,&vU, vU, vU, vU")
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0, 0, vU,0, vWab, U, 0, vU, 0, vWab,U, vU, 0, vU, vU, vU, 0")
+ (match_operand:SI 2 "nonmemory_operand" "M, K, K, Int3,Int3,Int3,Iv08,Iv08,Is09,Is09,Is09,Iv16,Is17,Is17,Iv24,Is25, iv")))
+ (clobber (reg:HI X_REG))
+ ]
+ ""
+ "@
+ ; lshrsi %0, 0
+
+ movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
+ movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
+
+ movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
+ movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
+ movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
+
+ mov x,%Q1 \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; shrw ax,8 \; movw %H0,ax
+ mov a,%Q1 \; mov x, a \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; shrw ax,8 \; movw %H0,ax
+
+ mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
+ mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
+ mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
+
+ movw ax,%H1 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax
+
+ movw ax,%H1 \; shrw ax,%S2 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax
+ movw ax,%H1 \; shrw ax,%S2 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax
+
+ movw ax,%H1 \; mov %0,a \; movw ax,#0 \; movw %H0,ax \; mov %Q0,a
+
+ movw ax,%H1 \; shr a,%s2 \; mov %0,a \; movw ax,#0 \; movw %H0,ax \; mov %Q0,a
+
+ mov b,%2 \; cmp0 b \; bz $2f \; 1: \; movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a \; dec b \; bnz $1b \; 2:"
+ [(set_attr "valloc" "macax")]
+)
+
+;; Likewise.
+(define_insn "ashlsi3_virt" ;; 0 1 2-7 8 9-15 16 17-23 24 25-31 var
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=v,vU,&vU,v, &vU, &vU, v, &vU, v, &vU, &vU, v, U, v,&vU, v, U, v, U, vWab,vU, vU")
+ (ashift:SI (match_operand:SI 1 "nonimmediate_operand" "0, 0, vU,0, vWab, U, 0, vU, 0, vWab,U, vU, vU, 0, vU, vU, vU, vU, vU, 0, vWab,U")
+ (match_operand:SI 2 "nonmemory_operand" "M, K, K, Int3,Int3,Int3,Iv08,Iv08,Is09,Is09,Is09,Iv16,Iv16,Is17,Is17,Iv24,Iv24,Is25,Is25,iv, iv, iv")))
+ (clobber (reg:HI X_REG))
+ ]
+ ""
+ "@
+   ; ashlsi %0, 0
+
+ movw ax,%1 \; shlw ax,1 \; movw %0,ax \; movw ax,%H1 \; rolwc ax,1 \; movw %H0,ax
+ movw ax,%1 \; shlw ax,1 \; movw %0,ax \; movw ax,%H1 \; rolwc ax,1 \; movw %H0,ax
+
+ movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov x,%Q1 \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
+ movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov x,%Q1 \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
+ movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov a,%Q1 \; mov x,a \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
+
+ mov x,%Q1 \; mov a,%H1 \; movw %H0,ax \; movw ax,%1 \; shlw ax,8 \; movw %0,ax
+ mov a,%Q1 \; mov x,a \; mov a,%H1 \; movw %H0,ax \; movw ax,%1 \; shlw ax,8 \; movw %0,ax
+
+ mov x,%Q1 \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
+ mov x,%Q1 \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
+ mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
+
+ movw ax,%1 \; movw %H0,ax \; movw %0,#0
+ movw ax,%1 \; movw %H0,ax \; movw ax,#0 \; movw %0,ax
+
+ movw ax,%1 \; shlw ax,%S2 \; movw %H0,ax \; movw %0,#0
+ movw ax,%1 \; shlw ax,%S2 \; movw %H0,ax \; movw ax,#0 \; movw %0,ax
+
+ mov a,%1 \; movw %H0,ax \; mov %H0,#0 \; movw %0,#0
+   mov a,%1 \; movw %H0,ax \; movw ax,#0 \; mov %H0,a \; movw %0,ax
+
+ mov a,%1 \; shl a,%s2 \; movw %H0,ax \; mov %H0,#0 \; movw %0,#0
+   mov a,%1 \; shl a,%s2 \; movw %H0,ax \; movw ax,#0 \; mov %H0,a \; movw %0,ax
+
+ mov a,%2 \; cmp0 a \; bz $2f \; mov d,a \; movw ax,%H1 \; movw bc,%1 \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; movw %H0,ax \; movw ax,bc \; movw %0,ax \; 2:
+ mov a,%2 \; mov d,a \; movw ax,%H1 \; movw bc,%1 \; cmp0 0xFFEFD \; bz $2f \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; 2: \; movw %H0,ax \; movw ax,bc \; movw %0,ax
+ mov a,%2 \; mov d,a \; movw ax,%1 \; movw bc,ax \; movw ax,%H1 \; cmp0 0xFFEFD \; bz $2f \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; 2: \; movw %H0,ax \; movw ax,bc \; movw %0,ax"
+ [(set_attr "valloc" "macax")]
+ )
+
+;;---------- Branching ------------------------
+
+(define_insn "*indirect_jump_virt"
+ [(set (pc)
+ (match_operand:HI 0 "nonimmediate_operand" "vm"))]
+ "rl78_virt_insns_ok ()"
+ "v.br\t%0"
+ [(set_attr "valloc" "ro1")]
+)
+
+(define_insn "*call_virt"
+ [(call (match_operand:HI 0 "memory_operand" "Wab,Wcv")
+ (match_operand 1 "" ""))]
+ "rl78_virt_insns_ok ()"
+ "v.call\t%0"
+ [(set_attr "valloc" "ro1")]
+ )
+
+(define_insn "*call_value_virt"
+ [(set (match_operand 0 "register_operand" "=v,v")
+ (call (match_operand:HI 1 "memory_operand" "Wab,Wcv")
+ (match_operand 2 "" "")))]
+ "rl78_virt_insns_ok ()"
+ "v.call\t%1"
+ [(set_attr "valloc" "op1")]
+ )
+
+(define_insn "*cbranchqi4_virt_signed"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator_signed"
+ [(match_operand:QI 1 "general_operand" "vim")
+ (match_operand:QI 2 "nonmemory_operand" "vi")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "rl78_virt_insns_ok ()"
+ "v.cmp\t%1, %2\\n\tv.b%C0\t%3"
+ [(set_attr "valloc" "cmp")]
+ )
+
+(define_insn "*cbranchqi4_virt"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator_real"
+ [(match_operand:QI 1 "general_operand" "vim")
+ (match_operand:QI 2 "general_operand" "vim")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "rl78_virt_insns_ok ()"
+ "v.cmp\t%1, %2\\n\tv.b%C0\t%3"
+ [(set_attr "valloc" "cmp")]
+ )
+
+(define_insn "*cbranchhi4_virt_signed"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator_signed"
+ [(match_operand:HI 1 "general_operand" "vim")
+ (match_operand:HI 2 "nonmemory_operand" "vi")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "rl78_virt_insns_ok ()"
+ "v.cmpw\t%1, %2\\n\tv.b%C0\t%3"
+ [(set_attr "valloc" "cmp")]
+ )
+
+(define_insn "*cbranchhi4_virt"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator_real"
+ [(match_operand:HI 1 "general_operand" "vim")
+ (match_operand:HI 2 "general_operand" "vim")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "rl78_virt_insns_ok ()"
+ "v.cmpw\t%1, %2\\n\tv.b%C0\t%3"
+ [(set_attr "valloc" "cmp")]
+ )
+
+(define_insn "*cbranchsi4_virt"
+ [(set (pc) (if_then_else
+ (match_operator 0 "rl78_cmp_operator"
+ [(match_operand:SI 1 "general_operand" "vim")
+ (match_operand:SI 2 "nonmemory_operand" "vi")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (reg:HI AX_REG))
+ ]
+ "rl78_virt_insns_ok ()"
+ "v.cmpd\t%1, %2\\n\tv.b%C0\t%3"
+ [(set_attr "valloc" "macax")]
+ )
+
+;;---------- Peepholes ------------------------
+
+(define_peephole2
+ [(set (match_operand:QI 0 "" "")
+ (match_operand:QI 1 "" ""))
+ (set (match_operand:QI 2 "" "")
+ (match_operand:QI 3 "" ""))]
+ "rl78_peep_movhi_p (operands)"
+ [(set (match_dup 4)
+ (match_dup 5))]
+ "rl78_setup_peep_movhi (operands);"
+ )
+
+(define_peephole2
+ [(set (reg:QI A_REG)
+ (match_operand:QI 1 "" ""))
+ (set (match_operand:QI 0 "" "")
+ (reg:QI A_REG))
+ (set (reg:QI A_REG)
+ (match_operand:QI 3 "" ""))
+ (set (match_operand:QI 2 "" "")
+ (reg:QI A_REG))
+ ]
+ "rl78_peep_movhi_p (operands)"
+ [(set (reg:HI AX_REG)
+ (match_dup 5))
+ (set (match_dup 4)
+ (reg:HI AX_REG))
+ ]
+ "rl78_setup_peep_movhi (operands);"
+ )
+
+(define_insn "*negandhi3_virt"
+ [(set (match_operand:HI 0 "register_operand" "=v")
+ (and:HI (neg:HI (match_operand:HI 1 "register_operand" "0"))
+ (match_operand:HI 2 "immediate_operand" "n")))
+ ]
+ "rl78_virt_insns_ok ()"
+ "v.nand\t%0, %1, %2"
+)
diff --git a/gcc-4.9/gcc/config/rl78/rl78.c b/gcc-4.9/gcc/config/rl78/rl78.c
new file mode 100644
index 000000000..b5cd2ad75
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/rl78.c
@@ -0,0 +1,3748 @@
+/* Subroutines used for code generation on Renesas RL78 processors.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "function.h"
+#include "expr.h"
+#include "optabs.h"
+#include "libfuncs.h"
+#include "recog.h"
+#include "diagnostic-core.h"
+#include "toplev.h"
+#include "reload.h"
+#include "df.h"
+#include "ggc.h"
+#include "tm_p.h"
+#include "debug.h"
+#include "target.h"
+#include "target-def.h"
+#include "langhooks.h"
+#include "rl78-protos.h"
+#include "dumpfile.h"
+#include "tree-pass.h"
+#include "context.h"
+#include "tm-constrs.h" /* for satisfies_constraint_*(). */
+#include "insn-flags.h" /* for gen_*(). */
+
+/* Forward declarations for routines defined later in this file. */
+static inline bool is_interrupt_func (const_tree decl);
+static inline bool is_brk_interrupt_func (const_tree decl);
+static void rl78_reorg (void);
+
+
+/* Debugging statements are tagged with DEBUG0 only so that they can
+   be easily enabled individually, by replacing the '0' with '1' as
+   needed. */
+#define DEBUG0 0
+#define DEBUG1 1
+
+/* REGISTER_NAMES has the names for individual 8-bit registers, but
+   these have the names we need to use when referring to 16-bit
+   register pairs.  Indexed by hard register number. */
+static const char * const word_regnames[] =
+{
+  "ax", "AX", "bc", "BC", "de", "DE", "hl", "HL",
+  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+  "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+  "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+  "sp", "ap", "psw", "es", "cs"
+};
+
+/* Per-function back-end state, hung off cfun->machine. */
+struct GTY(()) machine_function
+{
+  /* If set, the rest of the fields have been computed. */
+  int computed;
+  /* Which register pairs need to be pushed in the prologue.
+     Indexed by pair number (hard regno / 2). */
+  int need_to_push [FIRST_PSEUDO_REGISTER / 2];
+
+  /* These fields describe the frame layout... */
+  /* arg pointer */
+  /* 4 bytes for saved PC */
+  int framesize_regs;
+  /* frame pointer */
+  int framesize_locals;
+  int framesize_outgoing;
+  /* stack pointer */
+  int framesize;
+
+  /* If set, recog is allowed to match against the "real" patterns. */
+  int real_insns_ok;
+  /* If set, recog is allowed to match against the "virtual" patterns. */
+  int virt_insns_ok;
+  /* Set if the current function needs to clean up any trampolines. */
+  int trampolines_used;
+};
+
+/* This is our init_machine_status, as set in
+   rl78_option_override. */
+static struct machine_function *
+rl78_init_machine_status (void)
+{
+  struct machine_function *m;
+
+  /* Start out matching only the virtual patterns; devirtualization
+     later switches a function over to the real patterns. */
+  m = ggc_alloc_cleared_machine_function ();
+  m->virt_insns_ok = 1;
+
+  return m;
+}
+
+/* Returns whether to run the devirtualization pass.  Always true;
+   the pass itself is mandatory for correct code. */
+static bool
+devirt_gate (void)
+{
+  return true;
+}
+
+/* Runs the devirtualization pass (wraps rl78_reorg for the pass
+   manager). */
+static unsigned int
+devirt_pass (void)
+{
+  rl78_reorg ();
+  return 0;
+}
+
+/* This pass converts virtual instructions using virtual registers, to
+   real instructions using real registers.  Rather than run it as
+   reorg, we reschedule it before vartrack to help with debugging. */
+namespace {
+
+const pass_data pass_data_rl78_devirt =
+{
+  RTL_PASS, /* type */
+  "devirt", /* name */
+  OPTGROUP_NONE, /* optinfo_flags */
+  true, /* has_gate */
+  true, /* has_execute */
+  TV_MACH_DEP, /* tv_id */
+  0, /* properties_required */
+  0, /* properties_provided */
+  0, /* properties_destroyed */
+  0, /* todo_flags_start */
+  0, /* todo_flags_finish */
+};
+
+class pass_rl78_devirt : public rtl_opt_pass
+{
+public:
+  pass_rl78_devirt(gcc::context *ctxt)
+    : rtl_opt_pass(pass_data_rl78_devirt, ctxt)
+    {
+    }
+
+  /* opt_pass methods: */
+  bool gate () { return devirt_gate (); }
+  unsigned int execute () { return devirt_pass (); }
+};
+
+} // anon namespace
+
+/* Factory; the pass is registered from rl78_asm_file_start below. */
+rtl_opt_pass *
+make_pass_rl78_devirt (gcc::context *ctxt)
+{
+  return new pass_rl78_devirt (ctxt);
+}
+
+/* Redundant move elimination pass.  Must be run after the basic block
+   reordering pass for the best effect. */
+
+static unsigned int
+move_elim_pass (void)
+{
+  rtx insn, ninsn, prev = NULL_RTX;
+
+  for (insn = get_insns (); insn; insn = ninsn)
+    {
+      rtx set;
+
+      ninsn = next_nonnote_nondebug_insn (insn);
+
+      /* PREV tracks the immediately preceding single-SET insn; any
+	 non-SET insn in between resets it. */
+      if ((set = single_set (insn)) == NULL_RTX)
+	{
+	  prev = NULL_RTX;
+	  continue;
+	}
+
+      /* If we have two SET insns in a row (without anything
+	 between them) and the source of the second one is the
+	 destination of the first one, and vice versa, then we
+	 can eliminate the second SET. */
+      if (prev
+	  && rtx_equal_p (SET_DEST (prev), SET_SRC (set))
+	  && rtx_equal_p (SET_DEST (set), SET_SRC (prev))
+	  )
+	{
+	  if (dump_file)
+	    fprintf (dump_file, " Delete insn %d because it is redundant\n",
+		     INSN_UID (insn));
+
+	  delete_insn (insn);
+	  prev = NULL_RTX;
+	}
+      else
+	prev = set;
+    }
+
+  if (dump_file)
+    print_rtl_with_bb (dump_file, get_insns (), 0);
+
+  return 0;
+}
+
+namespace {
+
+const pass_data pass_data_rl78_move_elim =
+{
+  RTL_PASS, /* type */
+  "move_elim", /* name */
+  OPTGROUP_NONE, /* optinfo_flags */
+  true, /* has_gate */
+  true, /* has_execute */
+  TV_MACH_DEP, /* tv_id */
+  0, /* properties_required */
+  0, /* properties_provided */
+  0, /* properties_destroyed */
+  0, /* todo_flags_start */
+  0, /* todo_flags_finish */
+};
+
+class pass_rl78_move_elim : public rtl_opt_pass
+{
+public:
+  pass_rl78_move_elim(gcc::context *ctxt)
+    : rtl_opt_pass(pass_data_rl78_move_elim, ctxt)
+    {
+    }
+
+  /* opt_pass methods: */
+  bool gate () { return devirt_gate (); }
+  unsigned int execute () { return move_elim_pass (); }
+};
+
+} // anon namespace
+
+/* Factory; registered alongside the devirt pass below. */
+rtl_opt_pass *
+make_pass_rl78_move_elim (gcc::context *ctxt)
+{
+  return new pass_rl78_move_elim (ctxt);
+}
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START rl78_asm_file_start
+
+/* Emit assembler equates mapping the "virtual" registers r8..r31 to
+   fixed memory addresses, and register the two machine-specific
+   passes.  (Pass registration is done here because this hook runs
+   exactly once, early.) */
+static void
+rl78_asm_file_start (void)
+{
+  int i;
+
+  if (TARGET_G10)
+    {
+      /* The memory used is 0xffec8 to 0xffedf; real registers are in
+	 0xffee0 to 0xffee7. */
+      for (i = 8; i < 32; i++)
+	fprintf (asm_out_file, "r%d\t=\t0x%x\n", i, 0xffec0 + i);
+    }
+  else
+    {
+      for (i = 0; i < 8; i++)
+	{
+	  fprintf (asm_out_file, "r%d\t=\t0x%x\n", 8 + i, 0xffef0 + i);
+	  fprintf (asm_out_file, "r%d\t=\t0x%x\n", 16 + i, 0xffee8 + i);
+	  fprintf (asm_out_file, "r%d\t=\t0x%x\n", 24 + i, 0xffee0 + i);
+	}
+    }
+
+  opt_pass *rl78_devirt_pass = make_pass_rl78_devirt (g);
+  static struct register_pass_info rl78_devirt_info =
+    {
+      rl78_devirt_pass,
+      "pro_and_epilogue",
+      1,
+      PASS_POS_INSERT_BEFORE
+    };
+
+  opt_pass *rl78_move_elim_pass = make_pass_rl78_move_elim (g);
+  static struct register_pass_info rl78_move_elim_info =
+    {
+      rl78_move_elim_pass,
+      "bbro",
+      1,
+      PASS_POS_INSERT_AFTER
+    };
+
+  register_pass (& rl78_devirt_info);
+  register_pass (& rl78_move_elim_info);
+}
+
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE rl78_option_override
+
+/* Force the flag settings this port requires and, for -mallregs,
+   release the normally-fixed registers 24..31 to the allocator. */
+static void
+rl78_option_override (void)
+{
+  flag_omit_frame_pointer = 1;
+  flag_no_function_cse = 1;
+  flag_split_wide_types = 0;
+
+  init_machine_status = rl78_init_machine_status;
+
+  if (TARGET_ALLREGS)
+    {
+      int i;
+
+      for (i = 24; i < 32; i++)
+	fixed_regs[i] = 0;
+    }
+}
+
+/* Most registers are 8 bits.  Some are 16 bits because, for example,
+   gcc doesn't like dealing with $FP as a register pair (the second
+   half of $fp is also 2 to keep reload happy wrt register pairs, but
+   no register class includes it).  This table maps register numbers
+   to size in bytes. */
+static const int register_sizes[] =
+{
+  1, 1, 1, 1, 1, 1, 1, 1,
+  1, 1, 1, 1, 1, 1, 1, 1,
+  1, 1, 1, 1, 1, 1, 2, 2,
+  1, 1, 1, 1, 1, 1, 1, 1,
+  2, 2, 1, 1, 1
+};
+
+/* Predicates used in the MD patterns.  This one is true when virtual
+   insns may be matched, which typically means before (or during) the
+   devirt pass.  Outside any function (cfun == NULL) default to
+   allowing the virtual patterns. */
+bool
+rl78_virt_insns_ok (void)
+{
+  if (cfun)
+    return cfun->machine->virt_insns_ok;
+  return true;
+}
+
+/* Predicates used in the MD patterns.  This one is true when real
+   insns may be matched, which typically means after (or during) the
+   devirt pass. */
+bool
+rl78_real_insns_ok (void)
+{
+  if (cfun)
+    return cfun->machine->real_insns_ok;
+  return false;
+}
+
+/* Implements HARD_REGNO_NREGS: number of consecutive hard registers
+   of size register_sizes[REGNO] needed to hold a value of MODE. */
+int
+rl78_hard_regno_nregs (int regno, enum machine_mode mode)
+{
+  int rs = register_sizes[regno];
+  if (rs < 1)
+    rs = 1;
+  return ((GET_MODE_SIZE (mode) + rs - 1) / rs);
+}
+
+/* Implements HARD_REGNO_MODE_OK.  Returns nonzero if a value of mode
+   MODE may live in hard register REGNO. */
+int
+rl78_hard_regno_mode_ok (int regno, enum machine_mode mode)
+{
+  int s = GET_MODE_SIZE (mode);
+
+  if (s < 1)
+    return 0;
+  /* These are not to be used by gcc. */
+  if (regno == 23 || regno == ES_REG || regno == CS_REG)
+    return 0;
+  /* $fp can always be accessed as a 16-bit value. */
+  if (regno == FP_REG && s == 2)
+    return 1;
+  if (regno < SP_REG)
+    {
+      /* Since a reg-reg move is really a reg-mem move, we must
+	 enforce alignment. */
+      if (s > 1 && (regno % 2))
+	return 0;
+      return 1;
+    }
+  /* BUGFIX: compare the register number against CC_REGNUM, not the
+     mode size S; the old "s == CC_REGNUM" could never express the
+     intended "condition-code register holds only BImode" rule. */
+  if (regno == CC_REGNUM)
+    return (mode == BImode);
+  /* All other registers must be accessed in their natural sizes. */
+  if (s == register_sizes [regno])
+    return 1;
+  return 0;
+}
+
+/* Simplify_gen_subreg() doesn't handle memory references the way we
+   need it to below, so we use this function for when we must get a
+   valid subreg in a "natural" state.  For a MEM this re-addresses the
+   reference instead of wrapping it in a SUBREG. */
+static rtx
+rl78_subreg (enum machine_mode mode, rtx r, enum machine_mode omode, int byte)
+{
+  if (GET_CODE (r) == MEM)
+    return adjust_address (r, mode, byte);
+  else
+    return simplify_gen_subreg (mode, r, omode, byte);
+}
+
+/* Used by movsi.  Split SImode moves into two HImode moves, using
+   appropriate patterns for the upper and lower halves of symbols. */
+void
+rl78_expand_movsi (rtx *operands)
+{
+  rtx op00, op02, op10, op12;
+
+  op00 = rl78_subreg (HImode, operands[0], SImode, 0);
+  op02 = rl78_subreg (HImode, operands[0], SImode, 2);
+  if (GET_CODE (operands[1]) == CONST
+      || GET_CODE (operands[1]) == SYMBOL_REF)
+    {
+      /* Symbolic sources: extract the low and high 16 bits with
+	 ZERO_EXTRACT so the assembler sees %lo16/%hi16-style refs. */
+      op10 = gen_rtx_ZERO_EXTRACT (HImode, operands[1], GEN_INT (16), GEN_INT (0));
+      op10 = gen_rtx_CONST (HImode, op10);
+      op12 = gen_rtx_ZERO_EXTRACT (HImode, operands[1], GEN_INT (16), GEN_INT (16));
+      op12 = gen_rtx_CONST (HImode, op12);
+    }
+  else
+    {
+      op10 = rl78_subreg (HImode, operands[1], SImode, 0);
+      op12 = rl78_subreg (HImode, operands[1], SImode, 2);
+    }
+
+  /* Order the two half-moves so an overlapping destination does not
+     clobber a source half before it is read. */
+  if (rtx_equal_p (operands[0], operands[1]))
+    ;
+  else if (rtx_equal_p (op00, op12))
+    {
+      emit_move_insn (op02, op12);
+      emit_move_insn (op00, op10);
+    }
+  else
+    {
+      emit_move_insn (op00, op10);
+      emit_move_insn (op02, op12);
+    }
+}
+
+/* Generate code to move an SImode value.  Same splitting logic as
+   rl78_expand_movsi, but fills operands[2..5] for a define_split
+   instead of emitting insns directly. */
+void
+rl78_split_movsi (rtx *operands)
+{
+  rtx op00, op02, op10, op12;
+
+  op00 = rl78_subreg (HImode, operands[0], SImode, 0);
+  op02 = rl78_subreg (HImode, operands[0], SImode, 2);
+
+  if (GET_CODE (operands[1]) == CONST
+      || GET_CODE (operands[1]) == SYMBOL_REF)
+    {
+      op10 = gen_rtx_ZERO_EXTRACT (HImode, operands[1], GEN_INT (16), GEN_INT (0));
+      op10 = gen_rtx_CONST (HImode, op10);
+      op12 = gen_rtx_ZERO_EXTRACT (HImode, operands[1], GEN_INT (16), GEN_INT (16));
+      op12 = gen_rtx_CONST (HImode, op12);
+    }
+  else
+    {
+      op10 = rl78_subreg (HImode, operands[1], SImode, 0);
+      op12 = rl78_subreg (HImode, operands[1], SImode, 2);
+    }
+
+  if (rtx_equal_p (operands[0], operands[1]))
+    ;
+  else if (rtx_equal_p (op00, op12))
+    {
+      operands[2] = op02;
+      operands[4] = op12;
+      operands[3] = op00;
+      operands[5] = op10;
+    }
+  else
+    {
+      operands[2] = op00;
+      operands[4] = op10;
+      operands[3] = op02;
+      operands[5] = op12;
+    }
+}
+
+/* Used by various two-operand expanders which cannot accept all
+   operands in the "far" namespace.  Force some such operands into
+   registers so that each pattern has at most one far operand.
+   Returns 1 (and emits the whole operation) if it took over,
+   0 if the caller should emit the pattern itself. */
+int
+rl78_force_nonfar_2 (rtx *operands, rtx (*gen)(rtx,rtx))
+{
+  int did = 0;
+  rtx temp_reg = NULL;
+
+  /* FIXME: in the future, be smarter about only doing this if the
+     other operand is also far, assuming the devirtualizer can also
+     handle that. */
+  if (rl78_far_p (operands[0]))
+    {
+      temp_reg = operands[0];
+      operands[0] = gen_reg_rtx (GET_MODE (operands[0]));
+      did = 1;
+    }
+  if (!did)
+    return 0;
+
+  emit_insn (gen (operands[0], operands[1]));
+  if (temp_reg)
+    emit_move_insn (temp_reg, operands[0]);
+  return 1;
+}
+
+/* Likewise, but for three-operand expanders. */
+int
+rl78_force_nonfar_3 (rtx *operands, rtx (*gen)(rtx,rtx,rtx))
+{
+  int did = 0;
+  rtx temp_reg = NULL;
+
+  /* FIXME: Likewise. */
+  if (rl78_far_p (operands[1]))
+    {
+      /* NOTE: this TEMP_REG intentionally shadows the outer one; a far
+	 *source* only needs copying in beforehand, never back out. */
+      rtx temp_reg = gen_reg_rtx (GET_MODE (operands[1]));
+      emit_move_insn (temp_reg, operands[1]);
+      operands[1] = temp_reg;
+      did = 1;
+    }
+  if (rl78_far_p (operands[0]))
+    {
+      temp_reg = operands[0];
+      operands[0] = gen_reg_rtx (GET_MODE (operands[0]));
+      did = 1;
+    }
+  if (!did)
+    return 0;
+
+  emit_insn (gen (operands[0], operands[1], operands[2]));
+  if (temp_reg)
+    emit_move_insn (temp_reg, operands[0]);
+  return 1;
+}
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE rl78_can_eliminate
+
+/* All register eliminations this port declares are always valid. */
+static bool
+rl78_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to ATTRIBUTE_UNUSED)
+{
+  return true;
+}
+
+/* Returns true if the given register needs to be saved by the
+   current function. */
+static bool
+need_to_save (unsigned int regno)
+{
+  if (is_interrupt_func (cfun->decl))
+    {
+      /* We don't know what devirt will need */
+      if (regno < 8)
+	return true;
+
+      /* We don't need to save registers that have
+	 been reserved for interrupt handlers. */
+      if (regno > 23)
+	return false;
+
+      /* If the handler is a non-leaf function then it may call
+	 non-interrupt aware routines which will happily clobber
+	 any call_used registers, so we have to preserve them. */
+      if (!crtl->is_leaf && call_used_regs[regno])
+	return true;
+
+      /* Otherwise we only have to save a register, call_used
+	 or not, if it is used by this handler. */
+      return df_regs_ever_live_p (regno);
+    }
+
+  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
+    return true;
+  if (fixed_regs[regno])
+    return false;
+  if (crtl->calls_eh_return)
+    return true;
+  if (df_regs_ever_live_p (regno)
+      && !call_used_regs[regno])
+    return true;
+  return false;
+}
+
+/* We use this to wrap all emitted insns in the prologue, marking them
+   frame-related for DWARF CFI generation. */
+static rtx
+F (rtx x)
+{
+  RTX_FRAME_RELATED_P (x) = 1;
+  return x;
+}
+
+/* Compute all the frame-related fields in our machine_function
+   structure. */
+static void
+rl78_compute_frame_info (void)
+{
+  int i;
+
+  cfun->machine->computed = 1;
+  cfun->machine->framesize_regs = 0;
+  cfun->machine->framesize_locals = get_frame_size ();
+  cfun->machine->framesize_outgoing = crtl->outgoing_args_size;
+
+  /* Registers are pushed in pairs, two bytes at a time. */
+  for (i = 0; i < 16; i ++)
+    if (need_to_save (i * 2) || need_to_save (i * 2 + 1))
+      {
+	cfun->machine->need_to_push [i] = 1;
+	cfun->machine->framesize_regs += 2;
+      }
+    else
+      cfun->machine->need_to_push [i] = 0;
+
+  /* Keep the stack pointer 2-byte aligned. */
+  if ((cfun->machine->framesize_locals + cfun->machine->framesize_outgoing) & 1)
+    cfun->machine->framesize_locals ++;
+
+  cfun->machine->framesize = (cfun->machine->framesize_regs
+			      + cfun->machine->framesize_locals
+			      + cfun->machine->framesize_outgoing);
+}
+
+/* Returns true if the provided function has the specified attribute.
+   DECL may be NULL_TREE, meaning the current function. */
+static inline bool
+has_func_attr (const_tree decl, const char * func_attr)
+{
+  if (decl == NULL_TREE)
+    decl = current_function_decl;
+
+  return lookup_attribute (func_attr, DECL_ATTRIBUTES (decl)) != NULL_TREE;
+}
+
+/* Returns true if the provided function has the "interrupt" attribute
+   (either kind of interrupt counts). */
+static inline bool
+is_interrupt_func (const_tree decl)
+{
+  return has_func_attr (decl, "interrupt") || has_func_attr (decl, "brk_interrupt");
+}
+
+/* Returns true if the provided function has the "brk_interrupt" attribute. */
+static inline bool
+is_brk_interrupt_func (const_tree decl)
+{
+  return has_func_attr (decl, "brk_interrupt");
+}
+
+/* Check "interrupt" attributes.  Warns (and drops the attribute) when
+   applied to a non-function declaration. */
+static tree
+rl78_handle_func_attribute (tree * node,
+			    tree   name,
+			    tree   args,
+			    int    flags ATTRIBUTE_UNUSED,
+			    bool * no_add_attrs)
+{
+  gcc_assert (DECL_P (* node));
+  gcc_assert (args == NULL_TREE);
+
+  if (TREE_CODE (* node) != FUNCTION_DECL)
+    {
+      warning (OPT_Wattributes, "%qE attribute only applies to functions",
+	       name);
+      * no_add_attrs = true;
+    }
+
+  /* FIXME: We ought to check that the interrupt and exception
+     handler attributes have been applied to void functions. */
+  return NULL_TREE;
+}
+
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE rl78_attribute_table
+
+/* Table of RL78-specific attributes. */
+const struct attribute_spec rl78_attribute_table[] =
+{
+  /* Name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
+     affects_type_identity. */
+  { "interrupt",      0, 0, true, false, false, rl78_handle_func_attribute,
+    false },
+  { "brk_interrupt",  0, 0, true, false, false, rl78_handle_func_attribute,
+    false },
+  { "naked",          0, 0, true, false, false, rl78_handle_func_attribute,
+    false },
+  { NULL,             0, 0, false, false, false, NULL, false }
+};
+
+
+
+/* Break down an address RTX into its component base/index/addend
+   portions and return TRUE if the address is of a valid form, else
+   FALSE.  On success the relevant combination of *BASE, *INDEX and
+   *ADDEND is filled in (lone base register, base+index, or a
+   constant addend). */
+static bool
+characterize_address (rtx x, rtx *base, rtx *index, rtx *addend)
+{
+  *base = NULL_RTX;
+  *index = NULL_RTX;
+  *addend = NULL_RTX;
+
+  /* Look through the ES: far-address wrapper. */
+  if (GET_CODE (x) == UNSPEC
+      && XINT (x, 1) == UNS_ES_ADDR)
+    x = XVECEXP (x, 0, 1);
+
+  if (GET_CODE (x) == REG)
+    {
+      *base = x;
+      return true;
+    }
+
+  /* We sometimes get these without the CONST wrapper */
+  if (GET_CODE (x) == PLUS
+      && GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+      && GET_CODE (XEXP (x, 1)) == CONST_INT)
+    {
+      *addend = x;
+      return true;
+    }
+
+  if (GET_CODE (x) == PLUS)
+    {
+      *base = XEXP (x, 0);
+      x = XEXP (x, 1);
+
+      /* Canonicalize so that the REG operand (if any) is the base. */
+      if (GET_CODE (*base) != REG
+	  && GET_CODE (x) == REG)
+	{
+	  rtx tmp = *base;
+	  *base = x;
+	  x = tmp;
+	}
+
+      if (GET_CODE (*base) != REG)
+	return false;
+
+      if (GET_CODE (x) == ZERO_EXTEND
+	  && GET_CODE (XEXP (x, 0)) == REG)
+	{
+	  *index = XEXP (x, 0);
+	  /* BUGFIX: base+index (e.g. [hl+b]) is a valid address shape;
+	     returning false here made the base-and-index validation in
+	     rl78_as_legitimate_address unreachable. */
+	  return true;
+	}
+    }
+
+  switch (GET_CODE (x))
+    {
+    case PLUS:
+      /* BUGFIX: the CONST_INT is the second operand; the old test
+	 checked XEXP (x, 0) twice and therefore could never be true.
+	 This now mirrors the sym+const test near the top. */
+      if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+	  && GET_CODE (XEXP (x, 1)) == CONST_INT)
+	{
+	  *addend = x;
+	  return true;
+	}
+      /* fall through */
+    case MEM:
+    case REG:
+      return false;
+
+    case CONST:
+    case SYMBOL_REF:
+    case CONST_INT:
+      *addend = x;
+      return true;
+
+    default:
+      return false;
+    }
+
+  return false;
+}
+
+/* Used by the Whb constraint.  Match addresses that use HL+B or HL+C
+   addressing. */
+bool
+rl78_hl_b_c_addr_p (rtx op)
+{
+  rtx hl, bc;
+
+  if (GET_CODE (op) != PLUS)
+    return false;
+  hl = XEXP (op, 0);
+  bc = XEXP (op, 1);
+  /* Accept the operands in either order: the ZERO_EXTEND wraps the
+     index (B or C), the bare REG must be HL. */
+  if (GET_CODE (hl) == ZERO_EXTEND)
+    {
+      rtx tmp = hl;
+      hl = bc;
+      bc = tmp;
+    }
+  if (GET_CODE (hl) != REG)
+    return false;
+  if (GET_CODE (bc) != ZERO_EXTEND)
+    return false;
+  bc = XEXP (bc, 0);
+  if (GET_CODE (bc) != REG)
+    return false;
+  if (REGNO (hl) != HL_REG)
+    return false;
+  if (REGNO (bc) != B_REG && REGNO (bc) != C_REG)
+    return false;
+
+  return true;
+}
+
+/* True when hard register R is REGNO, or (in non-strict mode) R is a
+   pseudo that could still be allocated to it. */
+#define REG_IS(r, regno) (((r) == (regno)) || ((r) >= FIRST_PSEUDO_REGISTER && !(strict)))
+
+/* Used in various constraints and predicates to match operands in the
+   "far" address space. */
+int
+rl78_far_p (rtx x)
+{
+  if (! MEM_P (x))
+    return 0;
+#if DEBUG0
+  fprintf (stderr, "\033[35mrl78_far_p: "); debug_rtx (x);
+  fprintf (stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
+#endif
+  return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
+}
+
+/* Return the appropriate mode for a named address pointer:
+   16 bits for generic (near) pointers, 32 bits for __far. */
+#undef TARGET_ADDR_SPACE_POINTER_MODE
+#define TARGET_ADDR_SPACE_POINTER_MODE rl78_addr_space_pointer_mode
+static enum machine_mode
+rl78_addr_space_pointer_mode (addr_space_t addrspace)
+{
+  switch (addrspace)
+    {
+    case ADDR_SPACE_GENERIC:
+      return HImode;
+    case ADDR_SPACE_FAR:
+      return SImode;
+    default:
+      gcc_unreachable ();
+    }
+}
+
+/* Returns TRUE for valid addresses. */
+#undef TARGET_VALID_POINTER_MODE
+#define TARGET_VALID_POINTER_MODE rl78_valid_pointer_mode
+static bool
+rl78_valid_pointer_mode (enum machine_mode m)
+{
+  return (m == HImode || m == SImode);
+}
+
+/* Return the appropriate mode for a named address address. */
+#undef TARGET_ADDR_SPACE_ADDRESS_MODE
+#define TARGET_ADDR_SPACE_ADDRESS_MODE rl78_addr_space_address_mode
+static enum machine_mode
+rl78_addr_space_address_mode (addr_space_t addrspace)
+{
+  switch (addrspace)
+    {
+    case ADDR_SPACE_GENERIC:
+      return HImode;
+    case ADDR_SPACE_FAR:
+      return SImode;
+    default:
+      gcc_unreachable ();
+    }
+}
+
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P rl78_is_legitimate_constant
+
+/* Every constant is legitimate on this target. */
+static bool
+rl78_is_legitimate_constant (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED)
+{
+  return true;
+}
+
+#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
+#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P rl78_as_legitimate_address
+
+/* Returns true if X is a legitimate address in address space AS,
+   for mode MODE.  STRICT requires hard registers only. */
+bool
+rl78_as_legitimate_address (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x,
+			    bool strict ATTRIBUTE_UNUSED, addr_space_t as ATTRIBUTE_UNUSED)
+{
+  rtx base, index, addend;
+  bool is_far_addr = false;
+
+  /* Unwrap the ES: far-address marker, remembering we saw it. */
+  if (GET_CODE (x) == UNSPEC
+      && XINT (x, 1) == UNS_ES_ADDR)
+    {
+      x = XVECEXP (x, 0, 1);
+      is_far_addr = true;
+    }
+
+  /* A 32-bit or ES-wrapped address is never valid in the near space. */
+  if (as == ADDR_SPACE_GENERIC
+      && (GET_MODE (x) == SImode || is_far_addr))
+    return false;
+
+  if (! characterize_address (x, &base, &index, &addend))
+    return false;
+
+  /* We can't extract the high/low portions of a PLUS address
+     involving a register during devirtualization, so make sure all
+     such __far addresses do not have addends.  This forces GCC to do
+     the sum separately. */
+  if (addend && base && as == ADDR_SPACE_FAR)
+    return false;
+
+  if (base && index)
+    {
+      int ir = REGNO (index);
+      int br = REGNO (base);
+
+#define OK(test, debug) if (test) { /*fprintf(stderr, "%d: OK %s\n", __LINE__, debug);*/ return true; }
+      OK (REG_IS (br, HL_REG) && REG_IS (ir, B_REG), "[hl+b]");
+      OK (REG_IS (br, HL_REG) && REG_IS (ir, C_REG), "[hl+c]");
+      return false;
+    }
+
+  if (strict && base && GET_CODE (base) == REG && REGNO (base) >= FIRST_PSEUDO_REGISTER)
+    return false;
+
+  /* After devirtualization the virtual registers r8..r31 may no
+     longer be used as address bases. */
+  if (! cfun->machine->virt_insns_ok && base && GET_CODE (base) == REG
+      && REGNO (base) >= 8 && REGNO (base) <= 31)
+    return false;
+
+  return true;
+}
+
+/* Determine if one named address space is a subset of another:
+   the generic (near) space is a subset of __far. */
+#undef TARGET_ADDR_SPACE_SUBSET_P
+#define TARGET_ADDR_SPACE_SUBSET_P rl78_addr_space_subset_p
+static bool
+rl78_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
+{
+  gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
+  gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
+
+  if (subset == superset)
+    return true;
+
+  else
+    return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
+}
+
+#undef TARGET_ADDR_SPACE_CONVERT
+#define TARGET_ADDR_SPACE_CONVERT rl78_addr_space_convert
+/* Convert from one address space to another: truncate when going
+   far->near, zero-extend when going near->far. */
+static rtx
+rl78_addr_space_convert (rtx op, tree from_type, tree to_type)
+{
+  addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
+  addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
+  rtx result;
+
+  gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
+  gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
+
+  if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
+    {
+      /* This is unpredictable, as we're truncating off usable address
+	 bits. */
+
+      result = gen_reg_rtx (HImode);
+      emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
+      return result;
+    }
+  else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
+    {
+      /* This always works. */
+      result = gen_reg_rtx (SImode);
+      emit_move_insn (rl78_subreg (HImode, result, SImode, 0), op);
+      emit_move_insn (rl78_subreg (HImode, result, SImode, 2), const0_rtx);
+      return result;
+    }
+  else
+    gcc_unreachable ();
+}
+
+/* Implements REGNO_MODE_CODE_OK_FOR_BASE_P.  HL is the only base
+   usable with an index register; otherwise the 16-bit-capable halves
+   and the virtual/system registers qualify. */
+bool
+rl78_regno_mode_code_ok_for_base_p (int regno, enum machine_mode mode ATTRIBUTE_UNUSED,
+				    addr_space_t address_space ATTRIBUTE_UNUSED,
+				    int outer_code ATTRIBUTE_UNUSED, int index_code)
+{
+  if (regno <= SP_REG && regno >= 16)
+    return true;
+  if (index_code == REG)
+    return (regno == HL_REG);
+  if (regno == C_REG || regno == B_REG || regno == E_REG || regno == L_REG)
+    return true;
+  return false;
+}
+
+/* Implements MODE_CODE_BASE_REG_CLASS. */
+enum reg_class
+rl78_mode_code_base_reg_class (enum machine_mode mode ATTRIBUTE_UNUSED,
+			       addr_space_t address_space ATTRIBUTE_UNUSED,
+			       int outer_code ATTRIBUTE_UNUSED,
+			       int index_code ATTRIBUTE_UNUSED)
+{
+  return V_REGS;
+}
+
+/* Implements INITIAL_ELIMINATION_OFFSET.  The frame layout is
+   described in the machine_function struct definition, above. */
+int
+rl78_initial_elimination_offset (int from, int to)
+{
+  int rv = 0; /* as if arg to arg */
+
+  rl78_compute_frame_info ();
+
+  /* First walk down from the arg pointer to TO... */
+  switch (to)
+    {
+    case STACK_POINTER_REGNUM:
+      rv += cfun->machine->framesize_outgoing;
+      rv += cfun->machine->framesize_locals;
+      /* Fall through. */
+    case FRAME_POINTER_REGNUM:
+      rv += cfun->machine->framesize_regs;
+      rv += 4;
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  /* ...then back up from the arg pointer to FROM. */
+  switch (from)
+    {
+    case FRAME_POINTER_REGNUM:
+      rv -= 4;
+      rv -= cfun->machine->framesize_regs;
+      /* Fall through - ARG_POINTER is the reference point. */
+    case ARG_POINTER_REGNUM:
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  return rv;
+}
+
+/* Returns nonzero if the current function has the "naked" attribute
+   (no prologue/epilogue should be emitted). */
+static int
+rl78_is_naked_func (void)
+{
+  return (lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE);
+}
+
+/* Expand the function prologue (from the prologue pattern): push the
+   live register pairs (switching register banks as needed), set up
+   the frame pointer, and allocate locals/outgoing space. */
+void
+rl78_expand_prologue (void)
+{
+  int i, fs;
+  rtx sp = gen_rtx_REG (HImode, STACK_POINTER_REGNUM);
+  int rb = 0;
+
+  if (rl78_is_naked_func ())
+    return;
+
+  /* Always re-compute the frame info - the register usage may have changed. */
+  rl78_compute_frame_info ();
+
+  if (flag_stack_usage_info)
+    current_function_static_stack_size = cfun->machine->framesize;
+
+  if (is_interrupt_func (cfun->decl) && !TARGET_G10)
+    for (i = 0; i < 4; i++)
+      if (cfun->machine->need_to_push [i])
+	{
+	  /* Select Bank 0 if we are using any registers from Bank 0. */
+	  emit_insn (gen_sel_rb (GEN_INT (0)));
+	  break;
+	}
+
+  for (i = 0; i < 16; i++)
+    if (cfun->machine->need_to_push [i])
+      {
+	if (TARGET_G10)
+	  {
+	    /* G10 has only bank 0; stage each pair through AX. */
+	    emit_move_insn (gen_rtx_REG (HImode, 0), gen_rtx_REG (HImode, i*2));
+	    F (emit_insn (gen_push (gen_rtx_REG (HImode, 0))));
+	  }
+	else
+	  {
+	    int need_bank = i/4;
+
+	    if (need_bank != rb)
+	      {
+		emit_insn (gen_sel_rb (GEN_INT (need_bank)));
+		rb = need_bank;
+	      }
+	    F (emit_insn (gen_push (gen_rtx_REG (HImode, i*2))));
+	  }
+      }
+
+  if (rb != 0)
+    emit_insn (gen_sel_rb (GEN_INT (0)));
+
+  /* $fp = $sp, staged through AX. */
+  if (frame_pointer_needed)
+    {
+      F (emit_move_insn (gen_rtx_REG (HImode, AX_REG),
+			 gen_rtx_REG (HImode, STACK_POINTER_REGNUM)));
+      F (emit_move_insn (gen_rtx_REG (HImode, FRAME_POINTER_REGNUM),
+			 gen_rtx_REG (HImode, AX_REG)));
+    }
+
+  /* Allocate the frame in chunks; the subtract takes at most an
+     8-bit immediate. */
+  fs = cfun->machine->framesize_locals + cfun->machine->framesize_outgoing;
+  while (fs > 0)
+    {
+      int fs_byte = (fs > 254) ? 254 : fs;
+      F (emit_insn (gen_subhi3 (sp, sp, GEN_INT (fs_byte))));
+      fs -= fs_byte;
+    }
+}
+
+/* Expand the function epilogue (from the epilogue pattern): undo the
+   prologue in reverse order and emit the appropriate return insn. */
+void
+rl78_expand_epilogue (void)
+{
+  int i, fs;
+  rtx sp = gen_rtx_REG (HImode, STACK_POINTER_REGNUM);
+  int rb = 0;
+
+  if (rl78_is_naked_func ())
+    return;
+
+  if (frame_pointer_needed)
+    {
+      /* $sp = $fp, staged through AX. */
+      emit_move_insn (gen_rtx_REG (HImode, AX_REG),
+		      gen_rtx_REG (HImode, FRAME_POINTER_REGNUM));
+      emit_move_insn (gen_rtx_REG (HImode, STACK_POINTER_REGNUM),
+		      gen_rtx_REG (HImode, AX_REG));
+    }
+  else
+    {
+      /* Deallocate the frame in chunks of at most 254 bytes. */
+      fs = cfun->machine->framesize_locals + cfun->machine->framesize_outgoing;
+      while (fs > 0)
+	{
+	  int fs_byte = (fs > 254) ? 254 : fs;
+
+	  emit_insn (gen_addhi3 (sp, sp, GEN_INT (fs_byte)));
+	  fs -= fs_byte;
+	}
+    }
+
+  /* Pop in the reverse order of the prologue pushes. */
+  for (i = 15; i >= 0; i--)
+    if (cfun->machine->need_to_push [i])
+      {
+	if (TARGET_G10)
+	  {
+	    emit_insn (gen_pop (gen_rtx_REG (HImode, 0)));
+	    emit_move_insn (gen_rtx_REG (HImode, i*2), gen_rtx_REG (HImode, 0));
+	  }
+	else
+	  {
+	    int need_bank = i / 4;
+
+	    if (need_bank != rb)
+	      {
+		emit_insn (gen_sel_rb (GEN_INT (need_bank)));
+		rb = need_bank;
+	      }
+	    emit_insn (gen_pop (gen_rtx_REG (HImode, i * 2)));
+	  }
+      }
+
+  if (rb != 0)
+    emit_insn (gen_sel_rb (GEN_INT (0)));
+
+  if (cfun->machine->trampolines_used)
+    emit_insn (gen_trampoline_uninit ());
+
+  /* Pick the return insn matching the function's interrupt kind. */
+  if (is_brk_interrupt_func (cfun->decl))
+    emit_jump_insn (gen_brk_interrupt_return ());
+  else if (is_interrupt_func (cfun->decl))
+    emit_jump_insn (gen_interrupt_return ());
+  else
+    emit_jump_insn (gen_rl78_return ());
+}
+
+/* Likewise, for exception handlers. */
+void
+rl78_expand_eh_epilogue (rtx x ATTRIBUTE_UNUSED)
+{
+  /* FIXME - replace this with an indirect jump with stack adjust. */
+  emit_jump_insn (gen_rl78_return ());
+}
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE rl78_start_function
+
+/* We don't use this to actually emit the function prologue.  We use
+   this to insert a comment in the asm file describing the
+   function. */
+static void
+rl78_start_function (FILE *file, HOST_WIDE_INT hwi_local ATTRIBUTE_UNUSED)
+{
+  int i;
+
+  if (cfun->machine->framesize == 0)
+    return;
+  fprintf (file, "\t; start of function\n");
+
+  if (cfun->machine->framesize_regs)
+    {
+      fprintf (file, "\t; push %d:", cfun->machine->framesize_regs);
+      for (i = 0; i < 16; i ++)
+	if (cfun->machine->need_to_push[i])
+	  fprintf (file, " %s", word_regnames[i*2]);
+      fprintf (file, "\n");
+    }
+
+  if (frame_pointer_needed)
+    fprintf (file, "\t; $fp points here (r22)\n");
+
+  if (cfun->machine->framesize_locals)
+    fprintf (file, "\t; locals: %d byte%s\n", cfun->machine->framesize_locals,
+	     cfun->machine->framesize_locals == 1 ? "" : "s");
+
+  if (cfun->machine->framesize_outgoing)
+    fprintf (file, "\t; outgoing: %d byte%s\n", cfun->machine->framesize_outgoing,
+	     cfun->machine->framesize_outgoing == 1 ? "" : "s");
+}
+
+/* Return an RTL describing where a function return value of type RET_TYPE
+ is held. */
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE rl78_function_value
+
+static rtx
+rl78_function_value (const_tree ret_type,
+ const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode = TYPE_MODE (ret_type);
+
+ return gen_rtx_REG (mode, 8);
+}
+
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE rl78_promote_function_mode
+
+static enum machine_mode
+rl78_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
+ enum machine_mode mode,
+ int *punsignedp ATTRIBUTE_UNUSED,
+ const_tree funtype ATTRIBUTE_UNUSED, int for_return ATTRIBUTE_UNUSED)
+{
+ return mode;
+}
+
+/* Return an RTL expression describing the register holding a function
+ parameter of mode MODE and type TYPE or NULL_RTX if the parameter should
+ be passed on the stack. CUM describes the previous parameters to the
+ function and NAMED is false if the parameter is part of a variable
+ parameter list, or the last named parameter before the start of a
+ variable parameter list. */
+
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG rl78_function_arg
+
+static rtx
+rl78_function_arg (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_tree type ATTRIBUTE_UNUSED,
+ bool named ATTRIBUTE_UNUSED)
+{
+ return NULL_RTX;
+}
+
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE rl78_function_arg_advance
+
+static void
+rl78_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode, const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ int rounded_size;
+ CUMULATIVE_ARGS * cum = get_cumulative_args (cum_v);
+
+ rounded_size = ((mode == BLKmode)
+ ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
+ if (rounded_size & 1)
+ rounded_size ++;
+ (*cum) += rounded_size;
+}
+
+#undef TARGET_FUNCTION_ARG_BOUNDARY
+#define TARGET_FUNCTION_ARG_BOUNDARY rl78_function_arg_boundary
+
static unsigned int
rl78_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
			    const_tree type ATTRIBUTE_UNUSED)
{
  /* All stack arguments are aligned to 16 bits.  */
  return 16;
}
+
+/* Supported modifier letters:
+
+ A - address of a MEM
+ S - SADDR form of a real register
+ v - real register corresponding to a virtual register
+ m - minus - negative of CONST_INT value.
   C - inverse of a conditional (NE vs EQ for example)
   C - for an integer, the value with bit 15 (0x8000) flipped
+ z - collapsed conditional
+ s - shift count mod 8
+ S - shift count mod 16
+ r - reverse shift count (8-(count mod 8))
+ B - bit position
+
+ h - bottom HI of an SI
+ H - top HI of an SI
+ q - bottom QI of an HI
+ Q - top QI of an HI
+ e - third QI of an SI (i.e. where the ES register gets values from)
+ E - fourth QI of an SI (i.e. MSB)
+
+*/
+
+/* Implements the bulk of rl78_print_operand, below. We do it this
+ way because we need to test for a constant at the top level and
+ insert the '#', but not test for it anywhere else as we recurse
+ down into the operand. */
static void
rl78_print_operand_1 (FILE * file, rtx op, int letter)
{
  int need_paren;

  switch (GET_CODE (op))
    {
    case MEM:
      if (letter == 'A')
	/* 'A': print only the address expression of the MEM.  */
	rl78_print_operand_1 (file, XEXP (op, 0), letter);
      else
	{
	  if (rl78_far_p (op))
	    {
	      /* Far access: prefix with the ES segment override; the
		 in-segment address is the second element of the
		 address UNSPEC.  */
	      fprintf (file, "es:");
	      op = gen_rtx_MEM (GET_MODE (op), XVECEXP (XEXP (op, 0), 0, 1));
	    }
	  /* Part-selection modifiers: narrow the MEM to the requested
	     sub-word, then print it with no modifier.  */
	  if (letter == 'H')
	    {
	      op = adjust_address (op, HImode, 2);
	      letter = 0;
	    }
	  if (letter == 'h')
	    {
	      op = adjust_address (op, HImode, 0);
	      letter = 0;
	    }
	  if (letter == 'Q')
	    {
	      op = adjust_address (op, QImode, 1);
	      letter = 0;
	    }
	  if (letter == 'q')
	    {
	      op = adjust_address (op, QImode, 0);
	      letter = 0;
	    }
	  if (letter == 'e')
	    {
	      op = adjust_address (op, QImode, 2);
	      letter = 0;
	    }
	  if (letter == 'E')
	    {
	      op = adjust_address (op, QImode, 3);
	      letter = 0;
	    }
	  if (CONSTANT_P (XEXP (op, 0)))
	    {
	      /* Absolute address: "!addr" syntax.  */
	      fprintf (file, "!");
	      rl78_print_operand_1 (file, XEXP (op, 0), letter);
	    }
	  else if (GET_CODE (XEXP (op, 0)) == PLUS
		   && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF)
	    {
	      /* Symbol plus offset is also an absolute address.  */
	      fprintf (file, "!");
	      rl78_print_operand_1 (file, XEXP (op, 0), letter);
	    }
	  else if (GET_CODE (XEXP (op, 0)) == PLUS
		   && GET_CODE (XEXP (XEXP (op, 0), 0)) == REG
		   && REGNO (XEXP (XEXP (op, 0), 0)) == 2)
	    {
	      /* Register 2 base: print "offset[reg]" with the offset
		 unprefixed ('u' suppresses the '#').  */
	      rl78_print_operand_1 (file, XEXP (XEXP (op, 0), 1), 'u');
	      fprintf (file, "[");
	      rl78_print_operand_1 (file, XEXP (XEXP (op, 0), 0), 0);
	      fprintf (file, "]");
	    }
	  else
	    {
	      /* Plain register-indirect: "[reg]".  */
	      fprintf (file, "[");
	      rl78_print_operand_1 (file, XEXP (op, 0), letter);
	      fprintf (file, "]");
	    }
	}
      break;

    case REG:
      if (letter == 'Q')
	/* Odd (high) byte of the register pair.  */
	fprintf (file, "%s", reg_names [REGNO (op) | 1]);
      else if (letter == 'H')
	fprintf (file, "%s", reg_names [REGNO (op) + 2]);
      else if (letter == 'q')
	/* Even (low) byte of the register pair.  */
	fprintf (file, "%s", reg_names [REGNO (op) & ~1]);
      else if (letter == 'e')
	fprintf (file, "%s", reg_names [REGNO (op) + 2]);
      else if (letter == 'E')
	fprintf (file, "%s", reg_names [REGNO (op) + 3]);
      else if (letter == 'S')
	/* SADDR form: registers are memory-mapped starting at 0xffef8.  */
	fprintf (file, "0x%x", 0xffef8 + REGNO (op));
      else if (GET_MODE (op) == HImode
	       && ! (REGNO (op) & ~0xfe))
	{
	  if (letter == 'v')
	    /* 'v': print the bank-0 register name corresponding to a
	       virtual register (names repeat every 8 regnos).  */
	    fprintf (file, "%s", word_regnames [REGNO (op) % 8]);
	  else
	    fprintf (file, "%s", word_regnames [REGNO (op)]);
	}
      else
	fprintf (file, "%s", reg_names [REGNO (op)]);
      break;

    case CONST_INT:
      /* Select a part or transformation of the constant, per the
	 modifier table above.  */
      if (letter == 'Q')
	fprintf (file, "%ld", INTVAL (op) >> 8);
      else if (letter == 'H')
	fprintf (file, "%ld", INTVAL (op) >> 16);
      else if (letter == 'q')
	fprintf (file, "%ld", INTVAL (op) & 0xff);
      else if (letter == 'h')
	fprintf (file, "%ld", INTVAL (op) & 0xffff);
      else if (letter == 'e')
	fprintf (file, "%ld", (INTVAL (op) >> 16) & 0xff);
      else if (letter == 'B')
	/* Bit position: the constant must be a power of two.  */
	fprintf (file, "%d", exact_log2 (INTVAL (op)));
      else if (letter == 'E')
	fprintf (file, "%ld", (INTVAL (op) >> 24) & 0xff);
      else if (letter == 'm')
	fprintf (file, "%ld", - INTVAL (op));
      else if (letter == 's')
	fprintf (file, "%ld", INTVAL (op) % 8);
      else if (letter == 'S')
	fprintf (file, "%ld", INTVAL (op) % 16);
      else if (letter == 'r')
	fprintf (file, "%ld", 8 - (INTVAL (op) % 8));
      else if (letter == 'C')
	/* 'C' on an integer: flip bit 15 of the 16-bit value.  */
	fprintf (file, "%ld", (INTVAL (op) ^ 0x8000) & 0xffff);
      else
	fprintf (file, "%ld", INTVAL (op));
      break;

    case CONST:
      rl78_print_operand_1 (file, XEXP (op, 0), letter);
      break;

    case ZERO_EXTRACT:
      /* A ZERO_EXTRACT of a symbol encodes a relocation operator for
	 part of a __far address.  */
      {
	int bits = INTVAL (XEXP (op, 1));
	int ofs = INTVAL (XEXP (op, 2));
	if (bits == 16 && ofs == 0)
	  fprintf (file, "%%lo16(");
	else if (bits == 16 && ofs == 16)
	  fprintf (file, "%%hi16(");
	else if (bits == 8 && ofs == 16)
	  fprintf (file, "%%hi8(");
	else
	  gcc_unreachable ();
	rl78_print_operand_1 (file, XEXP (op, 0), 0);
	fprintf (file, ")");
      }
      break;

    case ZERO_EXTEND:
      if (GET_CODE (XEXP (op, 0)) == REG)
	fprintf (file, "%s", reg_names [REGNO (XEXP (op, 0))]);
      else
	print_rtl (file, op);
      break;

    case PLUS:
      need_paren = 0;
      /* Symbolic sums may need a relocation-operator wrapper.  */
      if (letter == 'H')
	{
	  fprintf (file, "%%hi16(");
	  need_paren = 1;
	  letter = 0;
	}
      if (letter == 'h')
	{
	  fprintf (file, "%%lo16(");
	  need_paren = 1;
	  letter = 0;
	}
      if (letter == 'e')
	{
	  fprintf (file, "%%hi8(");
	  need_paren = 1;
	  letter = 0;
	}
      if (letter == 'q' || letter == 'Q')
	output_operand_lossage ("q/Q modifiers invalid for symbol references");

      /* Print the constant part first when the other addend is a
	 ZERO_EXTEND.  */
      if (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	{
	  rl78_print_operand_1 (file, XEXP (op, 1), letter);
	  fprintf (file, "+");
	  rl78_print_operand_1 (file, XEXP (op, 0), letter);
	}
      else
	{
	  rl78_print_operand_1 (file, XEXP (op, 0), letter);
	  fprintf (file, "+");
	  rl78_print_operand_1 (file, XEXP (op, 1), letter);
	}
      if (need_paren)
	fprintf (file, ")");
      break;

    case SYMBOL_REF:
      need_paren = 0;
      if (letter == 'H')
	{
	  fprintf (file, "%%hi16(");
	  need_paren = 1;
	  letter = 0;
	}
      if (letter == 'h')
	{
	  fprintf (file, "%%lo16(");
	  need_paren = 1;
	  letter = 0;
	}
      if (letter == 'e')
	{
	  fprintf (file, "%%hi8(");
	  need_paren = 1;
	  letter = 0;
	}
      if (letter == 'q' || letter == 'Q')
	output_operand_lossage ("q/Q modifiers invalid for symbol references");

      output_addr_const (file, op);
      if (need_paren)
	fprintf (file, ")");
      break;

    case CODE_LABEL:
    case LABEL_REF:
      output_asm_label (op);
      break;

      /* Condition codes.  'C' prints the inverse condition; 'z' is
	 used where the comparison has been collapsed/eliminated.  */
    case LTU:
      if (letter == 'z')
	fprintf (file, "#comparison eliminated");
      else
	fprintf (file, letter == 'C' ? "nc" : "c");
      break;
    case LEU:
      if (letter == 'z')
	fprintf (file, "br");
      else
	fprintf (file, letter == 'C' ? "h" : "nh");
      break;
    case GEU:
      if (letter == 'z')
	fprintf (file, "br");
      else
	fprintf (file, letter == 'C' ? "c" : "nc");
      break;
    case GTU:
      if (letter == 'z')
	fprintf (file, "#comparison eliminated");
      else
	fprintf (file, letter == 'C' ? "nh" : "h");
      break;
    case EQ:
      if (letter == 'z')
	fprintf (file, "br");
      else
	fprintf (file, letter == 'C' ? "nz" : "z");
      break;
    case NE:
      if (letter == 'z')
	fprintf (file, "#comparison eliminated");
      else
	fprintf (file, letter == 'C' ? "z" : "nz");
      break;

      /* Note: these assume appropriate adjustments were made so that
	 unsigned comparisons, which is all this chip has, will
	 work.  */
    case LT:
      if (letter == 'z')
	fprintf (file, "#comparison eliminated");
      else
	fprintf (file, letter == 'C' ? "nc" : "c");
      break;
    case LE:
      if (letter == 'z')
	fprintf (file, "br");
      else
	fprintf (file, letter == 'C' ? "h" : "nh");
      break;
    case GE:
      if (letter == 'z')
	fprintf (file, "br");
      else
	fprintf (file, letter == 'C' ? "c" : "nc");
      break;
    case GT:
      if (letter == 'z')
	fprintf (file, "#comparison eliminated");
      else
	fprintf (file, letter == 'C' ? "nh" : "h");
      break;

    default:
      /* Unknown RTX: print its code name for debugging.  */
      fprintf (file, "(%s)", GET_RTX_NAME (GET_CODE (op)));
      break;
    }
}
+
+#undef TARGET_PRINT_OPERAND
+#define TARGET_PRINT_OPERAND rl78_print_operand
+
+static void
+rl78_print_operand (FILE * file, rtx op, int letter)
+{
+ if (CONSTANT_P (op) && letter != 'u' && letter != 's' && letter != 'r' && letter != 'S' && letter != 'B')
+ fprintf (file, "#");
+ rl78_print_operand_1 (file, op, letter);
+}
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT rl78_trampoline_init
+
+/* Note that the RL78's addressing makes it very difficult to do
+ trampolines on the stack. So, libgcc has a small pool of
+ trampolines from which one is allocated to this task. */
static void
rl78_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
{
  rtx mov_addr, thunk_addr;
  rtx function = XEXP (DECL_RTL (fndecl), 0);

  /* The first HImode word of M_TRAMP receives the address of the
     thunk allocated from libgcc's pool.  */
  mov_addr = adjust_address (m_tramp, HImode, 0);
  thunk_addr = gen_reg_rtx (HImode);

  /* gen_trampoline_init takes both values in registers.  */
  function = force_reg (HImode, function);
  static_chain = force_reg (HImode, static_chain);

  emit_insn (gen_trampoline_init (thunk_addr, function, static_chain));
  emit_move_insn (mov_addr, thunk_addr);

  /* Record that this function uses trampolines.  */
  cfun->machine->trampolines_used = 1;
}
+
+#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
+#define TARGET_TRAMPOLINE_ADJUST_ADDRESS rl78_trampoline_adjust_address
+
+static rtx
+rl78_trampoline_adjust_address (rtx m_tramp)
+{
+ rtx x = gen_rtx_MEM (HImode, m_tramp);
+ return x;
+}
+
+/* Expander for cbranchqi4 and cbranchhi4. RL78 is missing some of
+ the "normal" compares, specifically, it only has unsigned compares,
+ so we must synthesize the missing ones. */
+void
+rl78_expand_compare (rtx *operands)
+{
+ if (GET_CODE (operands[2]) == MEM)
+ operands[2] = copy_to_mode_reg (GET_MODE (operands[2]), operands[2]);
+}
+
+
+
+/* Define this to 1 if you are debugging the peephole optimizers. */
+#define DEBUG_PEEP 0
+
+/* Predicate used to enable the peephole2 patterns in rl78-virt.md.
+ The default "word" size is a byte so we can effectively use all the
+ registers, but we want to do 16-bit moves whenever possible. This
+ function determines when such a move is an option. */
bool
rl78_peep_movhi_p (rtx *operands)
{
  int i;
  rtx m, a;

  /* The candidate pair of insns is:
     (set (op0) (op1))
     (set (op2) (op3)) */

  /* Only applicable while still working with virtual registers.  */
  if (! rl78_virt_insns_ok ())
    return false;

#if DEBUG_PEEP
  fprintf (stderr, "\033[33m");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
  debug_rtx (operands[2]);
  debug_rtx (operands[3]);
  fprintf (stderr, "\033[0m");
#endif

  /* You can move a constant to memory as QImode, but not HImode.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) != REG)
    {
#if DEBUG_PEEP
      fprintf (stderr, "no peep: move constant to memory\n");
#endif
      return false;
    }

  /* The first destination must not be the second source.  */
  if (rtx_equal_p (operands[0], operands[3]))
    {
#if DEBUG_PEEP
      fprintf (stderr, "no peep: overlapping\n");
#endif
      return false;
    }

  /* Check destination (i == 0) and source (i == 1) pairs.  */
  for (i = 0; i < 2; i ++)
    {
      /* Both halves must be the same kind of operand...  */
      if (GET_CODE (operands[i]) != GET_CODE (operands[i+2]))
	{
#if DEBUG_PEEP
	  fprintf (stderr, "no peep: different codes\n");
#endif
	  return false;
	}
      /* ... in the same mode.  */
      if (GET_MODE (operands[i]) != GET_MODE (operands[i+2]))
	{
#if DEBUG_PEEP
	  fprintf (stderr, "no peep: different modes\n");
#endif
	  return false;
	}

      switch (GET_CODE (operands[i]))
	{
	case REG:
	  /* LSB MSB - the two QImode regs must be consecutive, low
	     byte first, and the pair must be usable as HImode.  */
	  if (REGNO (operands[i]) + 1 != REGNO (operands[i+2])
	      || GET_MODE (operands[i]) != QImode)
	    {
#if DEBUG_PEEP
	      fprintf (stderr, "no peep: wrong regnos %d %d %d\n",
		       REGNO (operands[i]), REGNO (operands[i+2]),
		       i);
#endif
	      return false;
	    }
	  if (! rl78_hard_regno_mode_ok (REGNO (operands[i]), HImode))
	    {
#if DEBUG_PEEP
	      fprintf (stderr, "no peep: reg %d not HI\n", REGNO (operands[i]));
#endif
	      return false;
	    }
	  break;

	case CONST_INT:
	  /* Any pair of constants can be combined.  */
	  break;

	case MEM:
	  /* Memory halves must be QImode, 16-bit aligned, at an even
	     offset, and the second must be exactly one byte above the
	     first.  */
	  if (GET_MODE (operands[i]) != QImode)
	    return false;
	  if (MEM_ALIGN (operands[i]) < 16)
	    return false;
	  a = XEXP (operands[i], 0);
	  if (GET_CODE (a) == CONST)
	    a = XEXP (a, 0);
	  if (GET_CODE (a) == PLUS)
	    a = XEXP (a, 1);
	  if (GET_CODE (a) == CONST_INT
	      && INTVAL (a) & 1)
	    {
#if DEBUG_PEEP
	      fprintf (stderr, "no peep: misaligned mem %d\n", i);
	      debug_rtx (operands[i]);
#endif
	      return false;
	    }
	  m = adjust_address (operands[i], QImode, 1);
	  if (! rtx_equal_p (m, operands[i+2]))
	    {
#if DEBUG_PEEP
	      fprintf (stderr, "no peep: wrong mem %d\n", i);
	      debug_rtx (m);
	      debug_rtx (operands[i+2]);
#endif
	      return false;
	    }
	  break;

	default:
#if DEBUG_PEEP
	  fprintf (stderr, "no peep: wrong rtx %d\n", i);
#endif
	  return false;
	}
    }
#if DEBUG_PEEP
  fprintf (stderr, "\033[32mpeep!\033[0m\n");
#endif
  return true;
}
+
+/* Likewise, when a peephole is activated, this function helps compute
+ the new operands. */
void
rl78_setup_peep_movhi (rtx *operands)
{
  int i;

  /* Combine each QImode pair (op[i], op[i+2]) into the HImode
     operand op[i+4] used by the replacement insn.  */
  for (i = 0; i < 2; i ++)
    {
      switch (GET_CODE (operands[i]))
	{
	case REG:
	  /* op[i] holds the LSB, so the HImode pair starts there.  */
	  operands[i+4] = gen_rtx_REG (HImode, REGNO (operands[i]));
	  break;

	case CONST_INT:
	  /* Low byte from op[i], high byte from op[i+2].
	     NOTE(review): the (char) cast sign-extends the high byte
	     on hosts where plain char is signed - confirm this is
	     what the HImode move patterns expect.  */
	  operands[i+4] = GEN_INT ((INTVAL (operands[i]) & 0xff) + ((char) INTVAL (operands[i+2])) * 256);
	  break;

	case MEM:
	  /* Re-use the low-byte MEM, widened to HImode.  */
	  operands[i+4] = adjust_address (operands[i], HImode, 0);
	  break;

	default:
	  break;
	}
    }
}
+
+/*
+ How Devirtualization works in the RL78 GCC port
+
+Background
+
+The RL78 is an 8-bit port with some 16-bit operations. It has 32
+bytes of register space, in four banks, memory-mapped. One bank is
+the "selected" bank and holds the registers used for primary
+operations. Since the registers are memory mapped, often you can
+still refer to the unselected banks via memory accesses.
+
+Virtual Registers
+
+The GCC port uses bank 0 as the "selected" registers (A, X, BC, etc)
+and refers to the other banks via their memory addresses, although
+they're treated as regular registers internally. These "virtual"
+registers are R8 through R23 (bank3 is reserved for asm-based
+interrupt handlers).
+
+There are four machine description files:
+
+rl78.md - common register-independent patterns and definitions
+rl78-expand.md - expanders
+rl78-virt.md - patterns that match BEFORE devirtualization
+rl78-real.md - patterns that match AFTER devirtualization
+
+At least through register allocation and reload, gcc is told that it
+can do pretty much anything - but may only use the virtual registers.
+GCC cannot properly create the varying addressing modes that the RL78
+supports in an efficient way.
+
+Sometime after reload, the RL78 backend "devirtualizes" the RTL. It
+uses the "valloc" attribute in rl78-virt.md for determining the rules
+by which it will replace virtual registers with real registers (or
+not) and how to make up addressing modes. For example, insns tagged
+with "ro1" have a single read-only parameter, which may need to be
+moved from memory/constant/vreg to a suitable real register. As part
+of devirtualization, a flag is toggled, disabling the rl78-virt.md
+patterns and enabling the rl78-real.md patterns. The new patterns'
constraints are used to determine the real registers used.  NOTE:
patterns in rl78-virt.md essentially ignore the constraints and rely on
predicates, whereas the rl78-real.md ones essentially ignore the
predicates and rely on the constraints.
+
+The devirtualization pass is scheduled via the pass manager (despite
+being called "rl78_reorg") so it can be scheduled prior to var-track
+(the idea is to let gdb know about the new registers). Ideally, it
+would be scheduled right after pro/epilogue generation, so the
+post-reload optimizers could operate on the real registers, but when I
+tried that there were some issues building the target libraries.
+
During devirtualization, a simple register move optimizer is run.  It
would be better to run a full CSE/propagation pass on it though, but
that has not yet been attempted.
+
+ */
+#define DEBUG_ALLOC 0
+
+#define OP(x) (*recog_data.operand_loc[x])
+
+/* This array is used to hold knowledge about the contents of the
+ real registers (A ... H), the memory-based registers (r8 ... r31)
+ and the first NUM_STACK_LOCS words on the stack. We use this to
+ avoid generating redundant move instructions.
+
+ A value in the range 0 .. 31 indicates register A .. r31.
+ A value in the range 32 .. 63 indicates stack slot (value - 32).
+ A value of NOT_KNOWN indicates that the contents of that location
+ are not known. */
+
+#define NUM_STACK_LOCS 32
+#define NOT_KNOWN 127
+
+static unsigned char content_memory [32 + NUM_STACK_LOCS];
+
/* A single postponed content update, recorded by
   add_postponed_content_update and applied later by
   process_postponed_content_update.  saved_update_index == NOT_KNOWN
   means no update is pending.  */
static unsigned char saved_update_index = NOT_KNOWN;
static unsigned char saved_update_value;
static enum machine_mode saved_update_mode;
+
+
+static inline void
+clear_content_memory (void)
+{
+ memset (content_memory, NOT_KNOWN, sizeof content_memory);
+ if (dump_file)
+ fprintf (dump_file, " clear content memory\n");
+ saved_update_index = NOT_KNOWN;
+}
+
+/* Convert LOC into an index into the content_memory array.
+ If LOC cannot be converted, return NOT_KNOWN. */
+
static unsigned char
get_content_index (rtx loc)
{
  enum machine_mode mode;

  if (loc == NULL_RTX)
    return NOT_KNOWN;

  /* Registers r0..r31 map directly to indices 0..31.  */
  if (REG_P (loc))
    {
      if (REGNO (loc) < 32)
	return REGNO (loc);
      return NOT_KNOWN;
    }

  mode = GET_MODE (loc);

  /* Only stack-based memory references are tracked.  */
  if (! rl78_stack_based_mem (loc, mode))
    return NOT_KNOWN;

  loc = XEXP (loc, 0);

  if (REG_P (loc))
    /* loc = MEM (SP): slot 0, i.e. index 32.  */
    return 32;

  /* loc = MEM (PLUS (SP, INT)).  */
  loc = XEXP (loc, 1);

  /* Slots beyond NUM_STACK_LOCS are not tracked.  */
  if (INTVAL (loc) < NUM_STACK_LOCS)
    return 32 + INTVAL (loc);

  return NOT_KNOWN;
}
+
+/* Return a string describing content INDEX in mode MODE.
+ WARNING: Can return a pointer to a static buffer. */
+static const char *
+get_content_name (unsigned char index, enum machine_mode mode)
+{
+ static char buffer [128];
+
+ if (index == NOT_KNOWN)
+ return "Unknown";
+
+ if (index > 31)
+ sprintf (buffer, "stack slot %d", index - 32);
+ else if (mode == HImode)
+ sprintf (buffer, "%s%s",
+ reg_names [index + 1], reg_names [index]);
+ else
+ return reg_names [index];
+
+ return buffer;
+}
+
+#if DEBUG_ALLOC
+
/* Debug helper: dump every known entry of content_memory to FILE.  */
static void
display_content_memory (FILE * file)
{
  unsigned int i;

  fprintf (file, " Known memory contents:\n");

  for (i = 0; i < sizeof content_memory; i++)
    if (content_memory[i] != NOT_KNOWN)
      {
	fprintf (file, " %s contains a copy of ", get_content_name (i, QImode));
	fprintf (file, "%s\n", get_content_name (content_memory [i], QImode));
      }
}
+#endif
+
/* Record that location INDEX now holds the contents of location VAL
   (VAL == NOT_KNOWN invalidates INDEX).  The tracking is symmetric:
   VAL is also marked as holding INDEX.  For HImode the partner bytes
   INDEX+1 / VAL+1 are updated too, and any other entries that used to
   reference INDEX or VAL are cleared.  */
static void
update_content (unsigned char index, unsigned char val, enum machine_mode mode)
{
  unsigned int i;

  gcc_assert (index < sizeof content_memory);

  content_memory [index] = val;
  if (val != NOT_KNOWN)
    content_memory [val] = index;

  /* Make the entry in dump_file *before* VAL is increased below. */
  if (dump_file)
    {
      fprintf (dump_file, " %s now contains ", get_content_name (index, mode));
      if (val == NOT_KNOWN)
	fprintf (dump_file, "Unknown\n");
      else
	fprintf (dump_file, "%s and vice versa\n", get_content_name (val, mode));
    }

  if (mode == HImode)
    {
      /* Update the high-byte partner entries as well.  */
      val = val == NOT_KNOWN ? val : val + 1;

      content_memory [index + 1] = val;
      if (val != NOT_KNOWN)
	{
	  content_memory [val] = index + 1;
	  -- val;
	}
    }

  /* Any other places that had INDEX recorded as their contents are now invalid. */
  for (i = 0; i < sizeof content_memory; i++)
    {
      /* Skip the entries we have just set (and their HImode
	 partners).  */
      if (i == index
	  || (val != NOT_KNOWN && i == val))
	{
	  if (mode == HImode)
	    ++ i;
	  continue;
	}

      if (content_memory[i] == index
	  || (val != NOT_KNOWN && content_memory[i] == val))
	{
	  content_memory[i] = NOT_KNOWN;

	  if (dump_file)
	    fprintf (dump_file, " %s cleared\n", get_content_name (i, mode));

	  if (mode == HImode)
	    content_memory[++ i] = NOT_KNOWN;
	}
    }
}
+
+/* Record that LOC contains VALUE.
+ For HImode locations record that LOC+1 contains VALUE+1.
+ If LOC is not a register or stack slot, do nothing.
+ If VALUE is not a register or stack slot, clear the recorded content. */
+
static void
record_content (rtx loc, rtx value)
{
  enum machine_mode mode;
  unsigned char index;
  unsigned char val;

  /* Untrackable destination: nothing to record.  */
  if ((index = get_content_index (loc)) == NOT_KNOWN)
    return;

  val = get_content_index (value);

  mode = GET_MODE (loc);

  /* Self-assignment: redundant moves are expected in unoptimized
     code, so just ignore them there.  */
  if (val == index)
    {
      if (! optimize)
	return;

      /* This should not happen when optimizing. */
#if 1
      /* Report (but tolerate) it instead of ICEing.  */
      fprintf (stderr, "ASSIGNMENT of location to itself detected! [%s]\n",
	       get_content_name (val, mode));
      return;
#else
      gcc_unreachable ();
#endif
    }

  update_content (index, val, mode);
}
+
+/* Returns TRUE if LOC already contains a copy of VALUE. */
+
+static bool
+already_contains (rtx loc, rtx value)
+{
+ unsigned char index;
+ unsigned char val;
+
+ if ((index = get_content_index (loc)) == NOT_KNOWN)
+ return false;
+
+ if ((val = get_content_index (value)) == NOT_KNOWN)
+ return false;
+
+ if (content_memory [index] != val)
+ return false;
+
+ if (GET_MODE (loc) == HImode)
+ return content_memory [index + 1] == val + 1;
+
+ return true;
+}
+
+bool
+rl78_es_addr (rtx addr)
+{
+ if (GET_CODE (addr) == MEM)
+ addr = XEXP (addr, 0);
+ if (GET_CODE (addr) != UNSPEC)
+ return false;
+ if (XINT (addr, 1) != UNS_ES_ADDR)
+ return false;
+ return true;
+}
+
/* Return the in-segment (base) part of the ES-form address ADDR,
   optionally wrapped in a MEM, as a new HImode MEM.  */
rtx
rl78_es_base (rtx addr)
{
  if (GET_CODE (addr) == MEM)
    addr = XEXP (addr, 0);
  /* The base address is the second element of the UNSPEC vector.  */
  addr = XVECEXP (addr, 0, 1);
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == ZERO_EXTRACT)
    /* Strip a relocation wrapper (CONST of ZERO_EXTRACT).  */
    addr = XEXP (XEXP (addr, 0), 0);
  /* Mode doesn't matter here. */
  return gen_rtx_MEM (HImode, addr);
}
+
+/* Rescans an insn to see if it's recognized again. This is done
+ carefully to ensure that all the constraint information is accurate
+ for the newly matched insn. */
static bool
insn_ok_now (rtx insn)
{
  rtx pattern = PATTERN (insn);
  int i;

  /* Force a fresh recognition attempt.  */
  INSN_CODE (insn) = -1;

  if (recog (pattern, insn, 0) > -1)
    {
      extract_insn (insn);
      /* Strict constraint check: does it match a real pattern?  */
      if (constrain_operands (1))
	{
#if DEBUG_ALLOC
	  fprintf (stderr, "\033[32m");
	  debug_rtx (insn);
	  fprintf (stderr, "\033[0m");
#endif
	  /* The insn is acceptable; track what it assigns.  */
	  if (SET_P (pattern))
	    record_content (SET_DEST (pattern), SET_SRC (pattern));

	  /* We need to detect far addresses that haven't been
	     converted to es/lo16 format. */
	  for (i=0; i<recog_data.n_operands; i++)
	    if (GET_CODE (OP (i)) == MEM
		&& GET_MODE (XEXP (OP (i), 0)) == SImode
		&& GET_CODE (XEXP (OP (i), 0)) != UNSPEC)
	      return false;

	  return true;
	}
    }
  else
    {
      /* We need to re-recog the insn with virtual registers to get
	 the operands. */
      cfun->machine->virt_insns_ok = 1;
      if (recog (pattern, insn, 0) > -1)
	{
	  extract_insn (insn);
	  if (constrain_operands (0))
	    {
	      cfun->machine->virt_insns_ok = 0;
	      return false;
	    }
	}

#if DEBUG_ALLOC
      fprintf (stderr, "\033[41;30m Unrecognized *virtual* insn \033[0m\n");
      debug_rtx (insn);
#endif
      /* Not recognizable even as a virtual insn - give up.  */
      gcc_unreachable ();
    }

#if DEBUG_ALLOC
  fprintf (stderr, "\033[31m");
  debug_rtx (insn);
  fprintf (stderr, "\033[0m");
#endif
  /* Recognized, but the constraints are not yet satisfied.  */
  return false;
}
+
+#if DEBUG_ALLOC
+#define WORKED fprintf (stderr, "\033[48;5;22m Worked at line %d \033[0m\n", __LINE__)
+#define FAILEDSOFAR fprintf (stderr, "\033[48;5;52m FAILED at line %d \033[0m\n", __LINE__)
+#define FAILED fprintf (stderr, "\033[48;5;52m FAILED at line %d \033[0m\n", __LINE__), gcc_unreachable()
+#define MAYBE_OK(insn) if (insn_ok_now (insn)) { WORKED; return; } else { FAILEDSOFAR; }
+#define MUST_BE_OK(insn) if (insn_ok_now (insn)) { WORKED; return; } FAILED
+#else
+#define FAILED gcc_unreachable ()
+#define MAYBE_OK(insn) if (insn_ok_now (insn)) return;
+#define MUST_BE_OK(insn) if (insn_ok_now (insn)) return; FAILED
+#endif
+
+/* Registers into which we move the contents of virtual registers. */
+#define X gen_rtx_REG (QImode, X_REG)
+#define A gen_rtx_REG (QImode, A_REG)
+#define C gen_rtx_REG (QImode, C_REG)
+#define B gen_rtx_REG (QImode, B_REG)
+#define E gen_rtx_REG (QImode, E_REG)
+#define D gen_rtx_REG (QImode, D_REG)
+#define L gen_rtx_REG (QImode, L_REG)
+#define H gen_rtx_REG (QImode, H_REG)
+
+#define AX gen_rtx_REG (HImode, AX_REG)
+#define BC gen_rtx_REG (HImode, BC_REG)
+#define DE gen_rtx_REG (HImode, DE_REG)
+#define HL gen_rtx_REG (HImode, HL_REG)
+
+/* Returns TRUE if R is a virtual register. */
+static inline bool
+is_virtual_register (rtx r)
+{
+ return (GET_CODE (r) == REG
+ && REGNO (r) >= 8
+ && REGNO (r) < 32);
+}
+
+/* In all these alloc routines, we expect the following: the insn
+ pattern is unshared, the insn was previously recognized and failed
+ due to predicates or constraints, and the operand data is in
+ recog_data. */
+
+static int virt_insn_was_frame;
+
+/* Hook for all insns we emit. Re-mark them as FRAME_RELATED if
+ needed. */
static rtx
EM2 (int line ATTRIBUTE_UNUSED, rtx r)
{
  /* LINE is only used by the DEBUG_ALLOC trace output.  */
#if DEBUG_ALLOC
  fprintf (stderr, "\033[36m%d: ", line);
  debug_rtx (r);
  fprintf (stderr, "\033[0m");
#endif
  /*SCHED_GROUP_P (r) = 1;*/
  /* Propagate the frame-related flag of the insn being replaced.  */
  if (virt_insn_was_frame)
    RTX_FRAME_RELATED_P (r) = 1;
  return r;
}
+
+#define EM(x) EM2 (__LINE__, x)
+
+/* Return a suitable RTX for the low half of a __far address. */
static rtx
rl78_lo16 (rtx addr)
{
  rtx r;

  if (GET_CODE (addr) == SYMBOL_REF
      || GET_CODE (addr) == CONST)
    {
      /* Symbolic address: build a %lo16(...) relocation.  */
      r = gen_rtx_ZERO_EXTRACT (HImode, addr, GEN_INT (16), GEN_INT (0));
      r = gen_rtx_CONST (HImode, r);
    }
  else
    /* Otherwise just take the low half of the SImode value.  */
    r = rl78_subreg (HImode, addr, SImode, 0);

  /* Wrap the result in the ES-address unspec.  */
  r = gen_es_addr (r);

  return r;
}
+
+/* Return a suitable RTX for the high half's lower byte of a __far address. */
+static rtx
+rl78_hi8 (rtx addr)
+{
+ if (GET_CODE (addr) == SYMBOL_REF
+ || GET_CODE (addr) == CONST)
+ {
+ rtx r = gen_rtx_ZERO_EXTRACT (QImode, addr, GEN_INT (8), GEN_INT (16));
+ r = gen_rtx_CONST (QImode, r);
+ return r;
+ }
+ return rl78_subreg (QImode, addr, SImode, 2);
+}
+
+static void
+add_postponed_content_update (rtx to, rtx value)
+{
+ unsigned char index;
+
+ if ((index = get_content_index (to)) == NOT_KNOWN)
+ return;
+
+ gcc_assert (saved_update_index == NOT_KNOWN);
+ saved_update_index = index;
+ saved_update_value = get_content_index (value);
+ saved_update_mode = GET_MODE (to);
+}
+
+static void
+process_postponed_content_update (void)
+{
+ if (saved_update_index != NOT_KNOWN)
+ {
+ update_content (saved_update_index, saved_update_value, saved_update_mode);
+ saved_update_index = NOT_KNOWN;
+ }
+}
+
+/* Generate and emit a move of (register) FROM into TO. if WHERE is not NULL
+ then if BEFORE is true then emit the insn before WHERE, otherwise emit it
+ after WHERE. If TO already contains FROM then do nothing. Returns TO if
+ BEFORE is true, FROM otherwise. */
static rtx
gen_and_emit_move (rtx to, rtx from, rtx where, bool before)
{
  enum machine_mode mode = GET_MODE (to);

  /* Skip the move entirely when our tracking shows it is redundant
     (only done when optimizing and inserting before).  */
  if (optimize && before && already_contains (to, from))
    {
#if DEBUG_ALLOC
      display_content_memory (stderr);
#endif
      if (dump_file)
	{
	  fprintf (dump_file, " Omit move of %s into ",
		   get_content_name (get_content_index (from), mode));
	  fprintf (dump_file, "%s as it already contains this value\n",
		   get_content_name (get_content_index (to), mode));
	}
    }
  else
    {
      rtx move = mode == QImode ? gen_movqi (to, from) : gen_movhi (to, from);

      EM (move);

      if (where == NULL_RTX)
	emit_insn (move);
      else if (before)
	emit_insn_before (move, where);
      else
	{
	  rtx note = find_reg_note (where, REG_EH_REGION, NULL_RTX);

	  /* If necessary move REG_EH_REGION notes forward.
	     cf. compiling gcc.dg/pr44545.c. */
	  if (note != NULL_RTX)
	    {
	      add_reg_note (move, REG_EH_REGION, XEXP (note, 0));
	      remove_note (where, note);
	    }

	  emit_insn_after (move, where);
	}

      /* When inserting after WHERE, the update must wait until WHERE
	 itself has executed; record it as postponed.  */
      if (before)
	record_content (to, from);
      else
	add_postponed_content_update (to, from);
    }

  return before ? to : from;
}
+
+/* If M is MEM(REG) or MEM(PLUS(REG,INT)) and REG is virtual then
+ copy it into NEWBASE and return the updated MEM. Otherwise just
+ return M. Any needed insns are emitted before BEFORE. */
static rtx
transcode_memory_rtx (rtx m, rtx newbase, rtx before)
{
  rtx base, index, addendr;
  int addend = 0;
  int need_es = 0;

  if (! MEM_P (m))
    return m;

  /* An SImode address means a __far access: load ES with the high
     byte of the address and rewrite the MEM to its low-16-bit
     (ES-relative) form.  */
  if (GET_MODE (XEXP (m, 0)) == SImode)
    {
      rtx new_m;
      rtx seg = rl78_hi8 (XEXP (m, 0));

#if DEBUG_ALLOC
      fprintf (stderr, "setting ES:\n");
      debug_rtx(seg);
#endif
      emit_insn_before (EM (gen_movqi (A, seg)), before);
      emit_insn_before (EM (gen_movqi_es (A)), before);
      /* A was clobbered loading the segment byte.  */
      record_content (A, NULL_RTX);

      new_m = gen_rtx_MEM (GET_MODE (m), rl78_lo16 (XEXP (m, 0)));
      MEM_COPY_ATTRIBUTES (new_m, m);
      m = new_m;
      need_es = 1;
    }

  characterize_address (XEXP (m, 0), & base, & index, & addendr);
  gcc_assert (index == NULL_RTX);

#if DEBUG_ALLOC
  fprintf (stderr, "\033[33m"); debug_rtx (m); fprintf (stderr, "\033[0m");
  debug_rtx (base);
#endif
  /* No base register (e.g. an absolute address): usable as-is.  */
  if (base == NULL_RTX)
    return m;

  if (addendr && GET_CODE (addendr) == CONST_INT)
    addend = INTVAL (addendr);

  gcc_assert (REG_P (base));
  gcc_assert (REG_P (newbase));

  /* SP-relative accesses with a byte offset are directly
     addressable.  */
  if (REGNO (base) == SP_REG)
    {
      if (addend >= 0 && addend <= 255)
	return m;
    }

  /* BASE should be a virtual register. We copy it to NEWBASE. If
     the addend is out of range for DE/HL, we use AX to compute the full
     address. */

  if (addend < 0
      || (addend > 255 && REGNO (newbase) != 2)
      || (addendr && GET_CODE (addendr) != CONST_INT))
    {
      /* mov ax, vreg
	 add ax, #imm
	 mov hl, ax */
      EM (emit_insn_before (gen_movhi (AX, base), before));
      EM (emit_insn_before (gen_addhi3 (AX, AX, addendr), before));
      EM (emit_insn_before (gen_movhi (newbase, AX), before));
      record_content (AX, NULL_RTX);
      record_content (newbase, NULL_RTX);

      base = newbase;
      addend = 0;
    }
  else
    {
      /* Small in-range addend: just copy the base register.  */
      base = gen_and_emit_move (newbase, base, before, true);
    }

  if (addend)
    {
      /* BASE now appears inside a PLUS, so its tracked content is no
	 longer a simple register copy.  */
      record_content (base, NULL_RTX);
      base = gen_rtx_PLUS (HImode, base, GEN_INT (addend));
    }

#if DEBUG_ALLOC
  fprintf (stderr, "\033[33m");
  debug_rtx (m);
#endif
  if (need_es)
    m = change_address (m, GET_MODE (m), gen_es_addr (base));
  else
    m = change_address (m, GET_MODE (m), base);
#if DEBUG_ALLOC
  debug_rtx (m);
  fprintf (stderr, "\033[0m");
#endif
  return m;
}
+
+/* Copy SRC to accumulator (A or AX), placing any generated insns
+ before BEFORE. Returns accumulator RTX. */
+static rtx
+move_to_acc (int opno, rtx before)
+{
+ rtx src = OP (opno);
+ enum machine_mode mode = GET_MODE (src);
+
+ if (REG_P (src) && REGNO (src) < 2)
+ return src;
+
+ if (mode == VOIDmode)
+ mode = recog_data.operand_mode[opno];
+
+ return gen_and_emit_move (mode == QImode ? A : AX, src, before, true);
+}
+
+static void
+force_into_acc (rtx src, rtx before)
+{
+ enum machine_mode mode = GET_MODE (src);
+ rtx move;
+
+ if (REG_P (src) && REGNO (src) < 2)
+ return;
+
+ move = mode == QImode ? gen_movqi (A, src) : gen_movhi (AX, src);
+
+ EM (move);
+
+ emit_insn_before (move, before);
+ record_content (AX, NULL_RTX);
+}
+
+/* Copy accumulator (A or AX) to DEST, placing any generated insns
+ after AFTER. Returns accumulator RTX. */
+static rtx
+move_from_acc (unsigned int opno, rtx after)
+{
+ rtx dest = OP (opno);
+ enum machine_mode mode = GET_MODE (dest);
+
+ if (REG_P (dest) && REGNO (dest) < 2)
+ return dest;
+
+ return gen_and_emit_move (dest, mode == QImode ? A : AX, after, false);
+}
+
+/* Copy accumulator (A or AX) to REGNO, placing any generated insns
+ before BEFORE. Returns reg RTX. */
+static rtx
+move_acc_to_reg (rtx acc, int regno, rtx before)
+{
+ enum machine_mode mode = GET_MODE (acc);
+ rtx reg;
+
+ reg = gen_rtx_REG (mode, regno);
+
+ return gen_and_emit_move (reg, acc, before, true);
+}
+
+/* Copy SRC to X, placing any generated insns before BEFORE.
+ Returns X RTX. */
static rtx
move_to_x (int opno, rtx before)
{
  rtx src = OP (opno);
  enum machine_mode mode = GET_MODE (src);
  rtx reg;

  if (mode == VOIDmode)
    mode = recog_data.operand_mode[opno];
  reg = (mode == QImode) ? X : AX;

  /* QImode values and non-virtual operands are staged through the
     accumulator before landing in X.  */
  if (mode == QImode || ! is_virtual_register (OP (opno)))
    {
      OP (opno) = move_to_acc (opno, before);
      OP (opno) = move_acc_to_reg (OP (opno), X_REG, before);
      return reg;
    }

  /* Virtual HImode registers can be moved directly.  */
  return gen_and_emit_move (reg, src, before, true);
}
+
+/* Copy OP (opno) to L (QImode) or HL (wider), placing any generated
+   insns before BEFORE.  Returns the L/HL RTX.  (The QImode half used
+   is L, not H -- see the L_REG reload below.)  */
+static rtx
+move_to_hl (int opno, rtx before)
+{
+  rtx src = OP (opno);
+  enum machine_mode mode = GET_MODE (src);
+  rtx reg;
+
+  if (mode == VOIDmode)
+    mode = recog_data.operand_mode[opno];
+  reg = (mode == QImode) ? L : HL;
+
+  /* QImode values and non-virtual sources must be bounced through
+     the accumulator.  */
+  if (mode == QImode || ! is_virtual_register (OP (opno)))
+    {
+      OP (opno) = move_to_acc (opno, before);
+      OP (opno) = move_acc_to_reg (OP (opno), L_REG, before);
+      return reg;
+    }
+
+  return gen_and_emit_move (reg, src, before, true);
+}
+
+/* Copy OP (opno) to E (QImode) or DE (wider), placing any generated
+   insns before BEFORE.  Returns the E/DE RTX.  */
+static rtx
+move_to_de (int opno, rtx before)
+{
+  rtx src = OP (opno);
+  enum machine_mode mode = GET_MODE (src);
+  rtx reg;
+
+  if (mode == VOIDmode)
+    mode = recog_data.operand_mode[opno];
+
+  reg = (mode == QImode) ? E : DE;
+
+  /* As with move_to_hl: QImode and non-virtual sources go via the
+     accumulator into E.  */
+  if (mode == QImode || ! is_virtual_register (OP (opno)))
+    {
+      OP (opno) = move_to_acc (opno, before);
+      OP (opno) = move_acc_to_reg (OP (opno), E_REG, before);
+    }
+  else
+    {
+      gen_and_emit_move (reg, src, before, true);
+    }
+
+  return reg;
+}
+
+/* Devirtualize an insn of the form (SET (op) (unop (op))).  */
+static void
+rl78_alloc_physical_registers_op1 (rtx insn)
+{
+  /* op[0] = func op[1] */
+
+  /* We first try using A as the destination, then copying it
+     back.  */
+  if (rtx_equal_p (OP (0), OP (1)))
+    {
+      /* In-place operation: one base register (DE) suffices for the
+	 single memory address.  */
+      OP (0) =
+      OP (1) = transcode_memory_rtx (OP (1), DE, insn);
+    }
+  else
+    {
+      /* If necessary, load the operands into BC and HL.
+	 Check to see if we already have OP (0) in HL
+	 and if so, swap the order.  */
+      if (MEM_P (OP (0))
+	  && already_contains (HL, XEXP (OP (0), 0)))
+	{
+	  OP (0) = transcode_memory_rtx (OP (0), HL, insn);
+	  OP (1) = transcode_memory_rtx (OP (1), BC, insn);
+	}
+      else
+	{
+	  OP (0) = transcode_memory_rtx (OP (0), BC, insn);
+	  OP (1) = transcode_memory_rtx (OP (1), HL, insn);
+	}
+    }
+
+  /* Rewriting the addresses alone may already have made the insn
+     match a real pattern.  */
+  MAYBE_OK (insn);
+
+  OP (0) = move_from_acc (0, insn);
+
+  MAYBE_OK (insn);
+
+  /* Try copying the src to acc first, then.  This is for, for
+     example, ZERO_EXTEND or NOT.  */
+  OP (1) = move_to_acc (1, insn);
+
+  MUST_BE_OK (insn);
+}
+
+/* Returns true if operand OPNUM contains a constraint of type CONSTRAINT.
+   Assumes that the current insn has already been recognised and hence the
+   constraint data has been filled in.  */
+static bool
+has_constraint (unsigned int opnum, enum constraint_num constraint)
+{
+  const char * p = recog_data.constraints[opnum];
+
+  /* No constraints means anything is accepted.  */
+  if (p == NULL || *p == 0 || *p == ',')
+    return true;
+
+  /* Scan the (possibly multi-character) constraints of the first
+     alternative only; a NUL or ',' terminates that alternative.  */
+  do
+    {
+      char c;
+      unsigned int len;
+
+      c = *p;
+      len = CONSTRAINT_LEN (c, p);
+      gcc_assert (len > 0);
+
+      switch (c)
+	{
+	case 0:
+	case ',':
+	  /* End of the first alternative: CONSTRAINT was not found.  */
+	  return false;
+	default:
+	  if (lookup_constraint (p) == constraint)
+	    return true;
+	}
+      p += len;
+    }
+  while (1);
+}
+
+/* Devirtualize an insn of the form (SET (op) (binop (op) (op))).  */
+static void
+rl78_alloc_physical_registers_op2 (rtx insn)
+{
+  rtx prev;
+  rtx first;
+  bool hl_used;
+  int tmp_id;
+  rtx saved_op1;
+
+  /* Rewrite any far/virtual memory operands so that each distinct
+     address gets its own base register (DE, HL or BC).  Shared
+     operands share a base register.  */
+  if (rtx_equal_p (OP (0), OP (1)))
+    {
+      OP (0) =
+      OP (1) = transcode_memory_rtx (OP (1), DE, insn);
+      OP (2) = transcode_memory_rtx (OP (2), HL, insn);
+    }
+  else if (rtx_equal_p (OP (0), OP (2)))
+    {
+      OP (1) = transcode_memory_rtx (OP (1), DE, insn);
+      OP (0) =
+      OP (2) = transcode_memory_rtx (OP (2), HL, insn);
+    }
+  else
+    {
+      OP (0) = transcode_memory_rtx (OP (0), BC, insn);
+      OP (1) = transcode_memory_rtx (OP (1), DE, insn);
+      OP (2) = transcode_memory_rtx (OP (2), HL, insn);
+    }
+
+  MAYBE_OK (insn);
+
+  prev = prev_nonnote_nondebug_insn (insn);
+  /* For a commutative operation ('%' on operand 1), prefer the
+     non-virtual, non-constant operand as op1 so fewer reloads are
+     needed below.  */
+  if (recog_data.constraints[1][0] == '%'
+      && is_virtual_register (OP (1))
+      && ! is_virtual_register (OP (2))
+      && ! CONSTANT_P (OP (2)))
+    {
+      rtx tmp = OP (1);
+      OP (1) = OP (2);
+      OP (2) = tmp;
+    }
+
+  /* Make a note of whether (H)L is being used.  It matters
+     because if OP (2) also needs reloading, then we must take
+     care not to corrupt HL.  */
+  hl_used = reg_mentioned_p (L, OP (0)) || reg_mentioned_p (L, OP (1));
+
+  /* If HL is not currently being used and dest == op1 then there are
+     some possible optimizations available by reloading one of the
+     operands into HL, before trying to use the accumulator.  */
+  if (optimize
+      && ! hl_used
+      && rtx_equal_p (OP (0), OP (1)))
+    {
+      /* If op0 is a Ws1 type memory address then switching the base
+	 address register to HL might allow us to perform an in-memory
+	 operation.  (eg for the INCW instruction).
+
+	 FIXME: Adding the move into HL is costly if this optimization is not
+	 going to work, so for now, make sure that we know that the new insn will
+	 match the requirements of the addhi3_real pattern.  Really we ought to
+	 generate a candidate sequence, test that, and then install it if the
+	 results are good.  */
+      if (satisfies_constraint_Ws1 (OP (0))
+	  && has_constraint (0, CONSTRAINT_Wh1)
+	  && (satisfies_constraint_K (OP (2)) || satisfies_constraint_L (OP (2))))
+	{
+	  rtx base, index, addend, newbase;
+
+	  characterize_address (XEXP (OP (0), 0), & base, & index, & addend);
+	  gcc_assert (index == NULL_RTX);
+	  gcc_assert (REG_P (base) && REGNO (base) == SP_REG);
+
+	  /* Ws1 addressing allows an offset of 0, Wh1 addressing requires a non-zero offset.  */
+	  if (addend != NULL_RTX)
+	    {
+	      newbase = gen_and_emit_move (HL, base, insn, true);
+	      record_content (newbase, NULL_RTX);
+	      newbase = gen_rtx_PLUS (HImode, newbase, addend);
+
+	      OP (0) = OP (1) = change_address (OP (0), VOIDmode, newbase);
+
+	      /* We do not want to fail here as this means that
+		 we have inserted useless insns into the stream.  */
+	      MUST_BE_OK (insn);
+	    }
+	}
+      else if (REG_P (OP (0))
+	       && satisfies_constraint_Ws1 (OP (2))
+	       && has_constraint (2, CONSTRAINT_Wh1))
+	{
+	  rtx base, index, addend, newbase;
+
+	  characterize_address (XEXP (OP (2), 0), & base, & index, & addend);
+	  gcc_assert (index == NULL_RTX);
+	  gcc_assert (REG_P (base) && REGNO (base) == SP_REG);
+
+	  /* Ws1 addressing allows an offset of 0, Wh1 addressing requires a non-zero offset.  */
+	  if (addend != NULL_RTX)
+	    {
+	      gen_and_emit_move (HL, base, insn, true);
+
+	      if (REGNO (OP (0)) != X_REG)
+		{
+		  OP (1) = move_to_acc (1, insn);
+		  OP (0) = move_from_acc (0, insn);
+		}
+
+	      record_content (HL, NULL_RTX);
+	      newbase = gen_rtx_PLUS (HImode, HL, addend);
+
+	      OP (2) = change_address (OP (2), VOIDmode, newbase);
+
+	      /* We do not want to fail here as this means that
+		 we have inserted useless insns into the stream.  */
+	      MUST_BE_OK (insn);
+	    }
+	}
+    }
+
+  OP (0) = move_from_acc (0, insn);
+
+  /* Remember the insn count so we can tell whether move_to_acc
+     actually emitted anything.  */
+  tmp_id = get_max_insn_count ();
+  saved_op1 = OP (1);
+
+  if (rtx_equal_p (OP (1), OP (2)))
+    OP (2) = OP (1) = move_to_acc (1, insn);
+  else
+    OP (1) = move_to_acc (1, insn);
+
+  MAYBE_OK (insn);
+
+  /* If we omitted the move of OP1 into the accumulator (because
+     it was already there from a previous insn), then force the
+     generation of the move instruction now.  We know that we
+     are about to emit a move into HL (or DE) via AX, and hence
+     our optimization to remove the load of OP1 is no longer valid.  */
+  if (tmp_id == get_max_insn_count ())
+    force_into_acc (saved_op1, insn);
+
+  /* We have to copy op2 to HL (or DE), but that involves AX, which
+     already has a live value.  Emit it before those insns.  */
+
+  if (prev)
+    first = next_nonnote_nondebug_insn (prev);
+  else
+    /* INSN was the first real insn; walk back to the stream start.  */
+    for (first = insn; prev_nonnote_nondebug_insn (first); first = prev_nonnote_nondebug_insn (first))
+      ;
+
+  OP (2) = hl_used ? move_to_de (2, first) : move_to_hl (2, first);
+
+  MUST_BE_OK (insn);
+}
+
+/* Devirtualize an insn of the form SET (PC) (MEM/REG), i.e. an
+   indirect branch: the sole operand just needs to be readable via a
+   real base register, or failing that, the accumulator.  */
+static void
+rl78_alloc_physical_registers_ro1 (rtx insn)
+{
+  OP (0) = transcode_memory_rtx (OP (0), BC, insn);
+
+  MAYBE_OK (insn);
+
+  OP (0) = move_to_acc (0, insn);
+
+  MUST_BE_OK (insn);
+}
+
+/* Devirtualize a compare insn.  Operands: 1 and 2 are the values
+   being compared, 3 is the branch target label.  */
+static void
+rl78_alloc_physical_registers_cmp (rtx insn)
+{
+  int tmp_id;
+  rtx saved_op1;
+  rtx prev = prev_nonnote_nondebug_insn (insn);
+  rtx first;
+
+  OP (1) = transcode_memory_rtx (OP (1), DE, insn);
+  OP (2) = transcode_memory_rtx (OP (2), HL, insn);
+
+  /* HI compares have to have OP (1) in AX, but QI
+     compares do not, so it is worth checking here.  */
+  MAYBE_OK (insn);
+
+  /* For an HImode compare, OP (1) must always be in AX.
+     But if OP (1) is a REG (and not AX), then we can avoid
+     a reload of OP (1) if we reload OP (2) into AX and invert
+     the comparison.  */
+  if (REG_P (OP (1))
+      && REGNO (OP (1)) != AX_REG
+      && GET_MODE (OP (1)) == HImode
+      && MEM_P (OP (2)))
+    {
+      rtx cmp = XEXP (SET_SRC (PATTERN (insn)), 0);
+
+      OP (2) = move_to_acc (2, insn);
+
+      /* Swap the comparison code to match the swapped operands.
+	 EQ/NE are symmetric and need no change.  */
+      switch (GET_CODE (cmp))
+	{
+	case EQ:
+	case NE:
+	  break;
+	case LTU: cmp = gen_rtx_GTU (HImode, OP (2), OP (1)); break;
+	case GTU: cmp = gen_rtx_LTU (HImode, OP (2), OP (1)); break;
+	case LEU: cmp = gen_rtx_GEU (HImode, OP (2), OP (1)); break;
+	case GEU: cmp = gen_rtx_LEU (HImode, OP (2), OP (1)); break;
+
+	/* Signed comparisons should never reach here in HImode.  */
+	case LT:
+	case GT:
+	case LE:
+	case GE:
+#if DEBUG_ALLOC
+	  debug_rtx (insn);
+#endif
+	  /* Fall through.  */
+	default:
+	  gcc_unreachable ();
+	}
+
+      if (GET_CODE (cmp) == EQ || GET_CODE (cmp) == NE)
+	PATTERN (insn) = gen_cbranchhi4_real (cmp, OP (2), OP (1), OP (3));
+      else
+	PATTERN (insn) = gen_cbranchhi4_real_inverted (cmp, OP (2), OP (1), OP (3));
+
+      MUST_BE_OK (insn);
+    }
+
+  /* Surprisingly, gcc can generate a comparison of a register with itself, but this
+     should be handled by the second alternative of the cbranchhi_real pattern.  */
+  if (rtx_equal_p (OP (1), OP (2)))
+    {
+      OP (1) = OP (2) = BC;
+      MUST_BE_OK (insn);
+    }
+
+  /* Remember the insn count so we can tell whether move_to_acc
+     actually emitted anything.  */
+  tmp_id = get_max_insn_count ();
+  saved_op1 = OP (1);
+
+  OP (1) = move_to_acc (1, insn);
+
+  MAYBE_OK (insn);
+
+  /* If we omitted the move of OP1 into the accumulator (because
+     it was already there from a previous insn), then force the
+     generation of the move instruction now.  We know that we
+     are about to emit a move into HL via AX, and hence our
+     optimization to remove the load of OP1 is no longer valid.  */
+  if (tmp_id == get_max_insn_count ())
+    force_into_acc (saved_op1, insn);
+
+  /* We have to copy op2 to HL, but that involves the acc, which
+     already has a live value.  Emit it before those insns.  */
+  if (prev)
+    first = next_nonnote_nondebug_insn (prev);
+  else
+    for (first = insn; prev_nonnote_nondebug_insn (first); first = prev_nonnote_nondebug_insn (first))
+      ;
+  OP (2) = move_to_hl (2, first);
+
+  MUST_BE_OK (insn);
+}
+
+/* Like op2, but AX = A * X.  The RL78 MULU instruction multiplies A
+   by X, so op1 must end up in A and op2 in X.  */
+static void
+rl78_alloc_physical_registers_umul (rtx insn)
+{
+  rtx prev = prev_nonnote_nondebug_insn (insn);
+  rtx first;
+  int tmp_id;
+  rtx saved_op1;
+
+  OP (0) = transcode_memory_rtx (OP (0), BC, insn);
+  OP (1) = transcode_memory_rtx (OP (1), DE, insn);
+  OP (2) = transcode_memory_rtx (OP (2), HL, insn);
+
+  MAYBE_OK (insn);
+
+  /* Multiplication is commutative; prefer the non-virtual,
+     non-constant operand as op1 to minimize reloads.  */
+  if (recog_data.constraints[1][0] == '%'
+      && is_virtual_register (OP (1))
+      && !is_virtual_register (OP (2))
+      && !CONSTANT_P (OP (2)))
+    {
+      rtx tmp = OP (1);
+      OP (1) = OP (2);
+      OP (2) = tmp;
+    }
+
+  OP (0) = move_from_acc (0, insn);
+
+  /* Remember the insn count so we can tell whether the op1 load
+     below actually emitted anything.  */
+  tmp_id = get_max_insn_count ();
+  saved_op1 = OP (1);
+
+  if (rtx_equal_p (OP (1), OP (2)))
+    {
+      gcc_assert (GET_MODE (OP (2)) == QImode);
+      /* The MULU instruction does not support duplicate arguments
+	 but we know that if we copy OP (2) to X it will do so via
+	 A and thus OP (1) will already be loaded into A.  */
+      OP (2) = move_to_x (2, insn);
+      OP (1) = A;
+    }
+  else
+    OP (1) = move_to_acc (1, insn);
+
+  MAYBE_OK (insn);
+
+  /* If we omitted the move of OP1 into the accumulator (because
+     it was already there from a previous insn), then force the
+     generation of the move instruction now.  We know that we
+     are about to emit a move into HL (or DE) via AX, and hence
+     our optimization to remove the load of OP1 is no longer valid.  */
+  if (tmp_id == get_max_insn_count ())
+    force_into_acc (saved_op1, insn);
+
+  /* We have to copy op2 to X, but that involves the acc, which
+     already has a live value.  Emit it before those insns.  */
+
+  if (prev)
+    first = next_nonnote_nondebug_insn (prev);
+  else
+    for (first = insn; prev_nonnote_nondebug_insn (first); first = prev_nonnote_nondebug_insn (first))
+      ;
+  OP (2) = move_to_x (2, first);
+
+  MUST_BE_OK (insn);
+}
+
+/* Devirtualize the memory addresses of an insn that expands to a
+   macro clobbering AX (e.g. the ADDSI3 helpers).  Up to three
+   distinct MEM operands are rewritten to use HL, DE and BC
+   respectively, since two different MEMs with the same base register
+   are not allowed.  */
+static void
+rl78_alloc_address_registers_macax (rtx insn)
+{
+  int which, op;
+  bool replace_in_op0 = false;
+  bool replace_in_op1 = false;
+
+  MAYBE_OK (insn);
+
+  /* Two different MEMs are not allowed.  */
+  which = 0;
+  /* Scan operands highest-first so that a MEM shared with op0/op1 is
+     transcoded once and then propagated to the lower operands.  */
+  for (op = 2; op >= 0; op --)
+    {
+      if (MEM_P (OP (op)))
+	{
+	  /* Already handled via replace_in_op* propagation below.  */
+	  if (op == 0 && replace_in_op0)
+	    continue;
+	  if (op == 1 && replace_in_op1)
+	    continue;
+
+	  switch (which)
+	    {
+	    case 0:
+	      /* If we replace a MEM, make sure that we replace it for all
+		 occurrences of the same MEM in the insn.  */
+	      replace_in_op0 = (op > 0 && rtx_equal_p (OP (op), OP (0)));
+	      replace_in_op1 = (op > 1 && rtx_equal_p (OP (op), OP (1)));
+
+	      OP (op) = transcode_memory_rtx (OP (op), HL, insn);
+	      /* SP-relative addresses cannot be used by the macro, so
+		 copy SP into HL and rebase the address on that.  */
+	      if (op == 2
+		  && MEM_P (OP (op))
+		  && ((GET_CODE (XEXP (OP (op), 0)) == REG
+		       && REGNO (XEXP (OP (op), 0)) == SP_REG)
+		      || (GET_CODE (XEXP (OP (op), 0)) == PLUS
+			  && REGNO (XEXP (XEXP (OP (op), 0), 0)) == SP_REG)))
+		{
+		  emit_insn_before (gen_movhi (HL, gen_rtx_REG (HImode, SP_REG)), insn);
+		  OP (op) = replace_rtx (OP (op), gen_rtx_REG (HImode, SP_REG), HL);
+		}
+	      if (replace_in_op0)
+		OP (0) = OP (op);
+	      if (replace_in_op1)
+		OP (1) = OP (op);
+	      break;
+	    case 1:
+	      OP (op) = transcode_memory_rtx (OP (op), DE, insn);
+	      break;
+	    case 2:
+	      OP (op) = transcode_memory_rtx (OP (op), BC, insn);
+	      break;
+	    }
+	  which ++;
+	}
+    }
+
+  MUST_BE_OK (insn);
+}
+
+/* Scan all insns and devirtualize them.  */
+static void
+rl78_alloc_physical_registers (void)
+{
+  /* During most of the compile, gcc is dealing with virtual
+     registers.  At this point, we need to assign physical registers
+     to the virtual ones, and copy in/out as needed.  */
+
+  rtx insn, curr;
+  enum attr_valloc valloc_method;
+
+  /* First pass: make sure every SET/CALL insn has a valid insn code,
+     since the valloc attribute lookup below requires one.  */
+  for (insn = get_insns (); insn; insn = curr)
+    {
+      int i;
+
+      curr = next_nonnote_nondebug_insn (insn);
+
+      if (INSN_P (insn)
+	  && (GET_CODE (PATTERN (insn)) == SET
+	      || GET_CODE (PATTERN (insn)) == CALL)
+	  && INSN_CODE (insn) == -1)
+	{
+	  if (GET_CODE (SET_SRC (PATTERN (insn))) == ASM_OPERANDS)
+	    continue;
+	  i = recog (PATTERN (insn), insn, 0);
+	  if (i == -1)
+	    {
+	      debug_rtx (insn);
+	      gcc_unreachable ();
+	    }
+	  INSN_CODE (insn) = i;
+	}
+    }
+
+  /* From here on only the *_real patterns may be used.  */
+  cfun->machine->virt_insns_ok = 0;
+  cfun->machine->real_insns_ok = 1;
+
+  clear_content_memory ();
+
+  for (insn = get_insns (); insn; insn = curr)
+    {
+      rtx pattern;
+
+      /* NOTE(review): INSN is never NULL inside this loop, so the
+	 conditional here is redundant -- kept as-is.  */
+      curr = insn ? next_nonnote_nondebug_insn (insn) : NULL;
+
+      if (!INSN_P (insn))
+	{
+	  /* Control may arrive at a label from anywhere, so nothing
+	     can be assumed about register contents after one.  */
+	  if (LABEL_P (insn))
+	    clear_content_memory ();
+
+	  continue;
+	}
+
+      if (dump_file)
+	fprintf (dump_file, "Converting insn %d\n", INSN_UID (insn));
+
+      pattern = PATTERN (insn);
+      if (GET_CODE (pattern) == PARALLEL)
+	pattern = XVECEXP (pattern, 0, 0);
+      /* Calls and jumps invalidate everything we know about
+	 register contents.  */
+      if (JUMP_P (insn) || CALL_P (insn) || GET_CODE (pattern) == CALL)
+	clear_content_memory ();
+      if (GET_CODE (pattern) != SET
+	  && GET_CODE (pattern) != CALL)
+	continue;
+      if (GET_CODE (pattern) == SET
+	  && GET_CODE (SET_SRC (pattern)) == ASM_OPERANDS)
+	continue;
+
+      valloc_method = get_attr_valloc (insn);
+
+      PATTERN (insn) = copy_rtx_if_shared (PATTERN (insn));
+
+      if (valloc_method == VALLOC_MACAX)
+	{
+	  record_content (AX, NULL_RTX);
+	  record_content (BC, NULL_RTX);
+	  record_content (DE, NULL_RTX);
+	}
+
+      if (insn_ok_now (insn))
+	continue;
+
+      INSN_CODE (insn) = -1;
+
+      if (RTX_FRAME_RELATED_P (insn))
+	virt_insn_was_frame = 1;
+      else
+	virt_insn_was_frame = 0;
+
+      /* Dispatch on the insn's valloc attribute, which describes the
+	 operand shape the devirtualizer must handle.  */
+      switch (valloc_method)
+	{
+	case VALLOC_OP1:
+	  rl78_alloc_physical_registers_op1 (insn);
+	  break;
+	case VALLOC_OP2:
+	  rl78_alloc_physical_registers_op2 (insn);
+	  break;
+	case VALLOC_RO1:
+	  rl78_alloc_physical_registers_ro1 (insn);
+	  break;
+	case VALLOC_CMP:
+	  rl78_alloc_physical_registers_cmp (insn);
+	  break;
+	case VALLOC_UMUL:
+	  rl78_alloc_physical_registers_umul (insn);
+	  break;
+	case VALLOC_MACAX:
+	  /* Macro that clobbers AX.  */
+	  rl78_alloc_address_registers_macax (insn);
+	  record_content (AX, NULL_RTX);
+	  record_content (BC, NULL_RTX);
+	  record_content (DE, NULL_RTX);
+	  break;
+	}
+
+      if (JUMP_P (insn) || CALL_P (insn) || GET_CODE (pattern) == CALL)
+	clear_content_memory ();
+      else
+	process_postponed_content_update ();
+    }
+
+#if DEBUG_ALLOC
+  fprintf (stderr, "\033[0m");
+#endif
+}
+
+/* Add REG_DEAD notes using DEAD[reg] for rtx S which is part of INSN.
+   This function scans for uses of registers; the last use (i.e. first
+   encounter when scanning backwards) triggers a REG_DEAD note if the
+   reg was previously in DEAD[].  */
+static void
+rl78_note_reg_uses (char *dead, rtx s, rtx insn)
+{
+  const char *fmt;
+  int i, r;
+  enum rtx_code code;
+
+  if (!s)
+    return;
+
+  code = GET_CODE (s);
+
+  switch (code)
+    {
+    /* Compare registers by number.  */
+    case REG:
+      r = REGNO (s);
+      if (dump_file)
+	{
+	  fprintf (dump_file, "note use reg %d size %d on insn %d\n",
+		   r, GET_MODE_SIZE (GET_MODE (s)), INSN_UID (insn));
+	  print_rtl_single (dump_file, s);
+	}
+      if (dead [r])
+	add_reg_note (insn, REG_DEAD, gen_rtx_REG (GET_MODE (s), r));
+      /* This use makes every byte of the register live (scanning
+	 backwards), so clear its dead flags.  */
+      for (i = 0; i < GET_MODE_SIZE (GET_MODE (s)); i ++)
+	dead [r + i] = 0;
+      return;
+
+    /* These codes have no constituent expressions
+       and are unique.  */
+    case SCRATCH:
+    case CC0:
+    case PC:
+      return;
+
+    case CONST_INT:
+    case CONST_VECTOR:
+    case CONST_DOUBLE:
+    case CONST_FIXED:
+      /* These are kept unique for a given value.  */
+      return;
+
+    default:
+      break;
+    }
+
+  /* Recurse into any sub-expressions.  */
+  fmt = GET_RTX_FORMAT (code);
+
+  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+    {
+      if (fmt[i] == 'E')
+	{
+	  int j;
+	  for (j = XVECLEN (s, i) - 1; j >= 0; j--)
+	    rl78_note_reg_uses (dead, XVECEXP (s, i, j), insn);
+	}
+      else if (fmt[i] == 'e')
+	rl78_note_reg_uses (dead, XEXP (s, i), insn);
+    }
+}
+
+/* Like the previous function, but scan for SETs instead.  A set of a
+   register that is already dead gets a REG_UNUSED note; afterwards
+   every byte of the register is marked dead (we are scanning
+   backwards, so a set kills the value for earlier insns).  */
+static void
+rl78_note_reg_set (char *dead, rtx d, rtx insn)
+{
+  int r, i;
+
+  /* Only plain register destinations are tracked (not MEM,
+     SUBREG, etc.).  */
+  if (GET_CODE (d) != REG)
+    return;
+
+  r = REGNO (d);
+  if (dead [r])
+    add_reg_note (insn, REG_UNUSED, gen_rtx_REG (GET_MODE (d), r));
+  if (dump_file)
+    fprintf (dump_file, "note set reg %d size %d\n", r, GET_MODE_SIZE (GET_MODE (d)));
+  for (i = 0; i < GET_MODE_SIZE (GET_MODE (d)); i ++)
+    dead [r + i] = 1;
+}
+
+/* This is a rather crude register death pass.  Death status is reset
+   at every jump or call insn.  The insn stream is walked backwards,
+   with DEAD[regno] tracking per-byte liveness.  */
+static void
+rl78_calculate_death_notes (void)
+{
+  char dead[FIRST_PSEUDO_REGISTER];
+  rtx insn, p, s, d;
+  int i;
+
+  memset (dead, 0, sizeof (dead));
+
+  for (insn = get_last_insn ();
+       insn;
+       insn = prev_nonnote_nondebug_insn (insn))
+    {
+      if (dump_file)
+	{
+	  fprintf (dump_file, "\n--------------------------------------------------");
+	  fprintf (dump_file, "\nDead:");
+	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i ++)
+	    if (dead[i])
+	      fprintf (dump_file, " %s", reg_names[i]);
+	  fprintf (dump_file, "\n");
+	  print_rtl_single (dump_file, insn);
+	}
+
+      switch (GET_CODE (insn))
+	{
+	case INSN:
+	  p = PATTERN (insn);
+	  switch (GET_CODE (p))
+	    {
+	    case SET:
+	      s = SET_SRC (p);
+	      d = SET_DEST (p);
+	      /* Note the set first: a reg both set and used in the
+		 same insn must not appear dead to the use scan.  */
+	      rl78_note_reg_set (dead, d, insn);
+	      rl78_note_reg_uses (dead, s, insn);
+	      break;
+
+	    case USE:
+	      rl78_note_reg_uses (dead, p, insn);
+	      break;
+
+	    default:
+	      break;
+	    }
+	  break;
+
+	case JUMP_INSN:
+	  if (INSN_CODE (insn) == CODE_FOR_rl78_return)
+	    {
+	      memset (dead, 1, sizeof (dead));
+	      /* We expect a USE just prior to this, which will mark
+		 the actual return registers.  The USE will have a
+		 death note, but we aren't going to be modifying it
+		 after this pass.  */
+	      break;
+	    }
+	  /* Fall through - other jumps are treated like calls.  */
+	case CALL_INSN:
+	  memset (dead, 0, sizeof (dead));
+	  break;
+
+	default:
+	  break;
+	}
+      if (dump_file)
+	print_rtl_single (dump_file, insn);
+    }
+}
+
+/* Helper function to reset the origins in RP and the age in AGE for
+   all registers: each register becomes its own origin with age
+   zero.  */
+static void
+reset_origins (int *rp, int *age)
+{
+  int regno;
+
+  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+    {
+      age[regno] = 0;
+      rp[regno] = regno;
+    }
+}
+
+/* The idea behind this optimization is to look for cases where we
+   move data from A to B to C, and instead move from A to B, and A to
+   C.  If B is a virtual register or memory, this is a big win on its
+   own.  If B turns out to be unneeded after this, it's a bigger win.
+   For each register, we try to determine where its value originally
+   came from, if it's propagated purely through moves (and not
+   computes).  The ORIGINS[] array has the regno for the "origin" of
+   the value in the [regno] it's indexed by.  */
+static void
+rl78_propogate_register_origins (void)
+{
+  int origins[FIRST_PSEUDO_REGISTER];
+  int age[FIRST_PSEUDO_REGISTER];
+  int i;
+  rtx insn, ninsn = NULL_RTX;
+  rtx pat;
+
+  reset_origins (origins, age);
+
+  for (insn = get_insns (); insn; insn = ninsn)
+    {
+      ninsn = next_nonnote_nondebug_insn (insn);
+
+      if (dump_file)
+	{
+	  fprintf (dump_file, "\n");
+	  fprintf (dump_file, "Origins:");
+	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i ++)
+	    if (origins[i] != i)
+	      fprintf (dump_file, " r%d=r%d", i, origins[i]);
+	  fprintf (dump_file, "\n");
+	  print_rtl_single (dump_file, insn);
+	}
+
+      switch (GET_CODE (insn))
+	{
+	/* Control flow joins invalidate all origin tracking.  */
+	case CODE_LABEL:
+	case BARRIER:
+	case CALL_INSN:
+	case JUMP_INSN:
+	  reset_origins (origins, age);
+	  break;
+
+	default:
+	  break;
+
+	case INSN:
+	  pat = PATTERN (insn);
+
+	  if (GET_CODE (pat) == PARALLEL)
+	    {
+	      /* Only handle the (set) (clobber) shape; any other
+		 PARALLEL is left untracked.  */
+	      rtx clobber = XVECEXP (pat, 0, 1);
+	      pat = XVECEXP (pat, 0, 0);
+	      if (GET_CODE (clobber) == CLOBBER
+		  && GET_CODE (XEXP (clobber, 0)) == REG)
+		{
+		  int cr = REGNO (XEXP (clobber, 0));
+		  int mb = GET_MODE_SIZE (GET_MODE (XEXP (clobber, 0)));
+		  if (dump_file)
+		    fprintf (dump_file, "reset origins of %d regs at %d\n", mb, cr);
+		  for (i = 0; i < mb; i++)
+		    {
+		      origins[cr + i] = cr + i;
+		      age[cr + i] = 0;
+		    }
+		}
+	      else
+		break;
+	    }
+
+	  if (GET_CODE (pat) == SET)
+	    {
+	      rtx src = SET_SRC (pat);
+	      rtx dest = SET_DEST (pat);
+	      int mb = GET_MODE_SIZE (GET_MODE (dest));
+
+	      if (GET_CODE (dest) == REG)
+		{
+		  int dr = REGNO (dest);
+
+		  if (GET_CODE (src) == REG)
+		    {
+		      int sr = REGNO (src);
+		      int same = 1;
+		      int best_age, best_reg;
+
+		      /* See if the copy is not needed.  */
+		      for (i = 0; i < mb; i ++)
+			if (origins[dr + i] != origins[sr + i])
+			  same = 0;
+		      if (same)
+			{
+			  if (dump_file)
+			    fprintf (dump_file, "deleting because dest already has correct value\n");
+			  delete_insn (insn);
+			  break;
+			}
+
+		      if (dr < 8 || sr >= 8)
+			{
+			  int ar;
+
+			  best_age = -1;
+			  best_reg = -1;
+			  /* See if the copy can be made from another
+			     bank 0 register instead, instead of the
+			     virtual src register.  */
+			  for (ar = 0; ar < 8; ar += mb)
+			    {
+			      same = 1;
+			      for (i = 0; i < mb; i ++)
+				if (origins[ar + i] != origins[sr + i])
+				  same = 0;
+
+			      /* The chip has some reg-reg move limitations.  */
+			      if (mb == 1 && dr > 3)
+				same = 0;
+
+			      if (same)
+				{
+				  /* NOTE(review): at this point I == MB
+				     (the inner loop has completed), so
+				     AGE[SR + I] reads one slot past the
+				     bytes just compared, and BEST_REG is
+				     set to SR -- the source itself -- so
+				     the substitution below replaces SRC
+				     with the same regno.  Presumably AR
+				     was intended in both places; confirm
+				     against upstream before changing.  */
+				  if (best_age == -1 || best_age > age[sr + i])
+				    {
+				      best_age = age[sr + i];
+				      best_reg = sr;
+				    }
+				}
+			    }
+
+			  if (best_reg != -1)
+			    {
+			      /* FIXME: copy debug info too.  */
+			      SET_SRC (pat) = gen_rtx_REG (GET_MODE (src), best_reg);
+			      sr = best_reg;
+			    }
+			}
+
+		      /* The copy propagates the origin, one age step
+			 older.  */
+		      for (i = 0; i < mb; i++)
+			{
+			  origins[dr + i] = origins[sr + i];
+			  age[dr + i] = age[sr + i] + 1;
+			}
+		    }
+		  else
+		    {
+		      /* The destination is computed, its origin is itself.  */
+		      if (dump_file)
+			fprintf (dump_file, "resetting origin of r%d for %d byte%s\n",
+				 dr, mb, mb == 1 ? "" : "s");
+		      for (i = 0; i < mb; i ++)
+			{
+			  origins[dr + i] = dr + i;
+			  age[dr + i] = 0;
+			}
+		    }
+
+		  /* Any registers marked with that reg as an origin are reset.  */
+		  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+		    if (origins[i] >= dr && origins[i] < dr + mb)
+		      {
+			origins[i] = i;
+			age[i] = 0;
+		      }
+		}
+
+	      /* Special case - our ADDSI3 macro uses AX and sometimes BC.  */
+	      if (get_attr_valloc (insn) == VALLOC_MACAX)
+		{
+		  if (dump_file)
+		    fprintf (dump_file, "Resetting origin of AX/BC for macro.\n");
+		  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+		    if (i <= 3 || origins[i] <= 3)
+		      {
+			origins[i] = i;
+			age[i] = 0;
+		      }
+		}
+
+	      if (GET_CODE (src) == ASHIFT
+		  || GET_CODE (src) == ASHIFTRT
+		  || GET_CODE (src) == LSHIFTRT)
+		{
+		  rtx count = XEXP (src, 1);
+		  if (GET_CODE (count) == REG)
+		    {
+		      /* Special case - our pattern clobbers the count register.  */
+		      int r = REGNO (count);
+		      if (dump_file)
+			fprintf (dump_file, "Resetting origin of r%d for shift.\n", r);
+		      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+			if (i == r || origins[i] == r)
+			  {
+			    origins[i] = i;
+			    age[i] = 0;
+			  }
+		    }
+		}
+	    }
+	  else if (GET_CODE (pat) == CLOBBER
+		   && GET_CODE (XEXP (pat, 0)) == REG)
+	    {
+	      /* NOTE(review): this inner REG_P test repeats the
+		 GET_CODE check above and is always true here.  */
+	      if (REG_P (XEXP (pat, 0)))
+		{
+		  unsigned int reg = REGNO (XEXP (pat, 0));
+
+		  origins[reg] = reg;
+		  age[reg] = 0;
+		}
+	    }
+	}
+    }
+}
+
+/* Remove any SETs where the destination is unneeded, i.e. where the
+   destination is a hard register (regno <= 23) carrying a REG_UNUSED
+   note from the death pass above.  */
+static void
+rl78_remove_unused_sets (void)
+{
+  rtx insn, ninsn = NULL_RTX;
+  rtx set, dest;
+
+  for (insn = get_insns (); insn; insn = ninsn)
+    {
+      ninsn = next_nonnote_nondebug_insn (insn);
+
+      /* BUG FIX: the original code did "insn = single_set (insn)",
+	 clobbering the insn pointer with the SET *pattern*, and then
+	 passed that pattern -- not the insn -- to find_regno_note and
+	 delete_insn.  Keep the pattern in a separate variable so the
+	 note lookup and the deletion operate on the insn itself.  */
+      set = single_set (insn);
+      if (set == NULL_RTX)
+	continue;
+
+      dest = SET_DEST (set);
+
+      /* Only real (non-virtual) registers are candidates.  */
+      if (GET_CODE (dest) != REG || REGNO (dest) > 23)
+	continue;
+
+      if (find_regno_note (insn, REG_UNUSED, REGNO (dest)))
+	delete_insn (insn);
+    }
+}
+
+/* This is the top of the devirtualization pass.  */
+static void
+rl78_reorg (void)
+{
+  /* split2 only happens when optimizing, but we need all movSIs to be
+     split now.  */
+  if (optimize <= 0)
+    split_all_insns ();
+
+  rl78_alloc_physical_registers ();
+
+  if (dump_file)
+    {
+      fprintf (dump_file, "\n================DEVIRT:=AFTER=ALLOC=PHYSICAL=REGISTERS================\n");
+      print_rtl_with_bb (dump_file, get_insns (), 0);
+    }
+
+  rl78_propogate_register_origins ();
+  rl78_calculate_death_notes ();
+
+  if (dump_file)
+    {
+      fprintf (dump_file, "\n================DEVIRT:=AFTER=PROPOGATION=============================\n");
+      print_rtl_with_bb (dump_file, get_insns (), 0);
+      fprintf (dump_file, "\n======================================================================\n");
+    }
+
+  rl78_remove_unused_sets ();
+
+  /* The code after devirtualizing has changed so much that at this point
+     we might as well just rescan everything.  Note that
+     df_rescan_all_insns is not going to help here because it does not
+     touch the artificial uses and defs.  */
+  df_finish_pass (true);
+  if (optimize > 1)
+    df_live_add_problem ();
+  df_scan_alloc (NULL);
+  df_scan_blocks ();
+
+  if (optimize)
+    df_analyze ();
+}
+
+#undef  TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY rl78_return_in_memory
+
+/* Return TRUE if values of TYPE are returned in memory rather than
+   in registers: variable-sized types (size -1) and anything larger
+   than 8 bytes.  */
+static bool
+rl78_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+  HOST_WIDE_INT size;
+
+  size = int_size_in_bytes (type);
+  if (size == -1)
+    return true;
+  return size > 8;
+}
+
+
+#undef  TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS rl78_rtx_costs
+
+/* Compute the cost of expression X.  CODE is X's rtx code.  On this
+   target only IF_THEN_ELSE and SImode operations are costed
+   specially; everything else falls back to the defaults (return
+   FALSE).  Returns TRUE when *TOTAL has been set.  */
+static bool rl78_rtx_costs (rtx x,
+			    int code,
+			    int outer_code ATTRIBUTE_UNUSED,
+			    int opno ATTRIBUTE_UNUSED,
+			    int * total,
+			    bool speed ATTRIBUTE_UNUSED)
+{
+  if (code == IF_THEN_ELSE)
+    {
+      /* BUG FIX: this used to be "return COSTS_N_INSNS (10);", which
+	 returned a nonzero value (i.e. TRUE, claiming the cost was
+	 computed) without ever storing anything into *TOTAL.  Set the
+	 cost properly and return TRUE as the hook contract requires.  */
+      *total = COSTS_N_INSNS (10);
+      return true;
+    }
+  if (GET_MODE (x) == SImode)
+    {
+      switch (code)
+	{
+	case MULT:
+	  /* Cost depends on which hardware multiply support was
+	     selected with -mmul=.  */
+	  if (RL78_MUL_RL78)
+	    *total = COSTS_N_INSNS (14);
+	  else if (RL78_MUL_G13)
+	    *total = COSTS_N_INSNS (29);
+	  else
+	    *total = COSTS_N_INSNS (500);
+	  return true;
+	case PLUS:
+	  *total = COSTS_N_INSNS (8);
+	  return true;
+	case ASHIFT:
+	case ASHIFTRT:
+	case LSHIFTRT:
+	  /* Constant shift counts have per-count costs reflecting the
+	     byte/word-move shortcuts available for multiples of 8/16.  */
+	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+	    {
+	      switch (INTVAL (XEXP (x, 1)))
+		{
+		case 0:  *total = COSTS_N_INSNS (0);  break;
+		case 1:  *total = COSTS_N_INSNS (6);  break;
+		case 2: case 3: case 4: case 5: case 6: case 7:
+		  *total = COSTS_N_INSNS (10); break;
+		case 8:  *total = COSTS_N_INSNS (6);  break;
+		case 9: case 10: case 11: case 12: case 13: case 14: case 15:
+		  *total = COSTS_N_INSNS (10); break;
+		case 16: *total = COSTS_N_INSNS (3);  break;
+		case 17: case 18: case 19: case 20: case 21: case 22: case 23:
+		  *total = COSTS_N_INSNS (4);  break;
+		case 24: *total = COSTS_N_INSNS (4);  break;
+		case 25: case 26: case 27: case 28: case 29: case 30: case 31:
+		  *total = COSTS_N_INSNS (5);  break;
+		}
+	    }
+	  else
+	    /* Variable shift: loop of single-bit shifts.  */
+	    *total = COSTS_N_INSNS (10+4*16);
+	  return true;
+	}
+    }
+  return false;
+}
+
+
+#undef  TARGET_UNWIND_WORD_MODE
+#define TARGET_UNWIND_WORD_MODE rl78_unwind_word_mode
+
+/* The RL78 is a 16-bit part; unwind words are HImode regardless of
+   the pointer size in use.  */
+static enum machine_mode
+rl78_unwind_word_mode (void)
+{
+  return HImode;
+}
+
+/* Instantiate the target hook vector from the TARGET_xxx macros
+   defined throughout this file.  */
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-rl78.h"
diff --git a/gcc-4.9/gcc/config/rl78/rl78.h b/gcc-4.9/gcc/config/rl78/rl78.h
new file mode 100644
index 000000000..8dee92be9
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/rl78.h
@@ -0,0 +1,473 @@
+/* GCC backend definitions for the Renesas RL78 processor.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+#define RL78_MUL_NONE (rl78_mul_type == MUL_NONE)
+#define RL78_MUL_RL78 (rl78_mul_type == MUL_RL78)
+#define RL78_MUL_G13 (rl78_mul_type == MUL_G13)
+
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__RL78__"); \
+ builtin_assert ("cpu=RL78"); \
+ if (RL78_MUL_RL78) \
+ builtin_define ("__RL78_MUL_RL78__"); \
+ if (RL78_MUL_G13) \
+ builtin_define ("__RL78_MUL_G13__"); \
+ if (TARGET_G10) \
+ builtin_define ("__RL78_G10__"); \
+ } \
+ while (0)
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{pg:gcrt0.o%s}%{!pg:crt0.o%s} crtbegin.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{mrelax:-relax} \
+%{mg10} \
+"
+
+#undef LINK_SPEC
+#define LINK_SPEC "\
+%{mrelax:-relax} \
+%{!r:--gc-sections} \
+"
+
+#undef LIB_SPEC
+#define LIB_SPEC " \
+--start-group \
+-lc \
+-lsim \
+%{fprofile-arcs|fprofile-generate|coverage:-lgcov} \
+--end-group \
+%{!T*: %{msim:%Trl78-sim.ld}%{!msim:%Trl78.ld}} \
+"
+
+
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN 0
+#define WORDS_BIG_ENDIAN 0
+
+#ifdef IN_LIBGCC2
+/* This is to get correct SI and DI modes in libgcc2.c (32 and 64 bits). */
+#define UNITS_PER_WORD 4
+/* We have a problem with libgcc2. It only defines two versions of
+ each function, one for "int" and one for "long long". Ie it assumes
+ that "sizeof (int) == sizeof (long)". For the RL78 this is not true
+ and we need a third set of functions. We explicitly define
+ LIBGCC2_UNITS_PER_WORD here so that it is clear that we are expecting
+ to get the SI and DI versions from the libgcc2.c sources, and we
+ provide our own set of HI functions, which is why this
+ definition is surrounded by #ifndef..#endif. */
+#ifndef LIBGCC2_UNITS_PER_WORD
+#define LIBGCC2_UNITS_PER_WORD 4
+#endif
+#else
+/* Actual width of a word, in units (bytes). */
+#define UNITS_PER_WORD 1
+#endif
+
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 16
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 32 /*64*/
+#define LONG_DOUBLE_TYPE_SIZE 64 /*DOUBLE_TYPE_SIZE*/
+
+#define LIBGCC2_HAS_DF_MODE 1
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define STRICT_ALIGNMENT 1
+#define FUNCTION_BOUNDARY 8
+#define BIGGEST_ALIGNMENT 16
+#define STACK_BOUNDARY 16
+#define PARM_BOUNDARY 16
+
+#define STACK_GROWS_DOWNWARD 1
+#define FRAME_GROWS_DOWNWARD 1
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+#define MAX_REGS_PER_ADDRESS 1
+
+#define Pmode HImode
+#define POINTER_SIZE 16
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+#define POINTERS_EXTEND_UNSIGNED 1
+#define FUNCTION_MODE HImode
+#define CASE_VECTOR_MODE Pmode
+#define WORD_REGISTER_OPERATIONS 0
+#define HAS_LONG_COND_BRANCH 0
+#define HAS_LONG_UNCOND_BRANCH 0
+
+#define MOVE_MAX 2
+#define STARTING_FRAME_OFFSET 0
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+#define ADDR_SPACE_FAR 1
+
+/* Neither pre-decrement nor post-increment addressing is supported.
+   Typo fix: this was misspelled HAVE_PRE_DECCREMENT, a macro GCC
+   never tests (harmless only because the default is also 0).  */
+#define HAVE_PRE_DECREMENT 0
+#define HAVE_POST_INCREMENT 0
+
+#define MOVE_RATIO(SPEED) ((SPEED) ? 24 : 16)
+#define SLOW_BYTE_ACCESS 0
+
+#define STORE_FLAG_VALUE 1
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+#define SHORT_IMMEDIATES_SIGN_EXTEND 0
+
+
+/* The RL78 has four register banks. Normal operation uses RB0 as
+ real registers, RB1 and RB2 as "virtual" registers (because we know
+ they'll be there, and not used as variables), and RB3 is reserved
+ for interrupt handlers. The virtual registers are accessed as
+ SADDRs:
+
+ FFEE0-FFEE7 RB0
+ FFEE8-FFEEF RB1
+ FFEF0-FFEF7 RB2
+ FFEF8-FFEFF RB3
+*/
+#define REGISTER_NAMES \
+ { \
+ "x", "a", "c", "b", "e", "d", "l", "h", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", \
+ "sp", "ap", "psw", "es", "cs" \
+ }
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+{ "ax", 0 }, \
+{ "bc", 2 }, \
+{ "de", 4 }, \
+{ "hl", 6 }, \
+{ "rp0", 0 }, \
+{ "rp1", 2 }, \
+{ "rp2", 4 }, \
+{ "rp3", 6 }, \
+{ "r0", 0 }, \
+{ "r1", 1 }, \
+{ "r2", 2 }, \
+{ "r3", 3 }, \
+{ "r4", 4 }, \
+{ "r5", 5 }, \
+{ "r6", 6 }, \
+{ "r7", 7 }, \
+}
+
+enum reg_class
+{
+ NO_REGS, /* No registers in set. */
+ XREG,
+ AREG,
+ AXREG,
+ CREG,
+ BREG,
+ BCREG,
+ EREG,
+ DREG,
+ DEREG,
+ LREG,
+ HREG,
+ HLREG,
+ IDX_REGS,
+ QI_REGS,
+ SPREG,
+ R8W_REGS,
+ R10W_REGS,
+ INT_REGS,
+ V_REGS, /* Virtual registers. */
+ GR_REGS, /* Integer registers. */
+ PSWREG,
+ ALL_REGS, /* All registers. */
+ LIM_REG_CLASSES /* Max value + 1. */
+};
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "XREG", \
+ "AREG", \
+ "AXREG", \
+ "CREG", \
+ "BREG", \
+ "BCREG", \
+ "EREG", \
+ "DREG", \
+ "DEREG", \
+ "LREG", \
+ "HREG", \
+ "HLREG", \
+ "IDX_REGS", \
+ "QI_REGS", \
+ "SPREG", \
+ "R8W_REGS", \
+ "R10W_REGS", \
+ "INT_REGS", \
+ "V_REGS", \
+ "GR_REGS", \
+ "PSWREG", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000, 0x00000000 }, /* No registers, */ \
+ { 0x00000001, 0x00000000 }, \
+ { 0x00000002, 0x00000000 }, \
+ { 0x00000003, 0x00000000 }, \
+ { 0x00000004, 0x00000000 }, \
+ { 0x00000008, 0x00000000 }, \
+ { 0x0000000c, 0x00000000 }, \
+ { 0x00000010, 0x00000000 }, \
+ { 0x00000020, 0x00000000 }, \
+ { 0x00000030, 0x00000000 }, \
+ { 0x00000040, 0x00000000 }, \
+ { 0x00000080, 0x00000000 }, \
+ { 0x000000c0, 0x00000000 }, \
+ { 0x0000000c, 0x00000000 }, /* B and C - index regs. */ \
+ { 0x000000ff, 0x00000000 }, /* all real registers. */ \
+ { 0x00000000, 0x00000001 }, /* SP */ \
+ { 0x00000300, 0x00000000 }, /* R8 - HImode */ \
+ { 0x00000c00, 0x00000000 }, /* R10 - HImode */ \
+ { 0xff000000, 0x00000000 }, /* INT - HImode */ \
+ { 0xff7fff00, 0x00000000 }, /* Virtual registers. */ \
+ { 0xff7fffff, 0x00000002 }, /* General registers. */ \
+ { 0x04000000, 0x00000004 }, /* PSW. */ \
+ { 0xff7fffff, 0x0000001f } /* All registers. */ \
+}
+
+#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P hook_bool_mode_true
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+#define CLASS_MAX_NREGS(CLASS, MODE) ((GET_MODE_SIZE (MODE) \
+ + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+#define GENERAL_REGS GR_REGS
+#define BASE_REG_CLASS V_REGS
+#define INDEX_REG_CLASS V_REGS
+
+#define FIRST_PSEUDO_REGISTER 37
+
+#define REGNO_REG_CLASS(REGNO) ((REGNO) < FIRST_PSEUDO_REGISTER \
+ ? GR_REGS : NO_REGS)
+
+#define FRAME_POINTER_REGNUM 22
+#define STACK_POINTER_REGNUM 32
+#define ARG_POINTER_REGNUM 33
+#define CC_REGNUM 34
+#define FUNC_RETURN_REGNUM 8
+#define STATIC_CHAIN_REGNUM 14
+
+/* Trampolines are implemented with a separate data stack. The memory
+ on stack only holds the function pointer for the chosen stub.
+ */
+
+#define TRAMPOLINE_SIZE 4
+#define TRAMPOLINE_ALIGNMENT 16
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM }, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ (OFFSET) = rl78_initial_elimination_offset ((FROM), (TO))
+
+
+#define FUNCTION_ARG_REGNO_P(N) 0
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 8)
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#define FIXED_REGISTERS \
+{ \
+ 1,1,1,1, 1,1,1,1, \
+ 0,0,0,0, 0,0,0,0, \
+ 0,0,0,0, 0,0,1,1, \
+ 1,1,1,1, 1,1,1,1, \
+ 0, 1, 0, 1, 1 \
+}
+
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, 1,1,1,1, \
+ 1,1,1,1, 1,1,1,1, \
+ 0,0,0,0, 0,0,1,1, \
+ 1,1,1,1, 1,1,1,1, \
+ 0, 1, 1, 1, 1 \
+}
+
+#define LIBCALL_VALUE(MODE) \
+ gen_rtx_REG ((MODE), \
+ FUNC_RETURN_REGNUM)
+
+/* Order of allocation of registers. */
+
+#define REG_ALLOC_ORDER \
+ { 8, 9, 10, 11, 12, 13, 14, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 0, 1, 6, 7, 2, 3, 4, 5, \
+ 24, 25, 26, 27, 28, 29, 30, 31, \
+ 32, 33, 34 \
+}
+
+#define REGNO_IN_RANGE(REGNO, MIN, MAX) \
+ (IN_RANGE ((REGNO), (MIN), (MAX)) \
+ || (reg_renumber != NULL \
+ && reg_renumber[(REGNO)] >= (MIN) \
+ && reg_renumber[(REGNO)] <= (MAX)))
+
+#ifdef REG_OK_STRICT
+#define REGNO_OK_FOR_BASE_P(regno) REGNO_IN_RANGE (regno, 16, 31)
+#else
+#define REGNO_OK_FOR_BASE_P(regno) 1
+#endif
+
+#define REGNO_OK_FOR_INDEX_P(regno) REGNO_OK_FOR_BASE_P (regno)
+
+#define REGNO_MODE_CODE_OK_FOR_BASE_P(regno, mode, address_space, outer_code, index_code) \
+ rl78_regno_mode_code_ok_for_base_p (regno, mode, address_space, outer_code, index_code)
+
+#define MODE_CODE_BASE_REG_CLASS(mode, address_space, outer_code, index_code) \
+ rl78_mode_code_base_reg_class (mode, address_space, outer_code, index_code)
+
+#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) \
+ ((COUNT) == 0 \
+ ? gen_rtx_MEM (Pmode, gen_rtx_PLUS (HImode, arg_pointer_rtx, GEN_INT (-4))) \
+ : NULL_RTX)
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_MEM (Pmode, stack_pointer_rtx)
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+typedef unsigned int CUMULATIVE_ARGS;
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ (CUM) = 0
+
+
+/* FIXME */
+#define NO_PROFILE_COUNTERS 1
+#define PROFILE_BEFORE_PROLOGUE 1
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tbsr\t__mcount\n");
+
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ rl78_hard_regno_nregs (REGNO, MODE)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ rl78_hard_regno_mode_ok (REGNO, MODE)
+
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ( ( GET_MODE_CLASS (MODE1) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE1) == MODE_COMPLEX_FLOAT) \
+ == ( GET_MODE_CLASS (MODE2) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE2) == MODE_COMPLEX_FLOAT))
+
+
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+#define CTORS_SECTION_ASM_OP ".section \".ctors\",\"a\""
+#define DTORS_SECTION_ASM_OP ".section \".dtors\",\"a\""
+
+#define ASM_COMMENT_START " ;"
+#define ASM_APP_ON ""
+#define ASM_APP_OFF ""
+#define LOCAL_LABEL_PREFIX ".L"
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+#define GLOBAL_ASM_OP "\t.global\t"
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "\t.long .L%d\n", VALUE)
+
+/* This is how to output an element of a case-vector that is relative.
+   Note: The local label referenced by the "1b" below is emitted by
+   the tablejump insn.  */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, "\t.long .L%d - 1b\n", VALUE)
+
+
+#define ASM_OUTPUT_ALIGN(STREAM, LOG) \
+ do \
+ { \
+ if ((LOG) == 0) \
+ break; \
+ fprintf (STREAM, "\t.balign %d\n", 1 << (LOG)); \
+ } \
+ while (0)
+
+/* For PIC put jump tables into the text section so that the offsets that
+ they contain are always computed between two same-section symbols. */
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+
+/* This is a version of REG_P that also returns TRUE for SUBREGs. */
+#define RL78_REG_P(rtl) (REG_P (rtl) || GET_CODE (rtl) == SUBREG)
+
+/* Like REG_P except that this macro is true for SET expressions. */
+#define SET_P(rtl) (GET_CODE (rtl) == SET)
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+#undef DWARF2_ADDR_SIZE
+#define DWARF2_ADDR_SIZE 4
+
+#define DWARF2_ASM_LINE_DEBUG_INFO 1
+
+#define EXIT_IGNORE_STACK 0
+#define INCOMING_FRAME_SP_OFFSET 4
+
+
+#define BRANCH_COST(SPEED,PREDICT) 1
+#define REGISTER_MOVE_COST(MODE,FROM,TO) 2
+
+#define EH_RETURN_DATA_REGNO(N) (N < 2 ? (8+(N)*2) : INVALID_REGNUM)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (HImode, 20)
+
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) DW_EH_PE_udata4
+
+/* NOTE: defined but zero means dwarf2 debugging, but sjlj EH. */
+#define DWARF2_UNWIND_INFO 0
+
+#define REGISTER_TARGET_PRAGMAS() rl78_register_pragmas()
diff --git a/gcc-4.9/gcc/config/rl78/rl78.md b/gcc-4.9/gcc/config/rl78/rl78.md
new file mode 100644
index 000000000..eb4c468ca
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/rl78.md
@@ -0,0 +1,443 @@
+;; Machine Description for Renesas RL78 processors
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_constants
+ [
+ (AX_REG 0)
+ (X_REG 0)
+ (A_REG 1)
+ (BC_REG 2)
+ (C_REG 2)
+ (B_REG 3)
+ (DE_REG 4)
+ (E_REG 4)
+ (D_REG 5)
+ (HL_REG 6)
+ (L_REG 6)
+ (H_REG 7)
+
+ (FP_REG 22)
+ (SP_REG 32)
+ (CC_REG 34)
+ (ES_REG 35)
+ (CS_REG 36)
+
+ (UNS_PROLOG 1)
+ (UNS_EPILOG 1)
+ (UNS_RETI 2)
+ (UNS_RETB 3)
+
+ (UNS_SET_RB 10)
+ (UNS_ES_ADDR 11)
+
+ (UNS_TRAMPOLINE_INIT 20)
+ (UNS_TRAMPOLINE_UNINIT 21)
+ (UNS_NONLOCAL_GOTO 22)
+
+ ])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ )
+
+(define_mode_iterator QHI [QI HI])
+
+(include "predicates.md")
+(include "constraints.md")
+(include "rl78-expand.md")
+(include "rl78-virt.md")
+(include "rl78-real.md")
+
+
+;; Function Prologue/Epilogue Instructions
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "rl78_expand_prologue (); DONE;"
+)
+
+(define_expand "epilogue"
+ [(const_int 0)]
+ ""
+ "rl78_expand_epilogue (); DONE;"
+)
+
+(define_expand "sibcall_epilogue"
+ [(return)]
+ ""
+ "FAIL;"
+)
+
+(define_insn "rl78_return"
+ [(return)]
+ ""
+ "ret"
+)
+
+(define_insn "interrupt_return"
+ [(unspec_volatile [(return)] UNS_RETI) ]
+ ""
+ "reti"
+)
+
+(define_insn "brk_interrupt_return"
+ [(unspec_volatile [(return)] UNS_RETB) ]
+ ""
+ "retb"
+)
+
+(define_expand "eh_return"
+ [(match_operand:HI 0 "" "")]
+ ""
+ "rl78_expand_eh_epilogue (operands[0]);
+ emit_barrier ();
+ DONE;"
+)
+
+;; These are used only by prologue/epilogue so it's "safe" to pass
+;; virtual registers.
+(define_insn "push"
+ [(set (reg:HI SP_REG)
+ (plus:HI (reg:HI SP_REG)
+ (const_int -2)))
+ (set (mem:HI (reg:HI SP_REG))
+ (match_operand:HI 0 "register_operand" "ABDT,vZint"))]
+ ""
+ "@
+ push\t%v0
+ push\t%v0 ; %0"
+)
+
+(define_insn "pop"
+ [(set (match_operand:HI 0 "register_operand" "=ABDT,vZint")
+ (mem:HI (reg:HI SP_REG)))
+ (set (reg:HI SP_REG)
+ (plus:HI (reg:HI SP_REG)
+ (const_int 2)))]
+ ""
+ "@
+ pop\t%v0
+ pop\t%v0 ; %0"
+)
+
+(define_insn "sel_rb"
+ [(unspec_volatile [(match_operand 0 "immediate_operand" "")] UNS_SET_RB)]
+ "!TARGET_G10"
+ "sel\trb%u0"
+ )
+
+(define_insn "trampoline_init"
+ [(set (match_operand 0 "register_operand" "=Z08W")
+ (unspec_volatile [(match_operand 1 "register_operand" "Z08W")
+ (match_operand 2 "register_operand" "Z10W")
+ ] UNS_TRAMPOLINE_INIT))
+ ]
+ ""
+ "call !!___trampoline_init ; %0 <= %1 %2"
+ )
+
+(define_insn "trampoline_uninit"
+ [(unspec_volatile [(const_int 0)] UNS_TRAMPOLINE_UNINIT)
+ ]
+ ""
+ "call !!___trampoline_uninit"
+ )
+
+;; GCC restores $fp *before* using it to access values on the *old*
+;; frame. So, we do it ourselves, to ensure this is not the case.
+;; Note that while %1 is usually a label_ref, we allow for a
+;; non-immediate as well.
+(define_expand "nonlocal_goto"
+ [(set (pc)
+ (unspec_volatile [(match_operand 0 "" "") ;; fp (ignore)
+ (match_operand 1 "" "vi") ;; target
+ (match_operand 2 "" "vi") ;; sp
+ (match_operand 3 "" "vi") ;; ?
+ ] UNS_NONLOCAL_GOTO))
+ ]
+ ""
+ "emit_jump_insn (gen_nonlocal_goto_insn (operands[0], operands[1], operands[2], operands[3]));
+ emit_barrier ();
+ DONE;"
+ )
+
+(define_insn "nonlocal_goto_insn"
+ [(set (pc)
+ (unspec_volatile [(match_operand 0 "" "") ;; fp (ignore)
+ (match_operand 1 "" "vi") ;; target
+ (match_operand 2 "" "vi") ;; sp
+ (match_operand 3 "" "vi") ;; ?
+ ] UNS_NONLOCAL_GOTO))
+ ]
+ ""
+ "; nonlocal goto
+ movw ax, %3
+ movw r22, ax
+ movw ax, %2
+ movw sp, ax
+ movw ax, %1
+ br ax
+"
+ )
+
+;;======================================================================
+;;
+;; "macro" insns - cases where inline chunks of code are more
+;; efficient than anything else.
+
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=&vm")
+ (plus:SI (match_operand:SI 1 "general_operand" "vim")
+ (match_operand 2 "general_operand" "vim")))
+ ]
+ ""
+ "emit_insn (gen_addsi3_internal_virt (operands[0], operands[1], operands[2]));
+ DONE;"
+)
+
+(define_insn "addsi3_internal_virt"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=v,&vm, vm")
+ (plus:SI (match_operand:SI 1 "general_operand" "0, vim, vim")
+ (match_operand 2 "general_operand" "vim,vim,vim")))
+ (clobber (reg:HI AX_REG))
+ (clobber (reg:HI BC_REG))
+ ]
+ "rl78_virt_insns_ok ()"
+ ""
+ [(set_attr "valloc" "macax")]
+)
+
+(define_insn "addsi3_internal_real"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=v,&vU, vU")
+ (plus:SI (match_operand:SI 1 "general_operand" "+0, viU, viU")
+ (match_operand 2 "general_operand" "viWabWhlWh1,viWabWhlWh1,viWabWhlWh1")))
+ (clobber (reg:HI AX_REG))
+ (clobber (reg:HI BC_REG))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ movw ax,%h1 \;addw ax,%h2 \;movw %h0, ax \;movw ax,%H1 \;sknc \;incw ax \;addw ax,%H2 \;movw %H0,ax
+ movw ax,%h1 \;addw ax,%h2 \;movw %h0, ax \;movw ax,%H1 \;sknc \;incw ax \;addw ax,%H2 \;movw %H0,ax
+ movw ax,%h1 \;addw ax,%h2 \;movw bc, ax \;movw ax,%H1 \;sknc \;incw ax \;addw ax,%H2 \;movw %H0,ax \;movw ax,bc \;movw %h0, ax"
+ [(set_attr "valloc" "macax")]
+)
+
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=&vm")
+ (minus:SI (match_operand:SI 1 "general_operand" "vim")
+ (match_operand 2 "general_operand" "vim")))
+ ]
+ ""
+ "emit_insn (gen_subsi3_internal_virt (operands[0], operands[1], operands[2]));
+ DONE;"
+)
+
+(define_insn "subsi3_internal_virt"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=v,&vm, vm")
+ (minus:SI (match_operand:SI 1 "general_operand" "0, vim, vim")
+ (match_operand 2 "general_operand" "vim,vim,vim")))
+ (clobber (reg:HI AX_REG))
+ (clobber (reg:HI BC_REG))
+ ]
+ "rl78_virt_insns_ok ()"
+ ""
+ [(set_attr "valloc" "macax")]
+)
+
+(define_insn "subsi3_internal_real"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=v,&vU, vU")
+ (minus:SI (match_operand:SI 1 "general_operand" "+0, viU, viU")
+ (match_operand 2 "general_operand" "viWabWhlWh1,viWabWhlWh1,viWabWhlWh1")))
+ (clobber (reg:HI AX_REG))
+ (clobber (reg:HI BC_REG))
+ ]
+ "rl78_real_insns_ok ()"
+ "@
+ movw ax,%h1 \;subw ax,%h2 \;movw %h0, ax \;movw ax,%H1 \;sknc \;decw ax \;subw ax,%H2 \;movw %H0,ax
+ movw ax,%h1 \;subw ax,%h2 \;movw %h0, ax \;movw ax,%H1 \;sknc \;decw ax \;subw ax,%H2 \;movw %H0,ax
+ movw ax,%h1 \;subw ax,%h2 \;movw bc, ax \;movw ax,%H1 \;sknc \;decw ax \;subw ax,%H2 \;movw %H0,ax \;movw ax,bc \;movw %h0, ax"
+ [(set_attr "valloc" "macax")]
+)
+
+(define_expand "mulqi3"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (mult:QI (match_operand:QI 1 "general_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ ]
+ "" ; mulu supported by all targets
+ ""
+)
+
+(define_expand "mulhi3"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (mult:HI (match_operand:HI 1 "general_operand" "")
+ (match_operand:HI 2 "nonmemory_operand" "")))
+ ]
+ "! RL78_MUL_NONE"
+ ""
+)
+
+(define_expand "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=&v")
+ (mult:SI (match_operand:SI 1 "general_operand" "+vim")
+ (match_operand:SI 2 "nonmemory_operand" "vi")))
+ ]
+ "! RL78_MUL_NONE"
+ ""
+)
+
+(define_insn "*mulqi3_rl78"
+ [(set (match_operand:QI 0 "register_operand" "=&v")
+ (mult:QI (match_operand:QI 1 "general_operand" "+viU")
+ (match_operand:QI 2 "general_operand" "vi")))
+ ]
+ "" ; mulu supported by all targets
+ "; mulqi macro %0 = %1 * %2
+ mov a, %h1
+ mov x, a
+ mov a, %h2
+ mulu x ; ax = a * x
+ mov a, x
+ mov %h0, a
+ ; end of mulqi macro"
+;; [(set_attr "valloc" "macax")]
+)
+
+(define_insn "*mulhi3_rl78"
+ [(set (match_operand:HI 0 "register_operand" "=&v")
+ (mult:HI (match_operand:HI 1 "general_operand" "+viU")
+ (match_operand:HI 2 "general_operand" "vi")))
+ ]
+ "RL78_MUL_RL78"
+ "; mulhi macro %0 = %1 * %2
+ movw ax, %h1
+ movw bc, %h2
+ mulhu ; bcax = bc * ax
+ movw %h0, ax
+ ; end of mulhi macro"
+;; [(set_attr "valloc" "macax")]
+)
+
+(define_insn "*mulhi3_g13"
+ [(set (match_operand:HI 0 "register_operand" "=&v")
+ (mult:HI (match_operand:HI 1 "general_operand" "+viU")
+ (match_operand:HI 2 "general_operand" "vi")))
+ ]
+ "RL78_MUL_G13"
+ "; mulhi macro %0 = %1 * %2
+ mov a, #0x00
+ mov !0xf00e8, a ; MDUC
+ movw ax, %h1
+ movw 0xffff0, ax ; MDAL
+ movw ax, %h2
+ movw 0xffff2, ax ; MDAH
+ nop ; mdb = mdal * mdah
+ movw ax, 0xffff6 ; MDBL
+ movw %h0, ax
+ ; end of mulhi macro"
+;; [(set_attr "valloc" "umul")]
+)
+
+;; 0xFFFF0 is MACR(L). 0xFFFF2 is MACR(H) but we don't care about it
+;; because we're only using the lower 16 bits (which is the upper 16
+;; bits of the result).
+(define_insn "mulsi3_rl78"
+ [(set (match_operand:SI 0 "register_operand" "=&v")
+ (mult:SI (match_operand:SI 1 "general_operand" "+viU")
+ (match_operand:SI 2 "general_operand" "vi")))
+ ]
+ "RL78_MUL_RL78"
+ "; mulsi macro %0 = %1 * %2
+ movw ax, %h1
+ movw bc, %h2
+ MULHU ; bcax = bc * ax
+ movw %h0, ax
+ movw ax, bc
+ movw 0xffff0, ax
+ movw ax, %H1
+ movw bc, %h2
+ MACHU ; MACR += bc * ax
+ movw ax, %h1
+ movw bc, %H2
+ MACHU ; MACR += bc * ax
+ movw ax, 0xffff0
+ movw %H0, ax
+ ; end of mulsi macro"
+ [(set_attr "valloc" "macax")]
+ )
+
+;; 0xFFFF0 is MDAL. 0xFFFF2 is MDAH.
+;; 0xFFFF6 is MDBL. 0xFFFF4 is MDBH.
+;; 0xF00E0 is MDCL. 0xF00E2 is MDCH.
+;; 0xF00E8 is MDUC.
+;; Warning: this matches the silicon not the documentation.
+(define_insn "mulsi3_g13"
+  [(set (match_operand:SI          0 "register_operand" "=&v")
+	(mult:SI (match_operand:SI 1 "general_operand" "viU")
+		 (match_operand:SI 2 "general_operand" "viU")))
+   ]
+  "RL78_MUL_G13"
+  "; mulsi macro %0 = %1 * %2
+	mov	a, #0x00
+	mov	!0xf00e8, a	; MDUC
+	movw	ax, %h1
+	movw	0xffff0, ax	; MDAL
+	movw	ax, %h2
+	movw	0xffff2, ax	; MDAH
+	nop	; mdb = mdal * mdah
+	movw	ax, 0xffff6	; MDBL
+	movw	%h0, ax
+
+	mov	a, #0x40
+	mov	!0xf00e8, a	; MDUC
+	movw	ax, 0xffff4	; MDBH
+	movw	!0xf00e0, ax	; MDCL
+	movw	ax, #0
+	movw	!0xf00e2, ax	; MDCH
+	movw	ax, %H1
+	movw	0xffff0, ax	; MDAL
+	movw	ax, %h2
+	movw	0xffff2, ax	; MDAH
+	nop	; mdc += mdal * mdah
+
+	mov	a, #0x40
+	mov	!0xf00e8, a	; MDUC
+	movw	ax, %h1
+	movw	0xffff0, ax	; MDAL
+	movw	ax, %H2
+	movw	0xffff2, ax	; MDAH
+	nop	; mdc += mdal * mdah
+	nop	; Additional nop for MAC
+	movw	ax, !0xf00e0	; MDCL
+	movw	%H0, ax
+	; end of mulsi macro"
+  [(set_attr "valloc" "macax")]
+  )
+
+(define_expand "es_addr"
+ [(unspec:SI [(reg:QI ES_REG)
+ (match_operand:HI 0 "" "")
+ ] UNS_ES_ADDR)]
+ ""
+ ""
+)
diff --git a/gcc-4.9/gcc/config/rl78/rl78.opt b/gcc-4.9/gcc/config/rl78/rl78.opt
new file mode 100644
index 000000000..4d2be5baf
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/rl78.opt
@@ -0,0 +1,55 @@
+; Command line options for the Renesas RL78 port of GCC.
+; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+; Contributed by Red Hat.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+;---------------------------------------------------
+
+HeaderInclude
+config/rl78/rl78-opts.h
+
+msim
+Target Report
+Use the simulator runtime.
+
+mmul=
+Target RejectNegative Joined Var(rl78_mul_type) Report Tolower Enum(rl78_mul_types) Init(MUL_NONE)
+Select hardware or software multiplication support.
+
+Enum
+Name(rl78_mul_types) Type(enum rl78_mul_types)
+
+EnumValue
+Enum(rl78_mul_types) String(none) Value(MUL_NONE)
+
+EnumValue
+Enum(rl78_mul_types) String(rl78) Value(MUL_RL78)
+
+EnumValue
+Enum(rl78_mul_types) String(g13) Value(MUL_G13)
+
+mallregs
+Target Mask(ALLREGS) Report Optimization
+Use all registers, reserving none for interrupt handlers.
+
+mrelax
+Target Report Optimization
+Enable assembler and linker relaxation. Enabled by default at -Os.
+
+mg10
+Target Mask(G10) Report
+Target the RL78/G10 series.
diff --git a/gcc-4.9/gcc/config/rl78/t-rl78 b/gcc-4.9/gcc/config/rl78/t-rl78
new file mode 100644
index 000000000..8db50e1a7
--- /dev/null
+++ b/gcc-4.9/gcc/config/rl78/t-rl78
@@ -0,0 +1,27 @@
+# Makefile fragment for building GCC for the Renesas RL78 target.
+# Copyright (C) 2011-2014 Free Software Foundation, Inc.
+# Contributed by Red Hat.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published
+# by the Free Software Foundation; either version 3, or (at your
+# option) any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+rl78-c.o: $(srcdir)/config/rl78/rl78-c.c $(RTL_H) $(TREE_H) $(CONFIG_H) $(TM_H)
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+# Enable multilibs:
+
+MULTILIB_OPTIONS = mg10
+MULTILIB_DIRNAMES = g10