Diffstat (limited to 'gcc-4.9/gcc/config/m32r')
-rw-r--r--  gcc-4.9/gcc/config/m32r/constraints.md    147
-rw-r--r--  gcc-4.9/gcc/config/m32r/linux.h            91
-rw-r--r--  gcc-4.9/gcc/config/m32r/little.h           20
-rw-r--r--  gcc-4.9/gcc/config/m32r/m32r-opts.h       108
-rw-r--r--  gcc-4.9/gcc/config/m32r/m32r-protos.h      62
-rw-r--r--  gcc-4.9/gcc/config/m32r/m32r.c           2921
-rw-r--r--  gcc-4.9/gcc/config/m32r/m32r.h           1051
-rw-r--r--  gcc-4.9/gcc/config/m32r/m32r.md          2276
-rw-r--r--  gcc-4.9/gcc/config/m32r/m32r.opt          117
-rw-r--r--  gcc-4.9/gcc/config/m32r/predicates.md     440
-rw-r--r--  gcc-4.9/gcc/config/m32r/rtems.h            33
-rw-r--r--  gcc-4.9/gcc/config/m32r/t-linux            20
-rw-r--r--  gcc-4.9/gcc/config/m32r/t-m32r             31
13 files changed, 7317 insertions, 0 deletions
diff --git a/gcc-4.9/gcc/config/m32r/constraints.md b/gcc-4.9/gcc/config/m32r/constraints.md
new file mode 100644
index 000000000..79ff4b521
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/constraints.md
@@ -0,0 +1,147 @@
+;; Constraint definitions for Renesas M32R cpu for GNU C compiler
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; The letters I, J, K, L, M, N, O, P in an operand constraint string
+;; can be used to stand for particular ranges of immediate operands.
+;; The letters Q, R, S, T, U are used to segregate specific types of
+;; operands, usually memory references, for the target machine.
+;;
+;; I is used for 8-bit signed immediates.
+;; J is used for 16-bit signed immediates.
+;; K is used for 16-bit unsigned immediates.
+;; L is used for 16-bit signed immediates left shifted by 16.
+;; M is used for 24-bit unsigned immediates.
+;; N is used for 8-bit signed immediates for compares
+;; (values in the range -127 to +128).
+;; O is used for 5-bit unsigned immediates (shift count).
+;; P is used for 16-bit signed immediates for compares
+;; (values in the range -32767 to +32768).
+;;
+;; Q is for symbolic addresses loadable with ld24.
+;; R is for symbolic addresses when ld24 can't be used.
+;; S is for stores with pre {inc,dec}rement
+;; T is for indirect of a pointer.
+;; U is for loads with post increment.
+;; W is used for an immediate value of 0.
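+;;
+;; Each define_constraint below also generates a satisfies_constraint_<C>
+;; helper via tm-constrs.h (which m32r.c includes), so C code can test a
+;; constraint directly.  A minimal sketch of such a use:
+;;
+;;   if (satisfies_constraint_I (operands[2]))
+;;     ... emit the short-immediate form ...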
+;;
+;; Register constraints
+
+(define_register_constraint "a" "ACCUM_REGS"
+ "@internal")
+
+(define_register_constraint "c" "CARRY_REG"
+ "@internal")
+
+;; Integer constraints
+(define_constraint "I"
+ "8-bit signed immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= -0x80 && ival <= 0x7f")))
+
+(define_constraint "J"
+ "16-bit signed immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= -0x8000 && ival <= 0x7fff")))
+
+(define_constraint "K"
+ "16-bit unsigned immediate."
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) ival <= 0x0000ffff")))
+
+(define_constraint "L"
+ "16-bit signed immediate left shifted by 16."
+ (and (match_code "const_int")
+ (match_test "(ival & 0xffff) == 0")
+ (match_test "(ival >> 16) >= -0x8000 && (ival >> 16) <= 0x7fff")))
+
+(define_constraint "M"
+ "24-bit unsigned immediate."
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) ival <= 0x00ffffff")))
+
+(define_constraint "N"
+ "8-bit signed immediate for compare."
+ (and (match_code "const_int")
+ (match_test "ival >= -127 && ival <= 128")))
+
+(define_constraint "O"
+ "5-bit unsigned immediate."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival < 32")))
+
+(define_constraint "P"
+ "16-bit signed immediate for compare."
+ (and (match_code "const_int")
+ (match_test "ival >= -0x7fff && ival <= 0x8000")))
+
+;; Floating-point constraints
+(define_constraint "G"
+ "Double constant loadable with 2 ldi insns."
+ (and (match_code "const_double")
+ (match_test "easy_di_const (op)")))
+
+(define_constraint "H"
+ "Double constant loadable with movdf."
+ (and (match_code "const_double")
+ (match_test "easy_df_const (op)")))
+
+;; Extra constraints
+(define_constraint "Q"
+ "A symbolic address loadable when ld24."
+ (ior (and (match_test "TARGET_ADDR24")
+ (match_test "GET_CODE (op) == LABEL_REF"))
+ (match_test "addr24_operand (op, VOIDmode)")))
+
+(define_constraint "R"
+ "A symbolic address loadable with ld24 can't be used."
+ (ior (and (match_test "TARGET_ADDR32")
+ (match_test "GET_CODE (op) == LABEL_REF"))
+ (match_test "addr32_operand (op, VOIDmode)")))
+
+(define_constraint "S"
+ "A store with pre {inc,dec}rement."
+ (and (match_code "mem")
+ (match_test "mode == SImode || mode == SFmode")
+ (match_code "pre_inc,pre_dec" "0")
+ (match_code "reg" "00")
+ (match_test "GPR_P (REGNO (XEXP (XEXP (op, 0), 0)))
+ || REGNO (XEXP (XEXP (op, 0), 0)) == ARG_POINTER_REGNUM
+ || ! HARD_REGISTER_P (XEXP (XEXP (op, 0), 0))")))
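+
+;; The quoted strings after match_code above are sub-expression paths:
+;; "0" selects the address inside the mem, "00" the register inside the
+;; side effect, so "S" accepts an RTL shape like (illustrative only):
+;;
+;;   (mem:SI (pre_dec:SI (reg:SI 15)))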
+
+(define_constraint "T"
+ "An indirect of a pointer."
+ (and (match_code "mem")
+ (match_test "memreg_operand (op, GET_MODE (op))")))
+
+(define_constraint "U"
+ "A load with post increment."
+ (and (match_code "mem")
+ (match_test "mode == SImode || mode == SFmode")
+ (match_code "post_inc" "0")
+ (match_code "reg" "00")
+ (match_test "GPR_P (REGNO (XEXP (XEXP (op, 0), 0)))
+ || REGNO (XEXP (XEXP (op, 0), 0)) == ARG_POINTER_REGNUM
+ || ! HARD_REGISTER_P (XEXP (XEXP (op, 0), 0))")))
+
+(define_constraint "W"
+ "zero immediate."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
diff --git a/gcc-4.9/gcc/config/m32r/linux.h b/gcc-4.9/gcc/config/m32r/linux.h
new file mode 100644
index 000000000..698086b3f
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/linux.h
@@ -0,0 +1,91 @@
+/* Definitions for Renesas M32R running Linux-based GNU systems using ELF.
+ Copyright (C) 2003-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+/* Provide a LINK_SPEC appropriate for Linux.  Here we provide support
+   for the special GCC options -static and -shared, which allow us to
+   link things in one of three modes (static, shared, or the default
+   dynamically linked executable) by applying the appropriate
+   combinations of options at link-time.
+
+   When the -shared link option is used a final link is not being
+   done.  */
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2"
+
+#undef LINK_SPEC
+#if TARGET_LITTLE_ENDIAN
+#define LINK_SPEC "%(link_cpu) -m m32rlelf_linux %{shared:-shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
+ %{static:-static}}"
+#else
+#define LINK_SPEC "%(link_cpu) -m m32relf_linux %{shared:-shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
+ %{static:-static}}"
+#endif
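+
+/* To illustrate the spec above (ignoring %(link_cpu)): a default
+   big-endian link passes "-m m32relf_linux -dynamic-linker
+   /lib/ld-linux.so.2" (via GNU_USER_DYNAMIC_LINKER) to the linker,
+   "gcc -shared" passes "-m m32relf_linux -shared", and "gcc -static"
+   passes "-m m32relf_linux -static".  */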
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{pthread:-lpthread} \
+ %{shared: -lc} \
+ %{!shared: \
+ %{mieee-fp:-lieee} \
+ %{profile:-lc_p} %{!profile: -lc}}"
+
+#undef STARTFILE_SPEC
+#if defined HAVE_LD_PIE
+#define STARTFILE_SPEC \
+ "%{!shared: %{pg|p|profile:gcrt1.o%s;pie:Scrt1.o%s;:crt1.o%s}} \
+ crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+#else
+#define STARTFILE_SPEC \
+ "%{!shared: \
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}\
+ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+#endif
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "\
+ %{posix:-D_POSIX_SOURCE} \
+ %{pthread:-D_REENTRANT -D_PTHREADS} \
+"
+
+#define TARGET_OS_CPP_BUILTINS() GNU_USER_TARGET_OS_CPP_BUILTINS()
+
+#define TARGET_ASM_FILE_END file_end_indicate_exec_stack
diff --git a/gcc-4.9/gcc/config/m32r/little.h b/gcc-4.9/gcc/config/m32r/little.h
new file mode 100644
index 000000000..7a0817e09
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/little.h
@@ -0,0 +1,20 @@
+/* Definitions for Renesas little endian M32R cpu.
+ Copyright (C) 2003-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#define TARGET_LITTLE_ENDIAN 1
diff --git a/gcc-4.9/gcc/config/m32r/m32r-opts.h b/gcc-4.9/gcc/config/m32r/m32r-opts.h
new file mode 100644
index 000000000..db9886f08
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/m32r-opts.h
@@ -0,0 +1,108 @@
+/* Definitions for option handling for Renesas M32R cpu.
+ Copyright (C) 1996-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef M32R_OPTS_H
+#define M32R_OPTS_H
+
+/* Code Models
+
+ Code models select one combination of two independent choices
+ (address space size, call insn to use):
+
+ small: addresses use 24 bits, use bl to make calls
+ medium: addresses use 32 bits, use bl to make calls (*1)
+ large: addresses use 32 bits, use seth/add3/jl to make calls (*2)
+
+ The fourth combination, "addresses use 24 bits, use seth/add3/jl to make
+ calls", is possible but doesn't make much sense.
+
+ (*1) The linker may eventually be able to relax seth/add3 -> ld24.
+ (*2) The linker may eventually be able to relax seth/add3/jl -> bl.
+
+ Internally these are recorded as TARGET_ADDR{24,32} and
+ TARGET_CALL{26,32}.
+
+ The __model__ attribute can be used to select the code model to use when
+ accessing particular objects. */
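+
+/* For example (an illustrative declaration, not code from this port):
+
+     extern int huge_table[] __attribute__ ((model ("large")));
+
+   forces 32-bit addressing for that one object, overriding the code
+   model selected on the command line.  */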
+
+enum m32r_model { M32R_MODEL_SMALL, M32R_MODEL_MEDIUM, M32R_MODEL_LARGE };
+
+#define TARGET_MODEL_SMALL (m32r_model_selected == M32R_MODEL_SMALL)
+#define TARGET_MODEL_MEDIUM (m32r_model_selected == M32R_MODEL_MEDIUM)
+#define TARGET_MODEL_LARGE (m32r_model_selected == M32R_MODEL_LARGE)
+#define TARGET_ADDR24 (m32r_model_selected == M32R_MODEL_SMALL)
+#define TARGET_ADDR32 (! TARGET_ADDR24)
+#define TARGET_CALL26 (! TARGET_CALL32)
+#define TARGET_CALL32 (m32r_model_selected == M32R_MODEL_LARGE)
+
+/* The default is the small model. */
+#ifndef M32R_MODEL_DEFAULT
+#define M32R_MODEL_DEFAULT M32R_MODEL_SMALL
+#endif
+
+/* Small Data Area
+
+ The SDA consists of sections .sdata, .sbss, and .scommon.
+ .scommon isn't a real section, symbols in it have their section index
+ set to SHN_M32R_SCOMMON, though support for it exists in the linker script.
+
+ Two switches control the SDA:
+
+ -G NNN - specifies the maximum size of variable to go in the SDA
+
+ -msdata=foo - specifies how such variables are handled
+
+ -msdata=none - small data area is disabled
+
+ -msdata=sdata - small data goes in the SDA, special code isn't
+ generated to use it, and special relocs aren't
+ generated
+
+ -msdata=use - small data goes in the SDA, special code is generated
+ to use the SDA and special relocs are generated
+
+ The SDA is not multilib'd; that isn't necessary.
+ MULTILIB_EXTRA_OPTS is set in tmake_file to -msdata=sdata so multilib'd
+ libraries have small data in .sdata/SHN_M32R_SCOMMON so programs that use
+ -msdata=use will successfully link with them (references in header files
+ will cause the compiler to emit code that refers to library objects in
+ .data). ??? There can be a problem if the user passes a -G value greater
+ than the default and a library object in a header file is that size.
+ The default is 8 so this should be rare - if it occurs the user
+ is required to rebuild the libraries or use a smaller value for -G. */
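+
+/* A small example of the -G / -msdata interaction (hypothetical
+   declarations):
+
+     int counter;       (4 bytes, within the default -G 8: goes in .sbss)
+     char buf[64];      (over the -G limit: ordinary .bss)
+
+   With -msdata=use, references to "counter" also get small-data relocs;
+   with -msdata=sdata only the section placement changes.  */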
+
+/* Maximum size of variables that go in .sdata/.sbss.
+ The -msdata=foo switch also controls how small variables are handled. */
+#ifndef SDATA_DEFAULT_SIZE
+#define SDATA_DEFAULT_SIZE 8
+#endif
+
+enum m32r_sdata { M32R_SDATA_NONE, M32R_SDATA_SDATA, M32R_SDATA_USE };
+
+#define TARGET_SDATA_NONE (m32r_sdata_selected == M32R_SDATA_NONE)
+#define TARGET_SDATA_SDATA (m32r_sdata_selected == M32R_SDATA_SDATA)
+#define TARGET_SDATA_USE (m32r_sdata_selected == M32R_SDATA_USE)
+
+/* Default is to disable the SDA
+ [for upward compatibility with previous toolchains]. */
+#ifndef M32R_SDATA_DEFAULT
+#define M32R_SDATA_DEFAULT M32R_SDATA_NONE
+#endif
+
+#endif
diff --git a/gcc-4.9/gcc/config/m32r/m32r-protos.h b/gcc-4.9/gcc/config/m32r/m32r-protos.h
new file mode 100644
index 000000000..c1b613c1b
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/m32r-protos.h
@@ -0,0 +1,62 @@
+/* Prototypes for m32r.c functions used in the md file & elsewhere.
+ Copyright (C) 1999-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Function prototypes that cannot exist in m32r.h due to dependency
+ complications. */
+
+extern void m32r_init (void);
+extern void m32r_init_expanders (void);
+extern unsigned m32r_compute_frame_size (int);
+extern void m32r_expand_prologue (void);
+extern void m32r_expand_epilogue (void);
+extern int direct_return (void);
+extern void m32r_load_pic_register (void);
+extern enum m32r_function_type m32r_compute_function_type (tree);
+
+#ifdef RTX_CODE
+extern int easy_di_const (rtx);
+extern int easy_df_const (rtx);
+extern rtx gen_compare (enum rtx_code, rtx, rtx, int);
+extern bool gen_cond_store (enum rtx_code, rtx, rtx, rtx);
+extern rtx gen_split_move_double (rtx *);
+extern int m32r_address_code (rtx);
+extern void m32r_initialize_trampoline (rtx, rtx, rtx);
+extern int zero_and_one (rtx, rtx);
+extern char * emit_cond_move (rtx *, rtx);
+extern void m32r_output_block_move (rtx, rtx *);
+extern int m32r_expand_block_move (rtx *);
+extern int m32r_not_same_reg (rtx, rtx);
+extern int m32r_hard_regno_rename_ok (unsigned int, unsigned int);
+extern int m32r_legitimate_pic_operand_p (rtx);
+extern rtx m32r_legitimize_pic_address (rtx, rtx);
+extern rtx m32r_return_addr (int);
+extern rtx m32r_function_symbol (const char *);
+
+#ifdef HAVE_MACHINE_MODES
+extern int call_operand (rtx, enum machine_mode);
+extern int small_data_operand (rtx, enum machine_mode);
+extern int addr24_operand (rtx, enum machine_mode);
+extern int addr32_operand (rtx, enum machine_mode);
+extern int call26_operand (rtx, enum machine_mode);
+extern int memreg_operand (rtx, enum machine_mode);
+extern int small_insn_p (rtx, enum machine_mode);
+
+#endif /* HAVE_MACHINE_MODES */
+
+#endif /* RTX_CODE */
diff --git a/gcc-4.9/gcc/config/m32r/m32r.c b/gcc-4.9/gcc/config/m32r/m32r.c
new file mode 100644
index 000000000..83bc3a7bf
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/m32r.c
@@ -0,0 +1,2921 @@
+/* Subroutines used for code generation on the Renesas M32R cpu.
+ Copyright (C) 1996-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "stringpool.h"
+#include "calls.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "dbxout.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "expr.h"
+#include "function.h"
+#include "recog.h"
+#include "diagnostic-core.h"
+#include "ggc.h"
+#include "df.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "tm-constrs.h"
+#include "opts.h"
+
+/* Array of valid operand punctuation characters. */
+static char m32r_punct_chars[256];
+
+/* Machine-specific symbol_ref flags. */
+#define SYMBOL_FLAG_MODEL_SHIFT SYMBOL_FLAG_MACH_DEP_SHIFT
+#define SYMBOL_REF_MODEL(X) \
+ ((enum m32r_model) ((SYMBOL_REF_FLAGS (X) >> SYMBOL_FLAG_MODEL_SHIFT) & 3))
+
+/* For string literals, etc. */
+#define LIT_NAME_P(NAME) ((NAME)[0] == '*' && (NAME)[1] == '.')
+
+/* Forward declarations. */
+static void m32r_option_override (void);
+static void init_reg_tables (void);
+static void block_move_call (rtx, rtx, rtx);
+static int m32r_is_insn (rtx);
+static bool m32r_legitimate_address_p (enum machine_mode, rtx, bool);
+static rtx m32r_legitimize_address (rtx, rtx, enum machine_mode);
+static bool m32r_mode_dependent_address_p (const_rtx, addr_space_t);
+static tree m32r_handle_model_attribute (tree *, tree, tree, int, bool *);
+static void m32r_print_operand (FILE *, rtx, int);
+static void m32r_print_operand_address (FILE *, rtx);
+static bool m32r_print_operand_punct_valid_p (unsigned char code);
+static void m32r_output_function_prologue (FILE *, HOST_WIDE_INT);
+static void m32r_output_function_epilogue (FILE *, HOST_WIDE_INT);
+
+static void m32r_file_start (void);
+
+static int m32r_adjust_priority (rtx, int);
+static int m32r_issue_rate (void);
+
+static void m32r_encode_section_info (tree, rtx, int);
+static bool m32r_in_small_data_p (const_tree);
+static bool m32r_return_in_memory (const_tree, const_tree);
+static rtx m32r_function_value (const_tree, const_tree, bool);
+static rtx m32r_libcall_value (enum machine_mode, const_rtx);
+static bool m32r_function_value_regno_p (const unsigned int);
+static void m32r_setup_incoming_varargs (cumulative_args_t, enum machine_mode,
+ tree, int *, int);
+static void init_idents (void);
+static bool m32r_rtx_costs (rtx, int, int, int, int *, bool speed);
+static int m32r_memory_move_cost (enum machine_mode, reg_class_t, bool);
+static bool m32r_pass_by_reference (cumulative_args_t, enum machine_mode,
+ const_tree, bool);
+static int m32r_arg_partial_bytes (cumulative_args_t, enum machine_mode,
+ tree, bool);
+static rtx m32r_function_arg (cumulative_args_t, enum machine_mode,
+ const_tree, bool);
+static void m32r_function_arg_advance (cumulative_args_t, enum machine_mode,
+ const_tree, bool);
+static bool m32r_can_eliminate (const int, const int);
+static void m32r_conditional_register_usage (void);
+static void m32r_trampoline_init (rtx, tree, rtx);
+static bool m32r_legitimate_constant_p (enum machine_mode, rtx);
+
+/* M32R specific attributes. */
+
+static const struct attribute_spec m32r_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
+ affects_type_identity } */
+ { "interrupt", 0, 0, true, false, false, NULL, false },
+ { "model", 1, 1, true, false, false, m32r_handle_model_attribute,
+ false },
+ { NULL, 0, 0, false, false, false, NULL, false }
+};
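+
+/* The interrupt attribute is attached to function declarations, e.g.
+   (illustrative):
+
+     void handler (void) __attribute__ ((interrupt));
+
+   m32r_compute_function_type below keys the prologue/epilogue code off
+   its presence.  */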
+
+/* Initialize the GCC target structure. */
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE m32r_attribute_table
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P m32r_legitimate_address_p
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS m32r_legitimize_address
+#undef TARGET_MODE_DEPENDENT_ADDRESS_P
+#define TARGET_MODE_DEPENDENT_ADDRESS_P m32r_mode_dependent_address_p
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+
+#undef TARGET_PRINT_OPERAND
+#define TARGET_PRINT_OPERAND m32r_print_operand
+#undef TARGET_PRINT_OPERAND_ADDRESS
+#define TARGET_PRINT_OPERAND_ADDRESS m32r_print_operand_address
+#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
+#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32r_print_operand_punct_valid_p
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE m32r_output_function_prologue
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE m32r_output_function_epilogue
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START m32r_file_start
+
+#undef TARGET_SCHED_ADJUST_PRIORITY
+#define TARGET_SCHED_ADJUST_PRIORITY m32r_adjust_priority
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE m32r_issue_rate
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE m32r_option_override
+
+#undef TARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO m32r_encode_section_info
+#undef TARGET_IN_SMALL_DATA_P
+#define TARGET_IN_SMALL_DATA_P m32r_in_small_data_p
+
+
+#undef TARGET_MEMORY_MOVE_COST
+#define TARGET_MEMORY_MOVE_COST m32r_memory_move_cost
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS m32r_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
+
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY m32r_return_in_memory
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE m32r_function_value
+#undef TARGET_LIBCALL_VALUE
+#define TARGET_LIBCALL_VALUE m32r_libcall_value
+#undef TARGET_FUNCTION_VALUE_REGNO_P
+#define TARGET_FUNCTION_VALUE_REGNO_P m32r_function_value_regno_p
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS m32r_setup_incoming_varargs
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE m32r_pass_by_reference
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES m32r_arg_partial_bytes
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG m32r_function_arg
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE m32r_function_arg_advance
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE m32r_can_eliminate
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE m32r_conditional_register_usage
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT m32r_trampoline_init
+
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P m32r_legitimate_constant_p
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+/* Called by m32r_option_override to initialize various things. */
+
+void
+m32r_init (void)
+{
+ init_reg_tables ();
+
+ /* Initialize array for TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
+ memset (m32r_punct_chars, 0, sizeof (m32r_punct_chars));
+ m32r_punct_chars['#'] = 1;
+ m32r_punct_chars['@'] = 1; /* ??? no longer used */
+
+ /* Provide default value if not specified. */
+ if (!global_options_set.x_g_switch_value)
+ g_switch_value = SDATA_DEFAULT_SIZE;
+}
+
+static void
+m32r_option_override (void)
+{
+ /* These need to be done at start up.
+ It's convenient to do them here. */
+ m32r_init ();
+ SUBTARGET_OVERRIDE_OPTIONS;
+}
+
+/* Vectors to keep interesting information about registers where it can easily
+ be got. We used to use the actual mode value as the bit number, but there
+ is (or may be) more than 32 modes now. Instead we use two tables: one
+ indexed by hard register number, and one indexed by mode. */
+
+/* The purpose of m32r_mode_class is to shrink the range of modes so that
+ they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
+ mapped into one m32r_mode_class mode. */
+
+enum m32r_mode_class
+{
+ C_MODE,
+ S_MODE, D_MODE, T_MODE, O_MODE,
+ SF_MODE, DF_MODE, TF_MODE, OF_MODE, A_MODE
+};
+
+/* Modes for condition codes. */
+#define C_MODES (1 << (int) C_MODE)
+
+/* Modes for single-word and smaller quantities. */
+#define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
+
+/* Modes for double-word and smaller quantities. */
+#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
+
+/* Modes for quad-word and smaller quantities. */
+#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
+
+/* Modes for accumulators. */
+#define A_MODES (1 << (int) A_MODE)
+
+/* Value is 1 if register/mode pair is acceptable on the M32R. */
+
+const unsigned int m32r_hard_regno_mode_ok[FIRST_PSEUDO_REGISTER] =
+{
+ T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES,
+ T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, S_MODES, S_MODES, S_MODES,
+ S_MODES, C_MODES, A_MODES, A_MODES
+};
+
+unsigned int m32r_mode_class [NUM_MACHINE_MODES];
+
+enum reg_class m32r_regno_reg_class[FIRST_PSEUDO_REGISTER];
+
+static void
+init_reg_tables (void)
+{
+ int i;
+
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ {
+ switch (GET_MODE_CLASS (i))
+ {
+ case MODE_INT:
+ case MODE_PARTIAL_INT:
+ case MODE_COMPLEX_INT:
+ if (GET_MODE_SIZE (i) <= 4)
+ m32r_mode_class[i] = 1 << (int) S_MODE;
+ else if (GET_MODE_SIZE (i) == 8)
+ m32r_mode_class[i] = 1 << (int) D_MODE;
+ else if (GET_MODE_SIZE (i) == 16)
+ m32r_mode_class[i] = 1 << (int) T_MODE;
+ else if (GET_MODE_SIZE (i) == 32)
+ m32r_mode_class[i] = 1 << (int) O_MODE;
+ else
+ m32r_mode_class[i] = 0;
+ break;
+ case MODE_FLOAT:
+ case MODE_COMPLEX_FLOAT:
+ if (GET_MODE_SIZE (i) <= 4)
+ m32r_mode_class[i] = 1 << (int) SF_MODE;
+ else if (GET_MODE_SIZE (i) == 8)
+ m32r_mode_class[i] = 1 << (int) DF_MODE;
+ else if (GET_MODE_SIZE (i) == 16)
+ m32r_mode_class[i] = 1 << (int) TF_MODE;
+ else if (GET_MODE_SIZE (i) == 32)
+ m32r_mode_class[i] = 1 << (int) OF_MODE;
+ else
+ m32r_mode_class[i] = 0;
+ break;
+ case MODE_CC:
+ m32r_mode_class[i] = 1 << (int) C_MODE;
+ break;
+ default:
+ m32r_mode_class[i] = 0;
+ break;
+ }
+ }
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (GPR_P (i))
+ m32r_regno_reg_class[i] = GENERAL_REGS;
+ else if (i == ARG_POINTER_REGNUM)
+ m32r_regno_reg_class[i] = GENERAL_REGS;
+ else
+ m32r_regno_reg_class[i] = NO_REGS;
+ }
+}
+
+/* M32R specific attribute support.
+
+ interrupt - for interrupt functions
+
+ model - select code model used to access object
+
+ small: addresses use 24 bits, use bl to make calls
+ medium: addresses use 32 bits, use bl to make calls
+ large: addresses use 32 bits, use seth/add3/jl to make calls
+
+ Grep for MODEL in m32r.h for more info. */
+
+static tree small_ident1;
+static tree small_ident2;
+static tree medium_ident1;
+static tree medium_ident2;
+static tree large_ident1;
+static tree large_ident2;
+
+static void
+init_idents (void)
+{
+ if (small_ident1 == 0)
+ {
+ small_ident1 = get_identifier ("small");
+ small_ident2 = get_identifier ("__small__");
+ medium_ident1 = get_identifier ("medium");
+ medium_ident2 = get_identifier ("__medium__");
+ large_ident1 = get_identifier ("large");
+ large_ident2 = get_identifier ("__large__");
+ }
+}
+
+/* Handle an "model" attribute; arguments as in
+ struct attribute_spec.handler. */
+static tree
+m32r_handle_model_attribute (tree *node ATTRIBUTE_UNUSED, tree name,
+ tree args, int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ tree arg;
+
+ init_idents ();
+ arg = TREE_VALUE (args);
+
+ if (arg != small_ident1
+ && arg != small_ident2
+ && arg != medium_ident1
+ && arg != medium_ident2
+ && arg != large_ident1
+ && arg != large_ident2)
+ {
+ warning (OPT_Wattributes, "invalid argument of %qs attribute",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* Encode section information of DECL, which is either a VAR_DECL,
+ FUNCTION_DECL, STRING_CST, CONSTRUCTOR, or ???.
+
+ For the M32R we want to record:
+
+ - whether the object lives in .sdata/.sbss.
+ - what code model should be used to access the object
+*/
+
+static void
+m32r_encode_section_info (tree decl, rtx rtl, int first)
+{
+ int extra_flags = 0;
+ tree model_attr;
+ enum m32r_model model;
+
+ default_encode_section_info (decl, rtl, first);
+
+ if (!DECL_P (decl))
+ return;
+
+ model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
+ if (model_attr)
+ {
+ tree id;
+
+ init_idents ();
+
+ id = TREE_VALUE (TREE_VALUE (model_attr));
+
+ if (id == small_ident1 || id == small_ident2)
+ model = M32R_MODEL_SMALL;
+ else if (id == medium_ident1 || id == medium_ident2)
+ model = M32R_MODEL_MEDIUM;
+ else if (id == large_ident1 || id == large_ident2)
+ model = M32R_MODEL_LARGE;
+ else
+ gcc_unreachable (); /* shouldn't happen */
+ }
+ else
+ {
+ if (TARGET_MODEL_SMALL)
+ model = M32R_MODEL_SMALL;
+ else if (TARGET_MODEL_MEDIUM)
+ model = M32R_MODEL_MEDIUM;
+ else if (TARGET_MODEL_LARGE)
+ model = M32R_MODEL_LARGE;
+ else
+ gcc_unreachable (); /* shouldn't happen */
+ }
+ extra_flags |= model << SYMBOL_FLAG_MODEL_SHIFT;
+
+ if (extra_flags)
+ SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
+}
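+
+/* The two model bits sit just above the generic symbol flags (see
+   SYMBOL_FLAG_MODEL_SHIFT at the top of this file); SYMBOL_REF_MODEL
+   masks them back out with "& 3".  */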
+
+/* Only mark the object as being small data area addressable if
+ it hasn't been explicitly marked with a code model.
+
+ The user can explicitly put an object in the small data area with the
+ section attribute. If the object is in sdata/sbss and marked with a
+ code model do both [put the object in .sdata and mark it as being
+ addressed with a specific code model - don't mark it as being addressed
+ with an SDA reloc though]. This is ok and might be useful at times. If
+ the object doesn't fit the linker will give an error. */
+
+static bool
+m32r_in_small_data_p (const_tree decl)
+{
+ const_tree section;
+
+ if (TREE_CODE (decl) != VAR_DECL)
+ return false;
+
+ if (lookup_attribute ("model", DECL_ATTRIBUTES (decl)))
+ return false;
+
+ section = DECL_SECTION_NAME (decl);
+ if (section)
+ {
+ const char *const name = TREE_STRING_POINTER (section);
+ if (strcmp (name, ".sdata") == 0 || strcmp (name, ".sbss") == 0)
+ return true;
+ }
+ else
+ {
+ if (! TREE_READONLY (decl) && ! TARGET_SDATA_NONE)
+ {
+ int size = int_size_in_bytes (TREE_TYPE (decl));
+
+ if (size > 0 && size <= g_switch_value)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Do anything needed before RTL is emitted for each function. */
+
+void
+m32r_init_expanders (void)
+{
+ /* ??? At one point there was code here. The function is left in
+ to make it easy to experiment. */
+}
+
+int
+call_operand (rtx op, enum machine_mode mode)
+{
+ if (!MEM_P (op))
+ return 0;
+ op = XEXP (op, 0);
+ return call_address_operand (op, mode);
+}
+
+/* Return 1 if OP is a reference to an object in .sdata/.sbss. */
+
+int
+small_data_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ if (! TARGET_SDATA_USE)
+ return 0;
+
+ if (GET_CODE (op) == SYMBOL_REF)
+ return SYMBOL_REF_SMALL_P (op);
+
+ if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+ && satisfies_constraint_J (XEXP (XEXP (op, 0), 1)))
+ return SYMBOL_REF_SMALL_P (XEXP (XEXP (op, 0), 0));
+
+ return 0;
+}
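+
+/* E.g. this accepts a bare (symbol_ref "foo") whose small-data flag is
+   set, or an offset form such as
+   (const (plus (symbol_ref "foo") (const_int 4))), provided the offset
+   satisfies constraint J (16-bit signed).  */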
+
+/* Return 1 if OP is a symbol that can use 24-bit addressing. */
+
+int
+addr24_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ rtx sym;
+
+ if (flag_pic)
+ return 0;
+
+ if (GET_CODE (op) == LABEL_REF)
+ return TARGET_ADDR24;
+
+ if (GET_CODE (op) == SYMBOL_REF)
+ sym = op;
+ else if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+ && satisfies_constraint_M (XEXP (XEXP (op, 0), 1)))
+ sym = XEXP (XEXP (op, 0), 0);
+ else
+ return 0;
+
+ if (SYMBOL_REF_MODEL (sym) == M32R_MODEL_SMALL)
+ return 1;
+
+ if (TARGET_ADDR24
+ && (CONSTANT_POOL_ADDRESS_P (sym)
+ || LIT_NAME_P (XSTR (sym, 0))))
+ return 1;
+
+ return 0;
+}
+
+/* Return 1 if OP is a symbol that needs 32-bit addressing. */
+
+int
+addr32_operand (rtx op, enum machine_mode mode)
+{
+ rtx sym;
+
+ if (GET_CODE (op) == LABEL_REF)
+ return TARGET_ADDR32;
+
+ if (GET_CODE (op) == SYMBOL_REF)
+ sym = op;
+ else if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+ && CONST_INT_P (XEXP (XEXP (op, 0), 1))
+ && ! flag_pic)
+ sym = XEXP (XEXP (op, 0), 0);
+ else
+ return 0;
+
+ return (! addr24_operand (sym, mode)
+ && ! small_data_operand (sym, mode));
+}
+
+/* Return 1 if OP is a function that can be called with the `bl' insn. */
+
+int
+call26_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ if (flag_pic)
+ return 1;
+
+ if (GET_CODE (op) == SYMBOL_REF)
+ return SYMBOL_REF_MODEL (op) != M32R_MODEL_LARGE;
+
+ return TARGET_CALL26;
+}
+
+/* Return 1 if OP is a DImode const we want to handle inline.
+ This must match the code in the movdi pattern.
+   It is used by the 'G' constraint in constraints.md.  */
+
+int
+easy_di_const (rtx op)
+{
+ rtx high_rtx, low_rtx;
+ HOST_WIDE_INT high, low;
+
+ split_double (op, &high_rtx, &low_rtx);
+ high = INTVAL (high_rtx);
+ low = INTVAL (low_rtx);
+ /* Pick constants loadable with 2 16-bit `ldi' insns. */
+ if (high >= -128 && high <= 127
+ && low >= -128 && low <= 127)
+ return 1;
+ return 0;
+}
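+
+/* For example, the DImode constant 0x0000007fffffff80 splits into
+   high = 0x7f and low = -0x80, both within [-128, 127], so two ldi
+   insns suffice; 0x0000010000000000 (high = 0x100) does not qualify.  */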
+
+/* Return 1 if OP is a DFmode const we want to handle inline.
+ This must match the code in the movdf pattern.
+   It is used by the 'H' constraint in constraints.md.  */
+
+int
+easy_df_const (rtx op)
+{
+ REAL_VALUE_TYPE r;
+ long l[2];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, op);
+ REAL_VALUE_TO_TARGET_DOUBLE (r, l);
+ if (l[0] == 0 && l[1] == 0)
+ return 1;
+ if ((l[0] & 0xffff) == 0 && l[1] == 0)
+ return 1;
+ return 0;
+}
+
+/* Return 1 if OP is (mem (reg ...)).
+ This is used in insn length calcs. */
+
+int
+memreg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ return MEM_P (op) && REG_P (XEXP (op, 0));
+}
+
+/* Return nonzero if TYPE must be passed by indirect reference. */
+
+static bool
+m32r_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
+ enum machine_mode mode, const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ int size;
+
+ if (type)
+ size = int_size_in_bytes (type);
+ else
+ size = GET_MODE_SIZE (mode);
+
+ return (size < 0 || size > 8);
+}
+
+/* Comparisons. */
+
+/* X and Y are two things to compare using CODE. Emit the compare insn and
+ return the rtx for compare [arg0 of the if_then_else].
+ If need_compare is true then the comparison insn must be generated, rather
+ than being subsumed into the following branch instruction. */
+
+rtx
+gen_compare (enum rtx_code code, rtx x, rtx y, int need_compare)
+{
+ enum rtx_code compare_code;
+ enum rtx_code branch_code;
+ rtx cc_reg = gen_rtx_REG (CCmode, CARRY_REGNUM);
+ int must_swap = 0;
+
+ switch (code)
+ {
+ case EQ: compare_code = EQ; branch_code = NE; break;
+ case NE: compare_code = EQ; branch_code = EQ; break;
+ case LT: compare_code = LT; branch_code = NE; break;
+ case LE: compare_code = LT; branch_code = EQ; must_swap = 1; break;
+ case GT: compare_code = LT; branch_code = NE; must_swap = 1; break;
+ case GE: compare_code = LT; branch_code = EQ; break;
+ case LTU: compare_code = LTU; branch_code = NE; break;
+ case LEU: compare_code = LTU; branch_code = EQ; must_swap = 1; break;
+ case GTU: compare_code = LTU; branch_code = NE; must_swap = 1; break;
+ case GEU: compare_code = LTU; branch_code = EQ; break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (need_compare)
+ {
+ switch (compare_code)
+ {
+ case EQ:
+ if (satisfies_constraint_P (y) /* Reg equal to small const. */
+ && y != const0_rtx)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_addsi3 (tmp, x, GEN_INT (-INTVAL (y))));
+ x = tmp;
+ y = const0_rtx;
+ }
+ else if (CONSTANT_P (y)) /* Reg equal to const. */
+ {
+ rtx tmp = force_reg (GET_MODE (x), y);
+ y = tmp;
+ }
+
+ if (register_operand (y, SImode) /* Reg equal to reg. */
+ || y == const0_rtx) /* Reg equal to zero. */
+ {
+ emit_insn (gen_cmp_eqsi_insn (x, y));
+
+ return gen_rtx_fmt_ee (code, CCmode, cc_reg, const0_rtx);
+ }
+ break;
+
+ case LT:
+ if (register_operand (y, SImode)
+ || satisfies_constraint_P (y))
+ {
+ rtx tmp = gen_reg_rtx (SImode); /* Reg compared to reg. */
+
+ switch (code)
+ {
+ case LT:
+ emit_insn (gen_cmp_ltsi_insn (x, y));
+ code = EQ;
+ break;
+ case LE:
+ if (y == const0_rtx)
+ tmp = const1_rtx;
+ else
+ emit_insn (gen_addsi3 (tmp, y, constm1_rtx));
+ emit_insn (gen_cmp_ltsi_insn (x, tmp));
+ code = EQ;
+ break;
+ case GT:
+ if (CONST_INT_P (y))
+ tmp = gen_rtx_PLUS (SImode, y, const1_rtx);
+ else
+ emit_insn (gen_addsi3 (tmp, y, constm1_rtx));
+ emit_insn (gen_cmp_ltsi_insn (x, tmp));
+ code = NE;
+ break;
+ case GE:
+ emit_insn (gen_cmp_ltsi_insn (x, y));
+ code = NE;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ return gen_rtx_fmt_ee (code, CCmode, cc_reg, const0_rtx);
+ }
+ break;
+
+ case LTU:
+ if (register_operand (y, SImode)
+ || satisfies_constraint_P (y))
+ {
+ rtx tmp = gen_reg_rtx (SImode); /* Reg (unsigned) compared to reg. */
+
+ switch (code)
+ {
+ case LTU:
+ emit_insn (gen_cmp_ltusi_insn (x, y));
+ code = EQ;
+ break;
+ case LEU:
+ if (y == const0_rtx)
+ tmp = const1_rtx;
+ else
+ emit_insn (gen_addsi3 (tmp, y, constm1_rtx));
+ emit_insn (gen_cmp_ltusi_insn (x, tmp));
+ code = EQ;
+ break;
+ case GTU:
+ if (CONST_INT_P (y))
+ tmp = gen_rtx_PLUS (SImode, y, const1_rtx);
+ else
+ emit_insn (gen_addsi3 (tmp, y, constm1_rtx));
+ emit_insn (gen_cmp_ltusi_insn (x, tmp));
+ code = NE;
+ break;
+ case GEU:
+ emit_insn (gen_cmp_ltusi_insn (x, y));
+ code = NE;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ return gen_rtx_fmt_ee (code, CCmode, cc_reg, const0_rtx);
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ /* Reg/reg equal comparison. */
+ if (compare_code == EQ
+ && register_operand (y, SImode))
+ return gen_rtx_fmt_ee (code, CCmode, x, y);
+
+ /* Reg/zero signed comparison. */
+ if ((compare_code == EQ || compare_code == LT)
+ && y == const0_rtx)
+ return gen_rtx_fmt_ee (code, CCmode, x, y);
+
+ /* Reg/smallconst equal comparison. */
+ if (compare_code == EQ
+ && satisfies_constraint_P (y))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_addsi3 (tmp, x, GEN_INT (-INTVAL (y))));
+ return gen_rtx_fmt_ee (code, CCmode, tmp, const0_rtx);
+ }
+
+ /* Reg/const equal comparison. */
+ if (compare_code == EQ
+ && CONSTANT_P (y))
+ {
+ rtx tmp = force_reg (GET_MODE (x), y);
+
+ return gen_rtx_fmt_ee (code, CCmode, x, tmp);
+ }
+ }
+
+ if (CONSTANT_P (y))
+ {
+ if (must_swap)
+ y = force_reg (GET_MODE (x), y);
+ else
+ {
+ int ok_const = reg_or_int16_operand (y, GET_MODE (y));
+
+ if (! ok_const)
+ y = force_reg (GET_MODE (x), y);
+ }
+ }
+
+ switch (compare_code)
+ {
+ case EQ :
+ emit_insn (gen_cmp_eqsi_insn (must_swap ? y : x, must_swap ? x : y));
+ break;
+ case LT :
+ emit_insn (gen_cmp_ltsi_insn (must_swap ? y : x, must_swap ? x : y));
+ break;
+ case LTU :
+ emit_insn (gen_cmp_ltusi_insn (must_swap ? y : x, must_swap ? x : y));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return gen_rtx_fmt_ee (branch_code, VOIDmode, cc_reg, CONST0_RTX (CCmode));
+}
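+
+/* Illustration of the swap logic: for LE without need_compare, the code
+   above emits the compare with operands swapped (computing y < x) and
+   returns (eq cc 0), i.e. "x <= y" is evaluated as "!(y < x)".  */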
+
+bool
+gen_cond_store (enum rtx_code code, rtx op0, rtx op1, rtx op2)
+{
+ enum machine_mode mode = GET_MODE (op0);
+
+ gcc_assert (mode == SImode);
+ switch (code)
+ {
+ case EQ:
+ if (!register_operand (op1, mode))
+ op1 = force_reg (mode, op1);
+
+ if (TARGET_M32RX || TARGET_M32R2)
+ {
+ if (!reg_or_zero_operand (op2, mode))
+ op2 = force_reg (mode, op2);
+
+ emit_insn (gen_seq_insn_m32rx (op0, op1, op2));
+ return true;
+ }
+ if (CONST_INT_P (op2) && INTVAL (op2) == 0)
+ {
+ emit_insn (gen_seq_zero_insn (op0, op1));
+ return true;
+ }
+
+ if (!reg_or_eq_int16_operand (op2, mode))
+ op2 = force_reg (mode, op2);
+
+ emit_insn (gen_seq_insn (op0, op1, op2));
+ return true;
+
+ case NE:
+ if (!CONST_INT_P (op2)
+ || (INTVAL (op2) != 0 && satisfies_constraint_K (op2)))
+ {
+ rtx reg;
+
+ if (reload_completed || reload_in_progress)
+ return false;
+
+ reg = gen_reg_rtx (SImode);
+ emit_insn (gen_xorsi3 (reg, op1, op2));
+ op1 = reg;
+
+ if (!register_operand (op1, mode))
+ op1 = force_reg (mode, op1);
+
+ emit_insn (gen_sne_zero_insn (op0, op1));
+ return true;
+ }
+ return false;
+
+ case LT:
+ case GT:
+ if (code == GT)
+ {
+ rtx tmp = op2;
+ op2 = op1;
+ op1 = tmp;
+ code = LT;
+ }
+
+ if (!register_operand (op1, mode))
+ op1 = force_reg (mode, op1);
+
+ if (!reg_or_int16_operand (op2, mode))
+ op2 = force_reg (mode, op2);
+
+ emit_insn (gen_slt_insn (op0, op1, op2));
+ return true;
+
+ case LTU:
+ case GTU:
+ if (code == GTU)
+ {
+ rtx tmp = op2;
+ op2 = op1;
+ op1 = tmp;
+ code = LTU;
+ }
+
+ if (!register_operand (op1, mode))
+ op1 = force_reg (mode, op1);
+
+ if (!reg_or_int16_operand (op2, mode))
+ op2 = force_reg (mode, op2);
+
+ emit_insn (gen_sltu_insn (op0, op1, op2));
+ return true;
+
+ case GE:
+ case GEU:
+ if (!register_operand (op1, mode))
+ op1 = force_reg (mode, op1);
+
+ if (!reg_or_int16_operand (op2, mode))
+ op2 = force_reg (mode, op2);
+
+ if (code == GE)
+ emit_insn (gen_sge_insn (op0, op1, op2));
+ else
+ emit_insn (gen_sgeu_insn (op0, op1, op2));
+ return true;
+
+ case LE:
+ case LEU:
+ if (!register_operand (op1, mode))
+ op1 = force_reg (mode, op1);
+
+ if (CONST_INT_P (op2))
+ {
+ HOST_WIDE_INT value = INTVAL (op2);
+ if (value >= 2147483647)
+ {
+ emit_move_insn (op0, const1_rtx);
+ return true;
+ }
+
+ op2 = GEN_INT (value + 1);
+ if (value < -32768 || value >= 32767)
+ op2 = force_reg (mode, op2);
+
+ if (code == LEU)
+ emit_insn (gen_sltu_insn (op0, op1, op2));
+ else
+ emit_insn (gen_slt_insn (op0, op1, op2));
+ return true;
+ }
+
+ if (!register_operand (op2, mode))
+ op2 = force_reg (mode, op2);
+
+ if (code == LEU)
+ emit_insn (gen_sleu_insn (op0, op1, op2));
+ else
+ emit_insn (gen_sle_insn (op0, op1, op2));
+ return true;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+
+/* Split a 2 word move (DI or DF) into component parts. */
+
+rtx
+gen_split_move_double (rtx operands[])
+{
+ enum machine_mode mode = GET_MODE (operands[0]);
+ rtx dest = operands[0];
+ rtx src = operands[1];
+ rtx val;
+
+ /* We might have (SUBREG (MEM)) here, so just get rid of the
+ subregs to make this code simpler. It is safe to call
+ alter_subreg any time after reload. */
+ if (GET_CODE (dest) == SUBREG)
+ alter_subreg (&dest, true);
+ if (GET_CODE (src) == SUBREG)
+ alter_subreg (&src, true);
+
+ start_sequence ();
+ if (REG_P (dest))
+ {
+ int dregno = REGNO (dest);
+
+ /* Reg = reg. */
+ if (REG_P (src))
+ {
+ int sregno = REGNO (src);
+
+ int reverse = (dregno == sregno + 1);
+
+ /* We normally copy the low-numbered register first. However, if
+ the first register operand 0 is the same as the second register of
+ operand 1, we must copy in the opposite order. */
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operand_subword (dest, reverse, TRUE, mode),
+ operand_subword (src, reverse, TRUE, mode)));
+
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operand_subword (dest, !reverse, TRUE, mode),
+ operand_subword (src, !reverse, TRUE, mode)));
+ }
+
+ /* Reg = constant. */
+ else if (CONST_INT_P (src) || GET_CODE (src) == CONST_DOUBLE)
+ {
+ rtx words[2];
+ split_double (src, &words[0], &words[1]);
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operand_subword (dest, 0, TRUE, mode),
+ words[0]));
+
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operand_subword (dest, 1, TRUE, mode),
+ words[1]));
+ }
+
+ /* Reg = mem. */
+ else if (MEM_P (src))
+ {
+ /* If the high-address word is used in the address, we must load it
+ last. Otherwise, load it first. */
+ int reverse
+ = (refers_to_regno_p (dregno, dregno + 1, XEXP (src, 0), 0) != 0);
+
+ /* We used to optimize loads from single registers as
+
+ ld r1,r3+; ld r2,r3
+
+ if r3 were not used subsequently. However, the REG_NOTES aren't
+ propagated correctly by the reload phase, and it can cause bad
+ code to be generated. We could still try:
+
+ ld r1,r3+; ld r2,r3; addi r3,-4
+
+ which saves 2 bytes and doesn't force longword alignment. */
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operand_subword (dest, reverse, TRUE, mode),
+ adjust_address (src, SImode,
+ reverse * UNITS_PER_WORD)));
+
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operand_subword (dest, !reverse, TRUE, mode),
+ adjust_address (src, SImode,
+ !reverse * UNITS_PER_WORD)));
+ }
+ else
+ gcc_unreachable ();
+ }
+
+ /* Mem = reg. */
+/* We used to optimize stores to single registers as
+
+ st r1,r3; st r2,+r3
+
+ if r3 were not used subsequently. However, the REG_NOTES aren't
+ propagated correctly by the reload phase, and it can cause bad
+ code to be generated. We could still try:
+
+ st r1,r3; st r2,+r3; addi r3,-4
+
+ which saves 2 bytes and doesn't force longword alignment. */
+ else if (MEM_P (dest) && REG_P (src))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode,
+ adjust_address (dest, SImode, 0),
+ operand_subword (src, 0, TRUE, mode)));
+
+ emit_insn (gen_rtx_SET (VOIDmode,
+ adjust_address (dest, SImode, UNITS_PER_WORD),
+ operand_subword (src, 1, TRUE, mode)));
+ }
+
+ else
+ gcc_unreachable ();
+
+ val = get_insns ();
+ end_sequence ();
+ return val;
+}
+
+
+static int
+m32r_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
+ tree type, bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+
+ int words;
+ unsigned int size =
+ (((mode == BLKmode && type)
+ ? (unsigned int) int_size_in_bytes (type)
+ : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1)
+ / UNITS_PER_WORD;
+
+ if (*cum >= M32R_MAX_PARM_REGS)
+ words = 0;
+ else if (*cum + size > M32R_MAX_PARM_REGS)
+ words = (*cum + size) - M32R_MAX_PARM_REGS;
+ else
+ words = 0;
+
+ return words * UNITS_PER_WORD;
+}
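+
+/* Worked example, assuming M32R_MAX_PARM_REGS is 4 (its value in
+   m32r.h): a DImode argument arriving with *cum == 3 straddles the
+   boundary, one word in the last parameter register and one on the
+   stack, so this returns 4.  */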
+
+/* The ROUND_ADVANCE* macros are local to this file. */
+/* Round SIZE up to a word boundary. */
+#define ROUND_ADVANCE(SIZE) \
+ (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Round arg MODE/TYPE up to the next word boundary. */
+#define ROUND_ADVANCE_ARG(MODE, TYPE) \
+ ((MODE) == BLKmode \
+ ? ROUND_ADVANCE ((unsigned int) int_size_in_bytes (TYPE)) \
+ : ROUND_ADVANCE ((unsigned int) GET_MODE_SIZE (MODE)))
+
+/* Round CUM up to the necessary point for argument MODE/TYPE. */
+#define ROUND_ADVANCE_CUM(CUM, MODE, TYPE) (CUM)
+
+/* Return boolean indicating arg of type TYPE and mode MODE will be passed in
+ a reg. This includes arguments that have to be passed by reference as the
+ pointer to them is passed in a reg if one is available (and that is what
+ we're given).
+ This macro is only used in this file. */
+#define PASS_IN_REG_P(CUM, MODE, TYPE) \
+ (ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE)) < M32R_MAX_PARM_REGS)
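+
+/* E.g. with 4-byte words, ROUND_ADVANCE (5) == 2 and ROUND_ADVANCE (4)
+   == 1.  ROUND_ADVANCE_CUM is the identity here because the M32R puts
+   no extra alignment on argument registers.  */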
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+/* On the M32R the first M32R_MAX_PARM_REGS args are normally in registers
+ and the rest are pushed. */
+
+static rtx
+m32r_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
+ const_tree type ATTRIBUTE_UNUSED,
+ bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+
+ return (PASS_IN_REG_P (*cum, mode, type)
+ ? gen_rtx_REG (mode, ROUND_ADVANCE_CUM (*cum, mode, type))
+ : NULL_RTX);
+}
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+static void
+m32r_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+
+ *cum = (ROUND_ADVANCE_CUM (*cum, mode, type)
+ + ROUND_ADVANCE_ARG (mode, type));
+}
+
+/* Worker function for TARGET_RETURN_IN_MEMORY. */
+
+static bool
+m32r_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+ cumulative_args_t dummy = pack_cumulative_args (NULL);
+
+ return m32r_pass_by_reference (dummy, TYPE_MODE (type), type, false);
+}
+
+/* Worker function for TARGET_FUNCTION_VALUE. */
+
+static rtx
+m32r_function_value (const_tree valtype,
+ const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (TYPE_MODE (valtype), 0);
+}
+
+/* Worker function for TARGET_LIBCALL_VALUE. */
+
+static rtx
+m32r_libcall_value (enum machine_mode mode,
+ const_rtx fun ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (mode, 0);
+}
+
+/* Worker function for TARGET_FUNCTION_VALUE_REGNO_P.
+
+ ??? What about r1 in DI/DF values. */
+
+static bool
+m32r_function_value_regno_p (const unsigned int regno)
+{
+ return (regno == 0);
+}
+
+/* Do any needed setup for a variadic function. For the M32R, we must
+ create a register parameter block, and then copy any anonymous arguments
+ in registers to memory.
+
+ CUM has not been updated for the last named argument which has type TYPE
+ and mode MODE, and we rely on this fact. */
+
+static void
+m32r_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
+ tree type, int *pretend_size, int no_rtl)
+{
+ int first_anon_arg;
+
+ if (no_rtl)
+ return;
+
+ /* All BLKmode values are passed by reference. */
+ gcc_assert (mode != BLKmode);
+
+ first_anon_arg = (ROUND_ADVANCE_CUM (*get_cumulative_args (cum), mode, type)
+ + ROUND_ADVANCE_ARG (mode, type));
+
+ if (first_anon_arg < M32R_MAX_PARM_REGS)
+ {
+ /* Note that first_reg_offset < M32R_MAX_PARM_REGS. */
+ int first_reg_offset = first_anon_arg;
+ /* Size in words to "pretend" allocate. */
+ int size = M32R_MAX_PARM_REGS - first_reg_offset;
+ rtx regblock;
+
+ regblock = gen_frame_mem (BLKmode,
+ plus_constant (Pmode, arg_pointer_rtx,
+ FIRST_PARM_OFFSET (0)));
+ set_mem_alias_set (regblock, get_varargs_alias_set ());
+ move_block_from_reg (first_reg_offset, regblock, size);
+
+ *pretend_size = (size * UNITS_PER_WORD);
+ }
+}
+
+
+/* Return true if INSN is a real machine instruction (not a note, USE,
+   CLOBBER or debug insn). */
+
+static int
+m32r_is_insn (rtx insn)
+{
+ return (NONDEBUG_INSN_P (insn)
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER);
+}
+
+/* Increase the priority of long instructions so that the
+ short instructions are scheduled ahead of the long ones. */
+
+static int
+m32r_adjust_priority (rtx insn, int priority)
+{
+ if (m32r_is_insn (insn)
+ && get_attr_insn_size (insn) != INSN_SIZE_SHORT)
+ priority <<= 3;
+
+ return priority;
+}
+
+
+/* Indicate how many instructions can be issued at the same time.
+ This is sort of a lie. The m32r can issue only 1 long insn at
+ once, but it can issue 2 short insns. The default therefore is
+ set at 2, but this can be overridden by the command line option
+ -missue-rate=1. */
+
+static int
+m32r_issue_rate (void)
+{
+ return ((TARGET_LOW_ISSUE_RATE) ? 1 : 2);
+}
+
+/* Cost functions. */
+/* Memory is 3 times as expensive as registers.
+ ??? Is that the right way to look at it? */
+
+static int
+m32r_memory_move_cost (enum machine_mode mode,
+ reg_class_t rclass ATTRIBUTE_UNUSED,
+ bool in ATTRIBUTE_UNUSED)
+{
+ if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
+ return 6;
+ else
+ return 12;
+}
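+
+/* (6 is three times the default register-move cost of 2; 12 applies the
+   same ratio to a two-word move.)  */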
+
+static bool
+m32r_rtx_costs (rtx x, int code, int outer_code ATTRIBUTE_UNUSED,
+ int opno ATTRIBUTE_UNUSED, int *total,
+ bool speed ATTRIBUTE_UNUSED)
+{
+ switch (code)
+ {
+ /* Small integers are as cheap as registers. 4 byte values can be
+ fetched as immediate constants - let's give that the cost of an
+ extra insn. */
+ case CONST_INT:
+ if (INT16_P (INTVAL (x)))
+ {
+ *total = 0;
+ return true;
+ }
+ /* FALLTHRU */
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ *total = COSTS_N_INSNS (1);
+ return true;
+
+ case CONST_DOUBLE:
+ {
+ rtx high, low;
+
+ split_double (x, &high, &low);
+ *total = COSTS_N_INSNS (!INT16_P (INTVAL (high))
+ + !INT16_P (INTVAL (low)));
+ return true;
+ }
+
+ case MULT:
+ *total = COSTS_N_INSNS (3);
+ return true;
+
+ case DIV:
+ case UDIV:
+ case MOD:
+ case UMOD:
+ *total = COSTS_N_INSNS (10);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/* Type of function DECL.
+
+ The result is cached. To reset the cache at the end of a function,
+ call with DECL = NULL_TREE. */
+
+enum m32r_function_type
+m32r_compute_function_type (tree decl)
+{
+ /* Cached value. */
+ static enum m32r_function_type fn_type = M32R_FUNCTION_UNKNOWN;
+ /* Last function we were called for. */
+ static tree last_fn = NULL_TREE;
+
+ /* Resetting the cached value? */
+ if (decl == NULL_TREE)
+ {
+ fn_type = M32R_FUNCTION_UNKNOWN;
+ last_fn = NULL_TREE;
+ return fn_type;
+ }
+
+ if (decl == last_fn && fn_type != M32R_FUNCTION_UNKNOWN)
+ return fn_type;
+
+ /* Compute function type. */
+ fn_type = (lookup_attribute ("interrupt", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE
+ ? M32R_FUNCTION_INTERRUPT
+ : M32R_FUNCTION_NORMAL);
+
+ last_fn = decl;
+ return fn_type;
+}
+ /* Function prologue/epilogue handlers. */
+
+/* M32R stack frames look like:
+
+ Before call After call
+ +-----------------------+ +-----------------------+
+ | | | |
+ high | local variables, | | local variables, |
+ mem | reg save area, etc. | | reg save area, etc. |
+ | | | |
+ +-----------------------+ +-----------------------+
+ | | | |
+ | arguments on stack. | | arguments on stack. |
+ | | | |
+ SP+0->+-----------------------+ +-----------------------+
+ | reg parm save area, |
+ | only created for |
+ | variable argument |
+ | functions |
+ +-----------------------+
+ | previous frame ptr |
+ +-----------------------+
+ | |
+ | register save area |
+ | |
+ +-----------------------+
+ | return address |
+ +-----------------------+
+ | |
+ | local variables |
+ | |
+ +-----------------------+
+ | |
+ | alloca allocations |
+ | |
+ +-----------------------+
+ | |
+ low | arguments on stack |
+ memory | |
+ SP+0->+-----------------------+
+
+Notes:
+1) The "reg parm save area" does not exist for non variable argument fns.
+2) The "reg parm save area" can be eliminated completely if we saved regs
+ containing anonymous args separately but that complicates things too
+ much (so it's not done).
+3) The return address is saved after the register save area so as to have as
+ many insns as possible between the restoration of `lr' and the `jmp lr'. */
+
+/* Structure to be filled in by m32r_compute_frame_size with register
+ save masks, and offsets for the current function. */
+struct m32r_frame_info
+{
+ unsigned int total_size; /* # bytes that the entire frame takes up. */
+ unsigned int extra_size; /* # bytes of extra stuff. */
+ unsigned int pretend_size; /* # bytes we push and pretend caller did. */
+ unsigned int args_size; /* # bytes that outgoing arguments take up. */
+ unsigned int reg_size; /* # bytes needed to store regs. */
+ unsigned int var_size; /* # bytes that variables take up. */
+ unsigned int gmask; /* Mask of saved gp registers. */
+ unsigned int save_fp; /* Nonzero if fp must be saved. */
+ unsigned int save_lr; /* Nonzero if lr (return addr) must be saved. */
+ int initialized; /* Nonzero if frame size already calculated. */
+};
+
+/* Current frame information calculated by m32r_compute_frame_size. */
+static struct m32r_frame_info current_frame_info;
+
+/* Zero structure to initialize current_frame_info. */
+static struct m32r_frame_info zero_frame_info;
+
+#define FRAME_POINTER_MASK (1 << (FRAME_POINTER_REGNUM))
+#define RETURN_ADDR_MASK (1 << (RETURN_ADDR_REGNUM))
+
+/* Tell prologue and epilogue if register REGNO should be saved / restored.
+ The return address and frame pointer are treated separately.
+ Don't consider them here. */
+#define MUST_SAVE_REGISTER(regno, interrupt_p) \
+ ((regno) != RETURN_ADDR_REGNUM && (regno) != FRAME_POINTER_REGNUM \
+ && (df_regs_ever_live_p (regno) && (!call_really_used_regs[regno] || interrupt_p)))
+
+#define MUST_SAVE_FRAME_POINTER (df_regs_ever_live_p (FRAME_POINTER_REGNUM))
+#define MUST_SAVE_RETURN_ADDR (df_regs_ever_live_p (RETURN_ADDR_REGNUM) || crtl->profile)
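+
+/* For example: a live call-saved register such as r8 must always be
+   saved, while a live call-clobbered register such as r4 must be
+   saved only in interrupt handlers, which have to preserve every
+   register the interrupted code may be using.  */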
+
+#define SHORT_INSN_SIZE 2 /* Size of small instructions. */
+#define LONG_INSN_SIZE 4 /* Size of long instructions. */
+
+/* Return the bytes needed to compute the frame pointer from the current
+ stack pointer.
+
+ SIZE is the size needed for local variables. */
+
+unsigned int
+m32r_compute_frame_size (int size) /* # of var. bytes allocated. */
+{
+ unsigned int regno;
+ unsigned int total_size, var_size, args_size, pretend_size, extra_size;
+ unsigned int reg_size;
+ unsigned int gmask;
+ enum m32r_function_type fn_type;
+ int interrupt_p;
+ int pic_reg_used = flag_pic && (crtl->uses_pic_offset_table
+ | crtl->profile);
+
+ var_size = M32R_STACK_ALIGN (size);
+ args_size = M32R_STACK_ALIGN (crtl->outgoing_args_size);
+ pretend_size = crtl->args.pretend_args_size;
+ extra_size = FIRST_PARM_OFFSET (0);
+ total_size = extra_size + pretend_size + args_size + var_size;
+ reg_size = 0;
+ gmask = 0;
+
+ /* See if this is an interrupt handler. Call used registers must be saved
+ for them too. */
+ fn_type = m32r_compute_function_type (current_function_decl);
+ interrupt_p = M32R_INTERRUPT_P (fn_type);
+
+ /* Calculate space needed for registers. */
+ for (regno = 0; regno < M32R_MAX_INT_REGS; regno++)
+ {
+ if (MUST_SAVE_REGISTER (regno, interrupt_p)
+ || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
+ {
+ reg_size += UNITS_PER_WORD;
+ gmask |= 1 << regno;
+ }
+ }
+
+ current_frame_info.save_fp = MUST_SAVE_FRAME_POINTER;
+ current_frame_info.save_lr = MUST_SAVE_RETURN_ADDR || pic_reg_used;
+
+ reg_size += ((current_frame_info.save_fp + current_frame_info.save_lr)
+ * UNITS_PER_WORD);
+ total_size += reg_size;
+
+ /* ??? Not sure this is necessary, and I don't think the epilogue
+ handler will do the right thing if this changes total_size. */
+ total_size = M32R_STACK_ALIGN (total_size);
+
+ /* frame_size = total_size - (pretend_size + reg_size); */
+
+ /* Save computed information. */
+ current_frame_info.total_size = total_size;
+ current_frame_info.extra_size = extra_size;
+ current_frame_info.pretend_size = pretend_size;
+ current_frame_info.var_size = var_size;
+ current_frame_info.args_size = args_size;
+ current_frame_info.reg_size = reg_size;
+ current_frame_info.gmask = gmask;
+ current_frame_info.initialized = reload_completed;
+
+ /* Ok, we're done. */
+ return total_size;
+}
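+
+/* A worked example: a non-interrupt function with 20 bytes of locals
+   and 8 bytes of outgoing arguments that must save fp and lr gets
+   var_size = 20, args_size = 8, reg_size = 8 and no pretend bytes,
+   so total_size = M32R_STACK_ALIGN (36) = 36.  */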
+
+/* Worker function for TARGET_CAN_ELIMINATE. */
+
+bool
+m32r_can_eliminate (const int from, const int to)
+{
+ return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
+ ? ! frame_pointer_needed
+ : true);
+}
+
+
+/* The table we use to reference PIC data. */
+static rtx global_offset_table;
+
+static void
+m32r_reload_lr (rtx sp, int size)
+{
+ rtx lr = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
+
+ if (size == 0)
+ emit_insn (gen_movsi (lr, gen_frame_mem (Pmode, sp)));
+ else if (size < 32768)
+ emit_insn (gen_movsi (lr, gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, sp,
+ GEN_INT (size)))));
+ else
+ {
+ rtx tmp = gen_rtx_REG (Pmode, PROLOGUE_TMP_REGNUM);
+
+ emit_insn (gen_movsi (tmp, GEN_INT (size)));
+ emit_insn (gen_addsi3 (tmp, tmp, sp));
+ emit_insn (gen_movsi (lr, gen_frame_mem (Pmode, tmp)));
+ }
+
+ emit_use (lr);
+}
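+
+/* For example, a 16-byte frame reloads lr with a single load from
+   @(16,sp); only frames of 32768 bytes or more need the offset built
+   in a temporary register first.  */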
+
+void
+m32r_load_pic_register (void)
+{
+ global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
+ emit_insn (gen_get_pc (pic_offset_table_rtx, global_offset_table,
+ GEN_INT (TARGET_MODEL_SMALL)));
+
+  /* Need to emit this whether or not we obey regdecls,
+     since setjmp/longjmp can corrupt the liveness info.  */
+ emit_use (pic_offset_table_rtx);
+}
+
+/* Expand the m32r prologue as a series of insns. */
+
+void
+m32r_expand_prologue (void)
+{
+ int regno;
+ int frame_size;
+ unsigned int gmask;
+ int pic_reg_used = flag_pic && (crtl->uses_pic_offset_table
+ | crtl->profile);
+
+ if (! current_frame_info.initialized)
+ m32r_compute_frame_size (get_frame_size ());
+
+ gmask = current_frame_info.gmask;
+
+ /* These cases shouldn't happen. Catch them now. */
+ gcc_assert (current_frame_info.total_size || !gmask);
+
+ /* Allocate space for register arguments if this is a variadic function. */
+ if (current_frame_info.pretend_size != 0)
+ {
+ /* Use a HOST_WIDE_INT temporary, since negating an unsigned int gives
+ the wrong result on a 64-bit host. */
+ HOST_WIDE_INT pretend_size = current_frame_info.pretend_size;
+ emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-pretend_size)));
+ }
+
+ /* Save any registers we need to and set up fp. */
+ if (current_frame_info.save_fp)
+ emit_insn (gen_movsi_push (stack_pointer_rtx, frame_pointer_rtx));
+
+ gmask &= ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK);
+
+ /* Save any needed call-saved regs (and call-used if this is an
+ interrupt handler). */
+ for (regno = 0; regno <= M32R_MAX_INT_REGS; ++regno)
+ {
+ if ((gmask & (1 << regno)) != 0)
+ emit_insn (gen_movsi_push (stack_pointer_rtx,
+ gen_rtx_REG (Pmode, regno)));
+ }
+
+ if (current_frame_info.save_lr)
+ emit_insn (gen_movsi_push (stack_pointer_rtx,
+ gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM)));
+
+ /* Allocate the stack frame. */
+ frame_size = (current_frame_info.total_size
+ - (current_frame_info.pretend_size
+ + current_frame_info.reg_size));
+
+ if (frame_size == 0)
+ ; /* Nothing to do. */
+ else if (frame_size <= 32768)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-frame_size)));
+ else
+ {
+ rtx tmp = gen_rtx_REG (Pmode, PROLOGUE_TMP_REGNUM);
+
+ emit_insn (gen_movsi (tmp, GEN_INT (frame_size)));
+ emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
+ }
+
+ if (frame_pointer_needed)
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+
+ if (crtl->profile)
+    /* Push lr for mcount (from_pc, x).  */
+ emit_insn (gen_movsi_push (stack_pointer_rtx,
+ gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM)));
+
+ if (pic_reg_used)
+ {
+ m32r_load_pic_register ();
+ m32r_reload_lr (stack_pointer_rtx,
+ (crtl->profile ? 0 : frame_size));
+ }
+
+ if (crtl->profile && !pic_reg_used)
+ emit_insn (gen_blockage ());
+}
+
+
+/* Set up the stack and frame pointer (if desired) for the function.
+   Note: if this is changed, you need to mirror the changes in
+   m32r_compute_frame_size, which calculates the prologue size.  */
+
+static void
+m32r_output_function_prologue (FILE * file, HOST_WIDE_INT size)
+{
+ enum m32r_function_type fn_type = m32r_compute_function_type (current_function_decl);
+
+ /* If this is an interrupt handler, mark it as such. */
+ if (M32R_INTERRUPT_P (fn_type))
+ fprintf (file, "\t%s interrupt handler\n", ASM_COMMENT_START);
+
+ if (! current_frame_info.initialized)
+ m32r_compute_frame_size (size);
+
+ /* This is only for the human reader. */
+ fprintf (file,
+ "\t%s PROLOGUE, vars= %d, regs= %d, args= %d, extra= %d\n",
+ ASM_COMMENT_START,
+ current_frame_info.var_size,
+ current_frame_info.reg_size / 4,
+ current_frame_info.args_size,
+ current_frame_info.extra_size);
+}
+
+/* Output RTL to pop register REGNO from the stack. */
+
+static void
+pop (int regno)
+{
+ rtx x;
+
+ x = emit_insn (gen_movsi_pop (gen_rtx_REG (Pmode, regno),
+ stack_pointer_rtx));
+ add_reg_note (x, REG_INC, stack_pointer_rtx);
+}
+
+/* Expand the m32r epilogue as a series of insns. */
+
+void
+m32r_expand_epilogue (void)
+{
+ int regno;
+ int noepilogue = FALSE;
+ int total_size;
+
+ gcc_assert (current_frame_info.initialized);
+ total_size = current_frame_info.total_size;
+
+ if (total_size == 0)
+ {
+ rtx insn = get_last_insn ();
+
+ /* If the last insn was a BARRIER, we don't have to write any code
+ because a jump (aka return) was put there. */
+ if (insn && NOTE_P (insn))
+ insn = prev_nonnote_insn (insn);
+ if (insn && BARRIER_P (insn))
+ noepilogue = TRUE;
+ }
+
+ if (!noepilogue)
+ {
+ unsigned int var_size = current_frame_info.var_size;
+ unsigned int args_size = current_frame_info.args_size;
+ unsigned int gmask = current_frame_info.gmask;
+ int can_trust_sp_p = !cfun->calls_alloca;
+
+ if (flag_exceptions)
+ emit_insn (gen_blockage ());
+
+ /* The first thing to do is point the sp at the bottom of the register
+ save area. */
+ if (can_trust_sp_p)
+ {
+ unsigned int reg_offset = var_size + args_size;
+
+ if (reg_offset == 0)
+ ; /* Nothing to do. */
+ else if (reg_offset < 32768)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (reg_offset)));
+ else
+ {
+ rtx tmp = gen_rtx_REG (Pmode, PROLOGUE_TMP_REGNUM);
+
+ emit_insn (gen_movsi (tmp, GEN_INT (reg_offset)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ tmp));
+ }
+ }
+ else if (frame_pointer_needed)
+ {
+ unsigned int reg_offset = var_size + args_size;
+
+ if (reg_offset == 0)
+ emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
+ else if (reg_offset < 32768)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, frame_pointer_rtx,
+ GEN_INT (reg_offset)));
+ else
+ {
+ rtx tmp = gen_rtx_REG (Pmode, PROLOGUE_TMP_REGNUM);
+
+ emit_insn (gen_movsi (tmp, GEN_INT (reg_offset)));
+ emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ tmp));
+ }
+ }
+ else
+ gcc_unreachable ();
+
+ if (current_frame_info.save_lr)
+ pop (RETURN_ADDR_REGNUM);
+
+ /* Restore any saved registers, in reverse order of course. */
+ gmask &= ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK);
+ for (regno = M32R_MAX_INT_REGS - 1; regno >= 0; --regno)
+ {
+ if ((gmask & (1L << regno)) != 0)
+ pop (regno);
+ }
+
+ if (current_frame_info.save_fp)
+ pop (FRAME_POINTER_REGNUM);
+
+ /* Remove varargs area if present. */
+ if (current_frame_info.pretend_size != 0)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (current_frame_info.pretend_size)));
+
+ emit_insn (gen_blockage ());
+ }
+}
+
+/* Do any necessary cleanup after a function to restore stack, frame,
+ and regs. */
+
+static void
+m32r_output_function_epilogue (FILE * file ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+{
+ /* Reset state info for each function. */
+ current_frame_info = zero_frame_info;
+ m32r_compute_function_type (NULL_TREE);
+}
+
+/* Return nonzero if this function is known to have a null or
+   1-instruction epilogue.  */
+
+int
+direct_return (void)
+{
+ if (!reload_completed)
+ return FALSE;
+
+ if (M32R_INTERRUPT_P (m32r_compute_function_type (current_function_decl)))
+ return FALSE;
+
+ if (! current_frame_info.initialized)
+ m32r_compute_frame_size (get_frame_size ());
+
+ return current_frame_info.total_size == 0;
+}
+
+
+/* PIC. */
+
+int
+m32r_legitimate_pic_operand_p (rtx x)
+{
+ if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
+ return 0;
+
+ if (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF)
+ && (CONST_INT_P (XEXP (XEXP (x, 0), 1))))
+ return 0;
+
+ return 1;
+}
+
+rtx
+m32r_legitimize_pic_address (rtx orig, rtx reg)
+{
+#ifdef DEBUG_PIC
+ printf("m32r_legitimize_pic_address()\n");
+#endif
+
+ if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
+ {
+ rtx pic_ref, address;
+ int subregs = 0;
+
+ if (reg == 0)
+ {
+ gcc_assert (!reload_in_progress && !reload_completed);
+ reg = gen_reg_rtx (Pmode);
+
+ subregs = 1;
+ }
+
+ if (subregs)
+ address = gen_reg_rtx (Pmode);
+ else
+ address = reg;
+
+ crtl->uses_pic_offset_table = 1;
+
+ if (GET_CODE (orig) == LABEL_REF
+ || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
+ {
+ emit_insn (gen_gotoff_load_addr (reg, orig));
+ emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
+ return reg;
+ }
+
+ emit_insn (gen_pic_load_addr (address, orig));
+
+ emit_insn (gen_addsi3 (address, address, pic_offset_table_rtx));
+ pic_ref = gen_const_mem (Pmode, address);
+ emit_move_insn (reg, pic_ref);
+ return reg;
+ }
+ else if (GET_CODE (orig) == CONST)
+ {
+ rtx base, offset;
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && XEXP (XEXP (orig, 0), 1) == pic_offset_table_rtx)
+ return orig;
+
+ if (reg == 0)
+ {
+ gcc_assert (!reload_in_progress && !reload_completed);
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS)
+ {
+ base = m32r_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
+ if (base == reg)
+ offset = m32r_legitimize_pic_address (XEXP (XEXP (orig, 0), 1), NULL_RTX);
+ else
+ offset = m32r_legitimize_pic_address (XEXP (XEXP (orig, 0), 1), reg);
+ }
+ else
+ return orig;
+
+ if (CONST_INT_P (offset))
+ {
+ if (INT16_P (INTVAL (offset)))
+ return plus_constant (Pmode, base, INTVAL (offset));
+ else
+ {
+ gcc_assert (! reload_in_progress && ! reload_completed);
+ offset = force_reg (Pmode, offset);
+ }
+ }
+
+ return gen_rtx_PLUS (Pmode, base, offset);
+ }
+
+ return orig;
+}
+
+static rtx
+m32r_legitimize_address (rtx x, rtx orig_x ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ if (flag_pic)
+ return m32r_legitimize_pic_address (x, NULL_RTX);
+ else
+ return x;
+}
+
+/* Worker function for TARGET_MODE_DEPENDENT_ADDRESS_P. */
+
+static bool
+m32r_mode_dependent_address_p (const_rtx addr, addr_space_t as ATTRIBUTE_UNUSED)
+{
+ if (GET_CODE (addr) == LO_SUM)
+ return true;
+
+ return false;
+}
+
+/* Nested function support. */
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+void
+m32r_initialize_trampoline (rtx tramp ATTRIBUTE_UNUSED,
+ rtx fnaddr ATTRIBUTE_UNUSED,
+ rtx cxt ATTRIBUTE_UNUSED)
+{
+}
+
+static void
+m32r_file_start (void)
+{
+ default_file_start ();
+
+ if (flag_verbose_asm)
+ fprintf (asm_out_file,
+ "%s M32R/D special options: -G %d\n",
+ ASM_COMMENT_START, g_switch_value);
+
+ if (TARGET_LITTLE_ENDIAN)
+ fprintf (asm_out_file, "\t.little\n");
+}
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+static void
+m32r_print_operand (FILE * file, rtx x, int code)
+{
+ rtx addr;
+
+ switch (code)
+ {
+ /* The 's' and 'p' codes are used by output_block_move() to
+ indicate post-increment 's'tores and 'p're-increment loads. */
+ case 's':
+ if (REG_P (x))
+ fprintf (file, "@+%s", reg_names [REGNO (x)]);
+ else
+ output_operand_lossage ("invalid operand to %%s code");
+ return;
+
+ case 'p':
+ if (REG_P (x))
+ fprintf (file, "@%s+", reg_names [REGNO (x)]);
+ else
+ output_operand_lossage ("invalid operand to %%p code");
+ return;
+
+ case 'R' :
+ /* Write second word of DImode or DFmode reference,
+ register or memory. */
+ if (REG_P (x))
+ fputs (reg_names[REGNO (x)+1], file);
+ else if (MEM_P (x))
+ {
+ fprintf (file, "@(");
+ /* Handle possible auto-increment. Since it is pre-increment and
+ we have already done it, we can just use an offset of four. */
+ /* ??? This is taken from rs6000.c I think. I don't think it is
+ currently necessary, but keep it around. */
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 4));
+ else
+ output_address (plus_constant (Pmode, XEXP (x, 0), 4));
+ fputc (')', file);
+ }
+ else
+ output_operand_lossage ("invalid operand to %%R code");
+ return;
+
+ case 'H' : /* High word. */
+ case 'L' : /* Low word. */
+ if (REG_P (x))
+ {
+ /* L = least significant word, H = most significant word. */
+ if ((WORDS_BIG_ENDIAN != 0) ^ (code == 'L'))
+ fputs (reg_names[REGNO (x)], file);
+ else
+ fputs (reg_names[REGNO (x)+1], file);
+ }
+ else if (CONST_INT_P (x)
+ || GET_CODE (x) == CONST_DOUBLE)
+ {
+ rtx first, second;
+
+ split_double (x, &first, &second);
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ code == 'L' ? INTVAL (first) : INTVAL (second));
+ }
+ else
+ output_operand_lossage ("invalid operand to %%H/%%L code");
+ return;
+
+ case 'A' :
+ {
+ char str[30];
+
+ if (GET_CODE (x) != CONST_DOUBLE
+ || GET_MODE_CLASS (GET_MODE (x)) != MODE_FLOAT)
+ fatal_insn ("bad insn for 'A'", x);
+
+ real_to_decimal (str, CONST_DOUBLE_REAL_VALUE (x), sizeof (str), 0, 1);
+ fprintf (file, "%s", str);
+ return;
+ }
+
+ case 'B' : /* Bottom half. */
+ case 'T' : /* Top half. */
+ /* Output the argument to a `seth' insn (sets the Top half-word).
+ For constants output arguments to a seth/or3 pair to set Top and
+ Bottom halves. For symbols output arguments to a seth/add3 pair to
+ set Top and Bottom halves. The difference exists because for
+ constants seth/or3 is more readable but for symbols we need to use
+ the same scheme as `ld' and `st' insns (16-bit addend is signed). */
+ switch (GET_CODE (x))
+ {
+ case CONST_INT :
+ case CONST_DOUBLE :
+ {
+ rtx first, second;
+
+ split_double (x, &first, &second);
+ x = WORDS_BIG_ENDIAN ? second : first;
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ (code == 'B'
+ ? INTVAL (x) & 0xffff
+ : (INTVAL (x) >> 16) & 0xffff));
+ }
+ return;
+ case CONST :
+ case SYMBOL_REF :
+ if (code == 'B'
+ && small_data_operand (x, VOIDmode))
+ {
+ fputs ("sda(", file);
+ output_addr_const (file, x);
+ fputc (')', file);
+ return;
+ }
+ /* fall through */
+ case LABEL_REF :
+ fputs (code == 'T' ? "shigh(" : "low(", file);
+ output_addr_const (file, x);
+ fputc (')', file);
+ return;
+ default :
+ output_operand_lossage ("invalid operand to %%T/%%B code");
+ return;
+ }
+ break;
+
+ case 'U' :
+ /* ??? wip */
+ /* Output a load/store with update indicator if appropriate. */
+ if (MEM_P (x))
+ {
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ fputs (".a", file);
+ }
+ else
+ output_operand_lossage ("invalid operand to %%U code");
+ return;
+
+ case 'N' :
+ /* Print a constant value negated. */
+ if (CONST_INT_P (x))
+ output_addr_const (file, GEN_INT (- INTVAL (x)));
+ else
+ output_operand_lossage ("invalid operand to %%N code");
+ return;
+
+ case 'X' :
+ /* Print a const_int in hex. Used in comments. */
+ if (CONST_INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
+ return;
+
+ case '#' :
+ fputs (IMMEDIATE_PREFIX, file);
+ return;
+
+ case 0 :
+ /* Do nothing special. */
+ break;
+
+ default :
+ /* Unknown flag. */
+ output_operand_lossage ("invalid operand output code");
+ }
+
+ switch (GET_CODE (x))
+ {
+ case REG :
+ fputs (reg_names[REGNO (x)], file);
+ break;
+
+ case MEM :
+ addr = XEXP (x, 0);
+ if (GET_CODE (addr) == PRE_INC)
+ {
+ if (!REG_P (XEXP (addr, 0)))
+ fatal_insn ("pre-increment address is not a register", x);
+
+ fprintf (file, "@+%s", reg_names[REGNO (XEXP (addr, 0))]);
+ }
+ else if (GET_CODE (addr) == PRE_DEC)
+ {
+ if (!REG_P (XEXP (addr, 0)))
+ fatal_insn ("pre-decrement address is not a register", x);
+
+ fprintf (file, "@-%s", reg_names[REGNO (XEXP (addr, 0))]);
+ }
+ else if (GET_CODE (addr) == POST_INC)
+ {
+ if (!REG_P (XEXP (addr, 0)))
+ fatal_insn ("post-increment address is not a register", x);
+
+ fprintf (file, "@%s+", reg_names[REGNO (XEXP (addr, 0))]);
+ }
+ else
+ {
+ fputs ("@(", file);
+ output_address (XEXP (x, 0));
+ fputc (')', file);
+ }
+ break;
+
+ case CONST_DOUBLE :
+ /* We handle SFmode constants here as output_addr_const doesn't. */
+ if (GET_MODE (x) == SFmode)
+ {
+ REAL_VALUE_TYPE d;
+ long l;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+ REAL_VALUE_TO_TARGET_SINGLE (d, l);
+ fprintf (file, "0x%08lx", l);
+ break;
+ }
+
+ /* Fall through. Let output_addr_const deal with it. */
+
+ default :
+ output_addr_const (file, x);
+ break;
+ }
+}
+
+/* Print a memory address as an operand to reference that memory location. */
+
+static void
+m32r_print_operand_address (FILE * file, rtx addr)
+{
+ rtx base;
+ rtx index = 0;
+ int offset = 0;
+
+ switch (GET_CODE (addr))
+ {
+ case REG :
+ fputs (reg_names[REGNO (addr)], file);
+ break;
+
+ case PLUS :
+ if (CONST_INT_P (XEXP (addr, 0)))
+ offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
+ else if (CONST_INT_P (XEXP (addr, 1)))
+ offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
+ else
+ base = XEXP (addr, 0), index = XEXP (addr, 1);
+ if (REG_P (base))
+ {
+ /* Print the offset first (if present) to conform to the manual. */
+ if (index == 0)
+ {
+ if (offset != 0)
+ fprintf (file, "%d,", offset);
+ fputs (reg_names[REGNO (base)], file);
+ }
+ /* The chip doesn't support this, but left in for generality. */
+ else if (REG_P (index))
+ fprintf (file, "%s,%s",
+ reg_names[REGNO (base)], reg_names[REGNO (index)]);
+ /* Not sure this can happen, but leave in for now. */
+ else if (GET_CODE (index) == SYMBOL_REF)
+ {
+ output_addr_const (file, index);
+ fputc (',', file);
+ fputs (reg_names[REGNO (base)], file);
+ }
+ else
+ fatal_insn ("bad address", addr);
+ }
+ else if (GET_CODE (base) == LO_SUM)
+ {
+ gcc_assert (!index && REG_P (XEXP (base, 0)));
+ if (small_data_operand (XEXP (base, 1), VOIDmode))
+ fputs ("sda(", file);
+ else
+ fputs ("low(", file);
+ output_addr_const (file, plus_constant (Pmode, XEXP (base, 1),
+ offset));
+ fputs ("),", file);
+ fputs (reg_names[REGNO (XEXP (base, 0))], file);
+ }
+ else
+ fatal_insn ("bad address", addr);
+ break;
+
+ case LO_SUM :
+ if (!REG_P (XEXP (addr, 0)))
+ fatal_insn ("lo_sum not of register", addr);
+ if (small_data_operand (XEXP (addr, 1), VOIDmode))
+ fputs ("sda(", file);
+ else
+ fputs ("low(", file);
+ output_addr_const (file, XEXP (addr, 1));
+ fputs ("),", file);
+ fputs (reg_names[REGNO (XEXP (addr, 0))], file);
+ break;
+
+ case PRE_INC : /* Assume SImode. */
+ fprintf (file, "+%s", reg_names[REGNO (XEXP (addr, 0))]);
+ break;
+
+ case PRE_DEC : /* Assume SImode. */
+ fprintf (file, "-%s", reg_names[REGNO (XEXP (addr, 0))]);
+ break;
+
+ case POST_INC : /* Assume SImode. */
+ fprintf (file, "%s+", reg_names[REGNO (XEXP (addr, 0))]);
+ break;
+
+ default :
+ output_addr_const (file, addr);
+ break;
+ }
+}
+
+static bool
+m32r_print_operand_punct_valid_p (unsigned char code)
+{
+ return m32r_punct_chars[code];
+}
+
+/* Return true if the operands are the constants 0 and 1. */
+
+int
+zero_and_one (rtx operand1, rtx operand2)
+{
+  return
+    CONST_INT_P (operand1)
+    && CONST_INT_P (operand2)
+    && (((INTVAL (operand1) == 0) && (INTVAL (operand2) == 1))
+	|| ((INTVAL (operand1) == 1) && (INTVAL (operand2) == 0)));
+}
+
+/* Generate the correct assembler code to handle the conditional loading of
+   a value into a register.  It is known that the operands satisfy the
+   conditional_move_operand() predicate (see predicates.md).  The destination
+   is operand[0].  The condition is operand[1].  The 'true' value is
+   operand[2] and the 'false' value is operand[3].  */
+
+char *
+emit_cond_move (rtx * operands, rtx insn ATTRIBUTE_UNUSED)
+{
+ static char buffer [100];
+ const char * dest = reg_names [REGNO (operands [0])];
+
+ buffer [0] = 0;
+
+ /* Destination must be a register. */
+ gcc_assert (REG_P (operands [0]));
+ gcc_assert (conditional_move_operand (operands [2], SImode));
+ gcc_assert (conditional_move_operand (operands [3], SImode));
+
+ /* Check to see if the test is reversed. */
+ if (GET_CODE (operands [1]) == NE)
+ {
+ rtx tmp = operands [2];
+ operands [2] = operands [3];
+ operands [3] = tmp;
+ }
+
+ sprintf (buffer, "mvfc %s, cbr", dest);
+
+ /* If the true value was '0' then we need to invert the results of the move. */
+ if (INTVAL (operands [2]) == 0)
+ sprintf (buffer + strlen (buffer), "\n\txor3 %s, %s, #1",
+ dest, dest);
+
+ return buffer;
+}
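+
+/* For example, when the 'true' value is 1 this emits just
+   "mvfc dest, cbr"; when the 'true' value is 0 the move is inverted
+   by appending "xor3 dest, dest, #1".  */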
+
+/* Returns true if the registers contained in the two
+ rtl expressions are different. */
+
+int
+m32r_not_same_reg (rtx a, rtx b)
+{
+ int reg_a = -1;
+ int reg_b = -2;
+
+ while (GET_CODE (a) == SUBREG)
+ a = SUBREG_REG (a);
+
+ if (REG_P (a))
+ reg_a = REGNO (a);
+
+ while (GET_CODE (b) == SUBREG)
+ b = SUBREG_REG (b);
+
+ if (REG_P (b))
+ reg_b = REGNO (b);
+
+ return reg_a != reg_b;
+}
+
+
+rtx
+m32r_function_symbol (const char *name)
+{
+ int extra_flags = 0;
+ enum m32r_model model;
+ rtx sym = gen_rtx_SYMBOL_REF (Pmode, name);
+
+ if (TARGET_MODEL_SMALL)
+ model = M32R_MODEL_SMALL;
+ else if (TARGET_MODEL_MEDIUM)
+ model = M32R_MODEL_MEDIUM;
+ else if (TARGET_MODEL_LARGE)
+ model = M32R_MODEL_LARGE;
+ else
+ gcc_unreachable (); /* Shouldn't happen. */
+ extra_flags |= model << SYMBOL_FLAG_MODEL_SHIFT;
+
+ if (extra_flags)
+ SYMBOL_REF_FLAGS (sym) |= extra_flags;
+
+ return sym;
+}
+
+/* Use a library function to move some bytes. */
+
+static void
+block_move_call (rtx dest_reg, rtx src_reg, rtx bytes_rtx)
+{
+ /* We want to pass the size as Pmode, which will normally be SImode
+ but will be DImode if we are using 64-bit longs and pointers. */
+ if (GET_MODE (bytes_rtx) != VOIDmode
+ && GET_MODE (bytes_rtx) != Pmode)
+ bytes_rtx = convert_to_mode (Pmode, bytes_rtx, 1);
+
+ emit_library_call (m32r_function_symbol ("memcpy"), LCT_NORMAL,
+ VOIDmode, 3, dest_reg, Pmode, src_reg, Pmode,
+ convert_to_mode (TYPE_MODE (sizetype), bytes_rtx,
+ TYPE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+}
+
+/* Expand string/block move operations.
+
+ operands[0] is the pointer to the destination.
+ operands[1] is the pointer to the source.
+ operands[2] is the number of bytes to move.
+ operands[3] is the alignment.
+
+ Returns 1 upon success, 0 otherwise. */
+
+int
+m32r_expand_block_move (rtx operands[])
+{
+ rtx orig_dst = operands[0];
+ rtx orig_src = operands[1];
+ rtx bytes_rtx = operands[2];
+ rtx align_rtx = operands[3];
+ int constp = CONST_INT_P (bytes_rtx);
+ HOST_WIDE_INT bytes = constp ? INTVAL (bytes_rtx) : 0;
+ int align = INTVAL (align_rtx);
+ int leftover;
+ rtx src_reg;
+ rtx dst_reg;
+
+ if (constp && bytes <= 0)
+ return 1;
+
+  /* Move the addresses into scratch registers.  */
+ dst_reg = copy_addr_to_reg (XEXP (orig_dst, 0));
+ src_reg = copy_addr_to_reg (XEXP (orig_src, 0));
+
+ if (align > UNITS_PER_WORD)
+ align = UNITS_PER_WORD;
+
+ /* If we prefer size over speed, always use a function call.
+ If we do not know the size, use a function call.
+ If the blocks are not word aligned, use a function call. */
+ if (optimize_size || ! constp || align != UNITS_PER_WORD)
+ {
+ block_move_call (dst_reg, src_reg, bytes_rtx);
+ return 0;
+ }
+
+ leftover = bytes % MAX_MOVE_BYTES;
+ bytes -= leftover;
+
+ /* If necessary, generate a loop to handle the bulk of the copy. */
+ if (bytes)
+ {
+ rtx label = NULL_RTX;
+ rtx final_src = NULL_RTX;
+ rtx at_a_time = GEN_INT (MAX_MOVE_BYTES);
+ rtx rounded_total = GEN_INT (bytes);
+ rtx new_dst_reg = gen_reg_rtx (SImode);
+ rtx new_src_reg = gen_reg_rtx (SImode);
+
+ /* If we are going to have to perform this loop more than
+ once, then generate a label and compute the address the
+ source register will contain upon completion of the final
+ iteration. */
+ if (bytes > MAX_MOVE_BYTES)
+ {
+ final_src = gen_reg_rtx (Pmode);
+
+ if (INT16_P(bytes))
+ emit_insn (gen_addsi3 (final_src, src_reg, rounded_total));
+ else
+ {
+ emit_insn (gen_movsi (final_src, rounded_total));
+ emit_insn (gen_addsi3 (final_src, final_src, src_reg));
+ }
+
+ label = gen_label_rtx ();
+ emit_label (label);
+ }
+
+ /* It is known that output_block_move() will update src_reg to point
+ to the word after the end of the source block, and dst_reg to point
+ to the last word of the destination block, provided that the block
+ is MAX_MOVE_BYTES long. */
+ emit_insn (gen_movmemsi_internal (dst_reg, src_reg, at_a_time,
+ new_dst_reg, new_src_reg));
+ emit_move_insn (dst_reg, new_dst_reg);
+ emit_move_insn (src_reg, new_src_reg);
+ emit_insn (gen_addsi3 (dst_reg, dst_reg, GEN_INT (4)));
+
+ if (bytes > MAX_MOVE_BYTES)
+ {
+ rtx test = gen_rtx_NE (VOIDmode, src_reg, final_src);
+ emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
+ }
+ }
+
+ if (leftover)
+ emit_insn (gen_movmemsi_internal (dst_reg, src_reg, GEN_INT (leftover),
+ gen_reg_rtx (SImode),
+ gen_reg_rtx (SImode)));
+ return 1;
+}
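+
+/* For example, a constant word-aligned copy of 2 * MAX_MOVE_BYTES + 3
+   bytes becomes a two-iteration loop around movmemsi_internal plus a
+   3-byte trailing copy, while a non-constant length or sub-word
+   alignment falls back to the memcpy call above.  */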
+
+
+/* Emit load/stores for a small constant word-aligned block_move.
+
+   operands[0] is the memory address of the destination.
+   operands[1] is the memory address of the source.
+   operands[2] is the number of bytes to move.
+   operands[3] and operands[4] receive the updated destination and
+   source addresses; the temp registers actually used for the copy are
+   operands[5] and operands[6] (see the %5/%6 references below).  */
+
+void
+m32r_output_block_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
+{
+ HOST_WIDE_INT bytes = INTVAL (operands[2]);
+ int first_time;
+ int got_extra = 0;
+
+ gcc_assert (bytes >= 1 && bytes <= MAX_MOVE_BYTES);
+
+  /* We do not have a post-increment store available, so the first set of
+     stores is done without any increment, then the remaining ones can use
+     the pre-increment addressing mode.
+
+     Note: expand_block_move() also relies upon this behavior when building
+     loops to copy large blocks.  */
+ first_time = 1;
+
+ while (bytes > 0)
+ {
+ if (bytes >= 8)
+ {
+ if (first_time)
+ {
+ output_asm_insn ("ld\t%5, %p1", operands);
+ output_asm_insn ("ld\t%6, %p1", operands);
+ output_asm_insn ("st\t%5, @%0", operands);
+ output_asm_insn ("st\t%6, %s0", operands);
+ }
+ else
+ {
+ output_asm_insn ("ld\t%5, %p1", operands);
+ output_asm_insn ("ld\t%6, %p1", operands);
+ output_asm_insn ("st\t%5, %s0", operands);
+ output_asm_insn ("st\t%6, %s0", operands);
+ }
+
+ bytes -= 8;
+ }
+ else if (bytes >= 4)
+ {
+ if (bytes > 4)
+ got_extra = 1;
+
+ output_asm_insn ("ld\t%5, %p1", operands);
+
+ if (got_extra)
+ output_asm_insn ("ld\t%6, %p1", operands);
+
+ if (first_time)
+ output_asm_insn ("st\t%5, @%0", operands);
+ else
+ output_asm_insn ("st\t%5, %s0", operands);
+
+ bytes -= 4;
+ }
+ else
+ {
+	  /* Get the entire next word, even though we do not want all of it.
+	     This saves us from doing several smaller loads, and we assume that
+	     we cannot cause a page fault when at least part of the word is in
+	     valid memory [since we don't get called if things aren't properly
+	     aligned].  */
+ int dst_offset = first_time ? 0 : 4;
+ /* The amount of increment we have to make to the
+ destination pointer. */
+ int dst_inc_amount = dst_offset + bytes - 4;
+ /* The same for the source pointer. */
+ int src_inc_amount = bytes;
+ int last_shift;
+ rtx my_operands[3];
+
+ /* If got_extra is true then we have already loaded
+ the next word as part of loading and storing the previous word. */
+ if (! got_extra)
+ output_asm_insn ("ld\t%6, @%1", operands);
+
+ if (bytes >= 2)
+ {
+ bytes -= 2;
+
+ output_asm_insn ("sra3\t%5, %6, #16", operands);
+ my_operands[0] = operands[5];
+ my_operands[1] = GEN_INT (dst_offset);
+ my_operands[2] = operands[0];
+ output_asm_insn ("sth\t%0, @(%1,%2)", my_operands);
+
+ /* If there is a byte left to store then increment the
+ destination address and shift the contents of the source
+ register down by 8 bits. We could not do the address
+ increment in the store half word instruction, because it does
+ not have an auto increment mode. */
+ if (bytes > 0) /* assert (bytes == 1) */
+ {
+ dst_offset += 2;
+ last_shift = 8;
+ }
+ }
+ else
+ last_shift = 24;
+
+ if (bytes > 0)
+ {
+ my_operands[0] = operands[6];
+ my_operands[1] = GEN_INT (last_shift);
+ output_asm_insn ("srai\t%0, #%1", my_operands);
+ my_operands[0] = operands[6];
+ my_operands[1] = GEN_INT (dst_offset);
+ my_operands[2] = operands[0];
+ output_asm_insn ("stb\t%0, @(%1,%2)", my_operands);
+ }
+
+      /* Update the destination pointer if needed.  We have to do
+	 this so that the pattern matches what we output in this
+	 function.  */
+ if (dst_inc_amount
+ && !find_reg_note (insn, REG_UNUSED, operands[0]))
+ {
+ my_operands[0] = operands[0];
+ my_operands[1] = GEN_INT (dst_inc_amount);
+ output_asm_insn ("addi\t%0, #%1", my_operands);
+ }
+
+      /* Update the source pointer if needed.  We have to do this
+	 so that the pattern matches what we output in this
+	 function.  */
+ if (src_inc_amount
+ && !find_reg_note (insn, REG_UNUSED, operands[1]))
+ {
+ my_operands[0] = operands[1];
+ my_operands[1] = GEN_INT (src_inc_amount);
+ output_asm_insn ("addi\t%0, #%1", my_operands);
+ }
+
+ bytes = 0;
+ }
+
+ first_time = 0;
+ }
+}
+
+/* Return true if using NEW_REG in place of OLD_REG is ok. */
+
+int
+m32r_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
+ unsigned int new_reg)
+{
+ /* Interrupt routines can't clobber any register that isn't already used. */
+ if (lookup_attribute ("interrupt", DECL_ATTRIBUTES (current_function_decl))
+ && !df_regs_ever_live_p (new_reg))
+ return 0;
+
+ return 1;
+}
+
+rtx
+m32r_return_addr (int count)
+{
+ if (count != 0)
+ return const0_rtx;
+
+ return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
+}
+
+static void
+m32r_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+{
+ emit_move_insn (adjust_address (m_tramp, SImode, 0),
+ gen_int_mode (TARGET_LITTLE_ENDIAN ?
+ 0x017e8e17 : 0x178e7e01, SImode));
+ emit_move_insn (adjust_address (m_tramp, SImode, 4),
+ gen_int_mode (TARGET_LITTLE_ENDIAN ?
+ 0x0c00ae86 : 0x86ae000c, SImode));
+ emit_move_insn (adjust_address (m_tramp, SImode, 8),
+ gen_int_mode (TARGET_LITTLE_ENDIAN ?
+ 0xe627871e : 0x1e8727e6, SImode));
+ emit_move_insn (adjust_address (m_tramp, SImode, 12),
+ gen_int_mode (TARGET_LITTLE_ENDIAN ?
+ 0xc616c626 : 0x26c61fc6, SImode));
+ emit_move_insn (adjust_address (m_tramp, SImode, 16),
+ chain_value);
+ emit_move_insn (adjust_address (m_tramp, SImode, 20),
+ XEXP (DECL_RTL (fndecl), 0));
+
+ if (m32r_cache_flush_trap >= 0)
+ emit_insn (gen_flush_icache
+ (validize_mem (adjust_address (m_tramp, SImode, 0)),
+ gen_int_mode (m32r_cache_flush_trap, SImode)));
+ else if (m32r_cache_flush_func && m32r_cache_flush_func[0])
+ emit_library_call (m32r_function_symbol (m32r_cache_flush_func),
+ LCT_NORMAL, VOIDmode, 3, XEXP (m_tramp, 0), Pmode,
+ gen_int_mode (TRAMPOLINE_SIZE, SImode), SImode,
+ GEN_INT (3), SImode);
+}
+
+/* True if X is a reg that can be used as a base reg. */
+
+static bool
+m32r_rtx_ok_for_base_p (const_rtx x, bool strict)
+{
+ if (! REG_P (x))
+ return false;
+
+ if (strict)
+ {
+ if (GPR_P (REGNO (x)))
+ return true;
+ }
+ else
+ {
+ if (GPR_P (REGNO (x))
+ || REGNO (x) == ARG_POINTER_REGNUM
+ || ! HARD_REGISTER_P (x))
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool
+m32r_rtx_ok_for_offset_p (const_rtx x)
+{
+ return (CONST_INT_P (x) && INT16_P (INTVAL (x)));
+}
+
+static inline bool
+m32r_legitimate_offset_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+				  const_rtx x, bool strict)
+{
+ if (GET_CODE (x) == PLUS
+ && m32r_rtx_ok_for_base_p (XEXP (x, 0), strict)
+ && m32r_rtx_ok_for_offset_p (XEXP (x, 1)))
+ return true;
+
+ return false;
+}
+
+/* For LO_SUM addresses, do not allow them if the MODE is > 1 word,
+ since more than one instruction will be required. */
+
+static inline bool
+m32r_legitimate_lo_sum_address_p (enum machine_mode mode, const_rtx x,
+				  bool strict)
+{
+ if (GET_CODE (x) == LO_SUM
+ && (mode != BLKmode && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
+ && m32r_rtx_ok_for_base_p (XEXP (x, 0), strict)
+ && CONSTANT_P (XEXP (x, 1)))
+ return true;
+
+ return false;
+}
+
+/* Is this a load and post-increment operation?  */
+
+static inline bool
+m32r_load_postinc_p (enum machine_mode mode, const_rtx x, bool strict)
+{
+ if ((mode == SImode || mode == SFmode)
+ && GET_CODE (x) == POST_INC
+ && REG_P (XEXP (x, 0))
+ && m32r_rtx_ok_for_base_p (XEXP (x, 0), strict))
+ return true;
+
+ return false;
+}
+
+/* Is this a pre-increment/pre-decrement store operation?  */
+
+static inline bool
+m32r_store_preinc_predec_p (enum machine_mode mode, const_rtx x, bool strict)
+{
+ if ((mode == SImode || mode == SFmode)
+ && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
+ && REG_P (XEXP (x, 0)) \
+ && m32r_rtx_ok_for_base_p (XEXP (x, 0), strict))
+ return true;
+
+ return false;
+}
+
+/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
+
+static bool
+m32r_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
+{
+ if (m32r_rtx_ok_for_base_p (x, strict)
+      || m32r_legitimate_offset_address_p (mode, x, strict)
+      || m32r_legitimate_lo_sum_address_p (mode, x, strict)
+ || m32r_load_postinc_p (mode, x, strict)
+ || m32r_store_preinc_predec_p (mode, x, strict))
+ return true;
+
+ return false;
+}
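+
+/* So the accepted address forms are: a base register, base plus a
+   16-bit signed offset, a LO_SUM for one-word modes, post-increment
+   (SImode/SFmode loads), and pre-increment/pre-decrement
+   (SImode/SFmode stores).  */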
+
+static void
+m32r_conditional_register_usage (void)
+{
+ if (flag_pic)
+ {
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
+ }
+}
+
+/* Implement TARGET_LEGITIMATE_CONSTANT_P
+
+ We don't allow (plus symbol large-constant) as the relocations can't
+ describe it. INTVAL > 32767 handles both 16-bit and 24-bit relocations.
+ We allow all CONST_DOUBLE's as the md file patterns will force the
+ constant to memory if they can't handle them. */
+
+static bool
+m32r_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+ return !(GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF)
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+ && UINTVAL (XEXP (XEXP (x, 0), 1)) > 32767);
+}
diff --git a/gcc-4.9/gcc/config/m32r/m32r.h b/gcc-4.9/gcc/config/m32r/m32r.h
new file mode 100644
index 000000000..485137929
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/m32r.h
@@ -0,0 +1,1051 @@
+/* Definitions of target machine for GNU compiler, Renesas M32R cpu.
+ Copyright (C) 1996-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Things to do:
+- longlong.h?
+*/
+
+#undef SIZE_TYPE
+#undef PTRDIFF_TYPE
+#undef WCHAR_TYPE
+#undef WCHAR_TYPE_SIZE
+#undef CPP_SPEC
+#undef ASM_SPEC
+#undef LINK_SPEC
+#undef STARTFILE_SPEC
+#undef ENDFILE_SPEC
+
+#undef ASM_APP_ON
+#undef ASM_APP_OFF
+
+
+/* M32R/X overrides. */
+
+/* Additional flags for the preprocessor. */
+#define CPP_CPU_SPEC "%{m32rx:-D__M32RX__ -D__m32rx__ -U__M32R2__ -U__m32r2__} \
+%{m32r2:-D__M32R2__ -D__m32r2__ -U__M32RX__ -U__m32rx__} \
+%{m32r:-U__M32RX__ -U__m32rx__ -U__M32R2__ -U__m32r2__} \
+ "
+
+/* Assembler switches. */
+#define ASM_CPU_SPEC \
+"%{m32r} %{m32rx} %{m32r2} %{!O0: %{O*: -O}} --no-warn-explicit-parallel-conflicts"
+
+/* Use m32rx specific crt0/crtinit/crtfini files. */
+#define STARTFILE_CPU_SPEC "%{!shared:crt0.o%s} %{m32rx:m32rx/crtinit.o%s} %{!m32rx:crtinit.o%s}"
+#define ENDFILE_CPU_SPEC "-lgloss %{m32rx:m32rx/crtfini.o%s} %{!m32rx:crtfini.o%s}"
+
+/* Define this macro as a C expression for the initializer of an array of
+ strings to tell the driver program which options are defaults for this
+ target and thus do not need to be handled specially when using
+ `MULTILIB_OPTIONS'. */
+#define SUBTARGET_MULTILIB_DEFAULTS , "m32r"
+
+/* Number of additional registers the subtarget defines. */
+#define SUBTARGET_NUM_REGISTERS 1
+
+/* 1 for registers that cannot be allocated. */
+#define SUBTARGET_FIXED_REGISTERS , 1
+
+/* 1 for registers that are not available across function calls. */
+#define SUBTARGET_CALL_USED_REGISTERS , 1
+
+/* Order to allocate model specific registers. */
+#define SUBTARGET_REG_ALLOC_ORDER , 19
+
+/* Registers which are accumulators. */
+#define SUBTARGET_REG_CLASS_ACCUM 0x80000
+
+/* All registers added. */
+#define SUBTARGET_REG_CLASS_ALL SUBTARGET_REG_CLASS_ACCUM
+
+/* Additional accumulator registers. */
+#define SUBTARGET_ACCUM_P(REGNO) ((REGNO) == 19)
+
+/* Define additional register names. */
+#define SUBTARGET_REGISTER_NAMES , "a1"
+/* end M32R/X overrides. */
+
+/* Names to predefine in the preprocessor for this target machine. */
+/* __M32R__ is defined by the existing compiler so we use that. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__M32R__"); \
+ builtin_define ("__m32r__"); \
+ builtin_assert ("cpu=m32r"); \
+ builtin_assert ("machine=m32r"); \
+ builtin_define (TARGET_BIG_ENDIAN \
+ ? "__BIG_ENDIAN__" : "__LITTLE_ENDIAN__"); \
+ } \
+ while (0)
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+   Each subgrouping contains a string constant that defines the
+   specification name, and a string constant that is used by the GCC
+   driver program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
+#endif
+
+#ifndef ASM_CPU_SPEC
+#define ASM_CPU_SPEC ""
+#endif
+
+#ifndef CPP_CPU_SPEC
+#define CPP_CPU_SPEC ""
+#endif
+
+#ifndef CC1_CPU_SPEC
+#define CC1_CPU_SPEC ""
+#endif
+
+#ifndef LINK_CPU_SPEC
+#define LINK_CPU_SPEC ""
+#endif
+
+#ifndef STARTFILE_CPU_SPEC
+#define STARTFILE_CPU_SPEC "%{!shared:crt0.o%s} crtinit.o%s"
+#endif
+
+#ifndef ENDFILE_CPU_SPEC
+#define ENDFILE_CPU_SPEC "-lgloss crtfini.o%s"
+#endif
+
+#ifndef RELAX_SPEC
+#if 0 /* Not supported yet. */
+#define RELAX_SPEC "%{mrelax:-relax}"
+#else
+#define RELAX_SPEC ""
+#endif
+#endif
+
+#define EXTRA_SPECS \
+ { "asm_cpu", ASM_CPU_SPEC }, \
+ { "cpp_cpu", CPP_CPU_SPEC }, \
+ { "cc1_cpu", CC1_CPU_SPEC }, \
+ { "link_cpu", LINK_CPU_SPEC }, \
+ { "startfile_cpu", STARTFILE_CPU_SPEC }, \
+ { "endfile_cpu", ENDFILE_CPU_SPEC }, \
+ { "relax", RELAX_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define CPP_SPEC "%(cpp_cpu)"
+
+#undef CC1_SPEC
+#define CC1_SPEC "%{G*} %(cc1_cpu)"
+
+/* Options to pass on to the assembler. */
+#undef ASM_SPEC
+#define ASM_SPEC "%(asm_cpu) %(relax) %{fpic|fpie:-K PIC} %{fPIC|fPIE:-K PIC}"
+
+#define LINK_SPEC "%{v} %(link_cpu) %(relax)"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%(startfile_cpu)"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "%(endfile_cpu)"
+
+#undef LIB_SPEC
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+#define TARGET_M32R (! TARGET_M32RX && ! TARGET_M32R2)
+
+#ifndef TARGET_LITTLE_ENDIAN
+#define TARGET_LITTLE_ENDIAN 0
+#endif
+#define TARGET_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
+
+/* This defaults us to m32r. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT 0
+#endif
+
+#ifndef M32R_OPTS_H
+#include "config/m32r/m32r-opts.h"
+#endif
+
+/* Define this macro as a C expression for the initializer of an array of
+ strings to tell the driver program which options are defaults for this
+ target and thus do not need to be handled specially when using
+ `MULTILIB_OPTIONS'. */
+#ifndef SUBTARGET_MULTILIB_DEFAULTS
+#define SUBTARGET_MULTILIB_DEFAULTS
+#endif
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mmodel=small" SUBTARGET_MULTILIB_DEFAULTS }
+#endif
+
+#ifndef SUBTARGET_OVERRIDE_OPTIONS
+#define SUBTARGET_OVERRIDE_OPTIONS
+#endif
+
+/* Target machine storage layout. */
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 1
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+#define BYTES_BIG_ENDIAN (TARGET_LITTLE_ENDIAN == 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+#define WORDS_BIG_ENDIAN (TARGET_LITTLE_ENDIAN == 0)
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ { \
+ (MODE) = SImode; \
+ }
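+
+/* For example, QImode and HImode scalars are promoted to and held in
+   SImode registers.  */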
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY 32
+
+/* Align stack frames on word boundaries.  */
+#define M32R_STACK_ALIGN(LOC) (((LOC) + 3) & ~ 3)
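+/* For example, M32R_STACK_ALIGN (5) == 8 and M32R_STACK_ALIGN (8) == 8,
+   i.e. sizes are rounded up to a multiple of 4 bytes.  */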
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bit-field declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 32
+
+/* The best alignment to use in cases where we have a choice. */
+#define FASTEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < FASTEST_ALIGNMENT) \
+ ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Make arrays of chars word-aligned for the same reasons. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* Define LABEL_ALIGN to calculate the code length of PNOPs at labels.  */
+#define LABEL_ALIGN(insn) 2
+
+/* Layout of source language data types. */
+
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+#define SIZE_TYPE "long unsigned int"
+#define PTRDIFF_TYPE "long int"
+#define WCHAR_TYPE "short unsigned int"
+#define WCHAR_TYPE_SIZE 16
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers. */
+
+#define M32R_NUM_REGISTERS 19
+
+#ifndef SUBTARGET_NUM_REGISTERS
+#define SUBTARGET_NUM_REGISTERS 0
+#endif
+
+#define FIRST_PSEUDO_REGISTER (M32R_NUM_REGISTERS + SUBTARGET_NUM_REGISTERS)
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+
+ 0-3 - arguments/results
+ 4-5 - call used [4 is used as a tmp during prologue/epilogue generation]
+ 6 - call used, gptmp
+ 7 - call used, static chain pointer
+ 8-11 - call saved
+ 12 - call saved [reserved for global pointer]
+ 13 - frame pointer
+ 14 - subroutine link register
+ 15 - stack pointer
+ 16 - arg pointer
+ 17 - carry flag
+ 18 - accumulator
+ 19 - accumulator 1 in the m32r/x
+ By default, the extension registers are not available. */
+
+#ifndef SUBTARGET_FIXED_REGISTERS
+#define SUBTARGET_FIXED_REGISTERS
+#endif
+
+#define FIXED_REGISTERS \
+{ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 1, \
+ 1, 1, 1 \
+ SUBTARGET_FIXED_REGISTERS \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+#ifndef SUBTARGET_CALL_USED_REGISTERS
+#define SUBTARGET_CALL_USED_REGISTERS
+#endif
+
+#define CALL_USED_REGISTERS \
+{ \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 1, 1, \
+ 1, 1, 1 \
+ SUBTARGET_CALL_USED_REGISTERS \
+}
+
+#define CALL_REALLY_USED_REGISTERS CALL_USED_REGISTERS
+
+/* If defined, an initializer for a vector of integers, containing the
+ numbers of hard registers in the order in which GCC should
+ prefer to use them (from most preferred to least). */
+
+#ifndef SUBTARGET_REG_ALLOC_ORDER
+#define SUBTARGET_REG_ALLOC_ORDER
+#endif
+
+#if 1 /* Better for int code. */
+#define REG_ALLOC_ORDER \
+{ \
+ 4, 5, 6, 7, 2, 3, 8, 9, 10, \
+ 11, 12, 13, 14, 0, 1, 15, 16, 17, 18 \
+ SUBTARGET_REG_ALLOC_ORDER \
+}
+
+#else /* Better for fp code at expense of int code. */
+#define REG_ALLOC_ORDER \
+{ \
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, \
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18 \
+ SUBTARGET_REG_ALLOC_ORDER \
+}
+#endif
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
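+/* For example, SImode and SFmode values occupy one register while
+   DImode and DFmode values occupy two consecutive registers.  */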
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. */
+extern const unsigned int m32r_hard_regno_mode_ok[FIRST_PSEUDO_REGISTER];
+extern unsigned int m32r_mode_class[];
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((m32r_hard_regno_mode_ok[REGNO] & m32r_mode_class[MODE]) != 0)
+
+/* A C expression that is nonzero if it is desirable to choose
+ register allocation so as to avoid move instructions between a
+ value of mode MODE1 and a value of mode MODE2.
+
+ If `HARD_REGNO_MODE_OK (R, MODE1)' and `HARD_REGNO_MODE_OK (R,
+ MODE2)' are ever different for any R, then `MODES_TIEABLE_P (MODE1,
+ MODE2)' must be zero. */
+
+/* Tie QI/HI/SI modes together. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ( GET_MODE_CLASS (MODE1) == MODE_INT \
+ && GET_MODE_CLASS (MODE2) == MODE_INT \
+ && GET_MODE_SIZE (MODE1) <= UNITS_PER_WORD \
+ && GET_MODE_SIZE (MODE2) <= UNITS_PER_WORD)
+
+#define HARD_REGNO_RENAME_OK(OLD_REG, NEW_REG) \
+ m32r_hard_regno_rename_ok (OLD_REG, NEW_REG)
+
+/* Register classes and constants. */
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union.
+
+ It is important that any condition codes have class NO_REGS.
+ See `register_operand'. */
+
+enum reg_class
+{
+ NO_REGS, CARRY_REG, ACCUM_REGS, GENERAL_REGS, ALL_REGS, LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+ { "NO_REGS", "CARRY_REG", "ACCUM_REGS", "GENERAL_REGS", "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#ifndef SUBTARGET_REG_CLASS_CARRY
+#define SUBTARGET_REG_CLASS_CARRY 0
+#endif
+
+#ifndef SUBTARGET_REG_CLASS_ACCUM
+#define SUBTARGET_REG_CLASS_ACCUM 0
+#endif
+
+#ifndef SUBTARGET_REG_CLASS_GENERAL
+#define SUBTARGET_REG_CLASS_GENERAL 0
+#endif
+
+#ifndef SUBTARGET_REG_CLASS_ALL
+#define SUBTARGET_REG_CLASS_ALL 0
+#endif
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000 }, \
+ { 0x20000 | SUBTARGET_REG_CLASS_CARRY }, \
+ { 0x40000 | SUBTARGET_REG_CLASS_ACCUM }, \
+ { 0x1ffff | SUBTARGET_REG_CLASS_GENERAL }, \
+ { 0x7ffff | SUBTARGET_REG_CLASS_ALL }, \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+extern enum reg_class m32r_regno_reg_class[FIRST_PSEUDO_REGISTER];
+#define REGNO_REG_CLASS(REGNO) (m32r_regno_reg_class[REGNO])
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in reginfo.c during register
+ allocation. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < FIRST_PSEUDO_REGISTER \
+ ? GPR_P (REGNO) || (REGNO) == ARG_POINTER_REGNUM \
+ : GPR_P (reg_renumber[REGNO]))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Return true if a value is inside a range. */
+#define IN_RANGE_P(VALUE, LOW, HIGH) \
+ (((unsigned HOST_WIDE_INT)((VALUE) - (LOW))) \
+ <= ((unsigned HOST_WIDE_INT)((HIGH) - (LOW))))
+
+/* Some range macros. */
+#define INT16_P(X) ((X) >= - 0x8000 && (X) <= 0x7fff)
+#define CMP_INT16_P(X) ((X) >= - 0x7fff && (X) <= 0x8000)
+#define UINT16_P(X) (((unsigned HOST_WIDE_INT) (X)) <= 0x0000ffff)
+#define UINT24_P(X) (((unsigned HOST_WIDE_INT) (X)) <= 0x00ffffff)
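+/* Note the compare range is shifted by one relative to INT16_P: e.g.
+   CMP_INT16_P (0x8000) holds but INT16_P (0x8000) does not, since a
+   compare against N can be implemented as an add of -N.  */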
+
+/* Stack layout and stack pointer usage. */
+
+/* Define this macro if pushing a word onto the stack moves the stack
+ pointer to a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Offset from frame pointer to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+/* The frame pointer points at the same place as the stack pointer, except if
+ alloca has been called. */
+#define STARTING_FRAME_OFFSET \
+ M32R_STACK_ALIGN (crtl->outgoing_args_size)
+
+/* Offset from the stack pointer register to the first location at which
+ outgoing arguments are placed. */
+#define STACK_POINTER_OFFSET 0
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 15
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 13
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 16
+
+/* Register in which static-chain is passed to a function.
+ This must not be a register used by the prologue. */
+#define STATIC_CHAIN_REGNUM 7
+
+/* These aren't official macros. */
+#define PROLOGUE_TMP_REGNUM 4
+#define RETURN_ADDR_REGNUM 14
+/* #define GP_REGNUM 12 */
+#define CARRY_REGNUM 17
+#define ACCUM_REGNUM 18
+#define M32R_MAX_INT_REGS 16
+
+#ifndef SUBTARGET_GPR_P
+#define SUBTARGET_GPR_P(REGNO) 0
+#endif
+
+#ifndef SUBTARGET_ACCUM_P
+#define SUBTARGET_ACCUM_P(REGNO) 0
+#endif
+
+#ifndef SUBTARGET_CARRY_P
+#define SUBTARGET_CARRY_P(REGNO) 0
+#endif
+
+#define GPR_P(REGNO) (IN_RANGE_P ((REGNO), 0, 15) || SUBTARGET_GPR_P (REGNO))
+#define ACCUM_P(REGNO) ((REGNO) == ACCUM_REGNUM || SUBTARGET_ACCUM_P (REGNO))
+#define CARRY_P(REGNO) ((REGNO) == CARRY_REGNUM || SUBTARGET_CARRY_P (REGNO))
+
+/* Eliminating the frame and arg pointers. */
+
+#if 0
+/* C statement to store the difference between the frame pointer
+ and the stack pointer values immediately after the function prologue.
+ If `ELIMINABLE_REGS' is defined, this macro will not be used and
+ need not be defined. */
+#define INITIAL_FRAME_POINTER_OFFSET(VAR) \
+((VAR) = m32r_compute_frame_size (get_frame_size ()))
+#endif
+
+/* If defined, this macro specifies a table of register pairs used to
+ eliminate unneeded registers that point into the stack frame. If
+ it is not defined, the only elimination attempted by the compiler
+ is to replace references to the frame pointer with references to
+ the stack pointer.
+
+ Note that eliminations to the stack pointer are specified first
+ since those are the preferred eliminations. */
+
+#define ELIMINABLE_REGS \
+{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM }}
+
+/* This macro is similar to `INITIAL_FRAME_POINTER_OFFSET'. It
+ specifies the initial difference between the specified pair of
+ registers. This macro must be defined if `ELIMINABLE_REGS' is
+ defined. */
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ do \
+ { \
+ int size = m32r_compute_frame_size (get_frame_size ()); \
+ \
+ if ((FROM) == FRAME_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = 0; \
+ else if ((FROM) == ARG_POINTER_REGNUM && (TO) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = size - crtl->args.pretend_args_size; \
+ else if ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = size - crtl->args.pretend_args_size; \
+ else \
+ gcc_unreachable (); \
+ } \
+ while (0)
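+/* For example, with a 24-byte frame and no pretend args, the AP->SP and
+   AP->FP offsets are both 24, while FP->SP is always 0 because the
+   frame pointer points at the same place as the stack pointer.  */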
+
+/* Function argument passing. */
+
+/* If defined, the maximum amount of space required for outgoing
+ arguments will be computed and placed into the variable
+ `crtl->outgoing_args_size'. No space will be pushed
+ onto the stack for each call; instead, the function prologue should
+ increase the stack frame size by this amount. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go. */
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ ((CUM) = 0)
+
+/* The number of registers used for parameter passing. Local to this file. */
+#define M32R_MAX_PARM_REGS 4
+
+/* 1 if N is a possible register number for function argument passing. */
+#define FUNCTION_ARG_REGNO_P(N) \
+ ((unsigned) (N) < M32R_MAX_PARM_REGS)
+
+
+/* Function results. */
+
+/* Tell GCC to use TARGET_RETURN_IN_MEMORY. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Function entry and exit. */
+
+/* Initialize data used by insn expanders. This is called from
+ init_emit, once for each function, before code is generated. */
+#define INIT_EXPANDERS m32r_init_expanders ()
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+#define EXIT_IGNORE_STACK 1
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ do \
+ { \
+ if (flag_pic) \
+ { \
+ fprintf (FILE, "\tld24 r14,#mcount\n"); \
+ fprintf (FILE, "\tadd r14,r12\n"); \
+ fprintf (FILE, "\tld r14,@r14\n"); \
+ fprintf (FILE, "\tjl r14\n"); \
+ } \
+ else \
+ { \
+ if (TARGET_ADDR24) \
+ fprintf (FILE, "\tbl mcount\n"); \
+ else \
+ { \
+ fprintf (FILE, "\tseth r14,#high(mcount)\n"); \
+ fprintf (FILE, "\tor3 r14,r14,#low(mcount)\n"); \
+ fprintf (FILE, "\tjl r14\n"); \
+ } \
+ } \
+ fprintf (FILE, "\taddi sp,#4\n"); \
+ } \
+ while (0)
+
+/* Trampolines. */
+
+/* On the M32R, the trampoline is:
+
+ mv r7, lr -> bl L1 ; 178e 7e01
+L1: add3 r6, lr, #L2-L1 ; 86ae 000c (L2 - L1 = 12)
+ mv lr, r7 -> ld r7,@r6+ ; 1e87 27e6
+ ld r6, @r6 -> jmp r6 ; 26c6 1fc6
+L2: .word STATIC
+ .word FUNCTION */
+
+#ifndef CACHE_FLUSH_FUNC
+#define CACHE_FLUSH_FUNC "_flush_cache"
+#endif
+#ifndef CACHE_FLUSH_TRAP
+#define CACHE_FLUSH_TRAP 12
+#endif
+
+/* Length in bytes of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
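+/* The four paired-instruction words shown above occupy 16 bytes and the
+   two data words for STATIC and FUNCTION another 8, giving the 24.  */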
+
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) m32r_return_addr (COUNT)
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM)
+
+/* Addressing modes, and classification of registers for them. */
+
+/* Maximum number of registers that can appear in a valid memory address. */
+#define MAX_REGS_PER_ADDRESS 1
+
+/* We have post-increment load and pre-decrement/pre-increment store,
+ but only for 4-byte values. */
+#define HAVE_PRE_DECREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_INCREMENT 1
+
+/* Recognize any constant value that is a valid address. */
+#define CONSTANT_ADDRESS_P(X) \
+ ( GET_CODE (X) == LABEL_REF \
+ || GET_CODE (X) == SYMBOL_REF \
+ || CONST_INT_P (X) \
+ || (GET_CODE (X) == CONST \
+ && ! (flag_pic && ! m32r_legitimate_pic_operand_p (X))))
+
+/* Condition code usage. */
+
+/* Return nonzero if SELECT_CC_MODE will never return MODE for a
+ floating point inequality comparison. */
+#define REVERSIBLE_CC_MODE(MODE) 1 /*???*/
+
+/* Costs. */
+
+/* The cost of a branch insn. */
+/* A value of 2 here causes GCC to avoid using branches in comparisons like
+ while (a < N && a). Branches aren't that expensive on the M32R so
+ we define this as 1 (or 2 when TARGET_BRANCH_COST is set). Defining
+ it as 2 unconditionally had a heavy hit in fp-bit.c. */
+#define BRANCH_COST(speed_p, predictable_p) ((TARGET_BRANCH_COST) ? 2 : 1)
+
+/* Nonzero if access to memory by bytes is slow and undesirable.
+ For RISC chips, it means that access to memory by bytes is no
+ better than access by words when possible, so grab a whole word
+ and maybe make use of that. */
+#define SLOW_BYTE_ACCESS 1
+
+/* Define this macro if it is as good or better to call a constant
+ function address than to call an address kept in a register. */
+#define NO_FUNCTION_CSE
+
+/* Section selection. */
+
+#define TEXT_SECTION_ASM_OP "\t.section .text"
+#define DATA_SECTION_ASM_OP "\t.section .data"
+#define BSS_SECTION_ASM_OP "\t.section .bss"
+
+/* Define this macro if jump tables (for tablejump insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used.
+ This macro is irrelevant if there is no separate readonly data section. */
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+
+/* Position Independent Code. */
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. In some cases this register is defined by a
+ processor's ``application binary interface'' (ABI). When this macro
+ is defined, RTL is generated for this register once, as with the stack
+ pointer and frame pointer registers. If this macro is not defined, it
+ is up to the machine-dependent files to allocate such a register (if
+ necessary). */
+#define PIC_OFFSET_TABLE_REGNUM 12
+
+/* Define this macro if the register defined by PIC_OFFSET_TABLE_REGNUM is
+ clobbered by calls. Do not define this macro if PIC_OFFSET_TABLE_REGNUM
+ is not defined. */
+/* This register is call-saved on the M32R. */
+/*#define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED*/
+
+/* A C expression that is nonzero if X is a legitimate immediate
+ operand on the target machine when generating position independent code.
+ You can assume that X satisfies CONSTANT_P, so you need not
+ check this. You can also assume `flag_pic' is true, so you need not
+ check it either. You need not define this macro if all constants
+ (including SYMBOL_REF) can be immediate operands when generating
+ position independent code. */
+#define LEGITIMATE_PIC_OPERAND_P(X) m32r_legitimate_pic_operand_p (X)
+
+/* Control the assembler format that we output. */
+
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will
+ end at the end of the line. */
+#define ASM_COMMENT_START ";"
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+#define ASM_APP_ON ""
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+#define ASM_APP_OFF ""
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.global\t"
+
+/* We do not use DBX_LINES_FUNCTION_RELATIVE or
+ dbxout_stab_value_internal_label_diff here because
+ we need to use .debugsym for the line label. */
+
+#define DBX_OUTPUT_SOURCE_LINE(file, line, counter) \
+ do \
+ { \
+ const char * begin_label = \
+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0); \
+ char label[64]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "LM", counter); \
+ \
+ dbxout_begin_stabn_sline (line); \
+ assemble_name (file, label); \
+ putc ('-', file); \
+ assemble_name (file, begin_label); \
+ fputs ("\n\t.debugsym ", file); \
+ assemble_name (file, label); \
+ putc ('\n', file); \
+ counter += 1; \
+ } \
+ while (0)
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+#ifndef SUBTARGET_REGISTER_NAMES
+#define SUBTARGET_REGISTER_NAMES
+#endif
+
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "fp", "lr", "sp", \
+ "ap", "cbit", "a0" \
+ SUBTARGET_REGISTER_NAMES \
+}
+
+/* If defined, a C initializer for an array of structures containing
+ a name and a register number. This macro defines additional names
+ for hard registers, thus allowing the `asm' option in declarations
+ to refer to registers using alternate names. */
+#ifndef SUBTARGET_ADDITIONAL_REGISTER_NAMES
+#define SUBTARGET_ADDITIONAL_REGISTER_NAMES
+#endif
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ /*{ "gp", GP_REGNUM },*/ \
+ { "r13", FRAME_POINTER_REGNUM }, \
+ { "r14", RETURN_ADDR_REGNUM }, \
+ { "r15", STACK_POINTER_REGNUM }, \
+ SUBTARGET_ADDITIONAL_REGISTER_NAMES \
+}
+
+/* If defined, C string expressions to be used for the `%R', `%L',
+ `%U', and `%I' options of `asm_fprintf' (see `final.c'). These
+ are useful when a single `md' file must support multiple assembler
+ formats. In that case, the various `tm.h' files can define these
+ macros differently. */
+#define REGISTER_PREFIX ""
+#define LOCAL_LABEL_PREFIX ".L"
+#define USER_LABEL_PREFIX ""
+#define IMMEDIATE_PREFIX "#"
+
+/* This is how to output an element of a case-vector that is absolute. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ do \
+ { \
+ char label[30]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
+ fprintf (FILE, "\t.word\t"); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "\n"); \
+ } \
+ while (0)
+
+/* This is how to output an element of a case-vector that is relative. */
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL)\
+ do \
+ { \
+ char label[30]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
+ fprintf (FILE, "\t.word\t"); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", REL); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "\n"); \
+ } \
+ while (0)
+
+/* The desired alignment for the location counter at the beginning
+ of a loop. */
+/* On the M32R, align loops to 32 byte boundaries (cache line size)
+ if -malign-loops. */
+#define LOOP_ALIGN(LABEL) (TARGET_ALIGN_LOOPS ? 5 : 0)
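+/* The value is the log2 of the byte alignment, i.e. 1 << 5 == 32 bytes;
+   zero requests no extra alignment.  */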
+
+/* Define this to be the maximum number of insns to move around when moving
+ a loop test from the top of a loop to the bottom
+ and seeing whether to duplicate it. The default is thirty.
+
+ Loop unrolling currently doesn't like this optimization, so
+ disable doing it if we are unrolling loops and saving space. */
+#define LOOP_TEST_THRESHOLD (optimize_size \
+ && !flag_unroll_loops \
+ && !flag_unroll_all_loops ? 2 : 30)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+/* .balign is used to avoid confusion. */
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ do \
+ { \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.balign %d\n", 1 << (LOG)); \
+ } \
+ while (0)
+
+/* Like `ASM_OUTPUT_COMMON' except takes the required alignment as a
+ separate, explicit argument. If you define this macro, it is used in
+ place of `ASM_OUTPUT_COMMON', and gives you more flexibility in
+ handling the required alignment of the variable. The alignment is
+ specified as the number of bits. */
+
+#define SCOMMON_ASM_OP "\t.scomm\t"
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if (! TARGET_SDATA_NONE \
+ && (SIZE) > 0 \
+ && (SIZE) <= (unsigned HOST_WIDE_INT) g_switch_value) \
+ fprintf ((FILE), "%s", SCOMMON_ASM_OP); \
+ else \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ",%u,%u\n", (int)(SIZE), (ALIGN) / BITS_PER_UNIT);\
+ } \
+ while (0)
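+/* For example, a 4-byte common variable "x" with 32-bit alignment that
+   falls under the -G threshold is emitted as "\t.scomm\tx,4,4"; larger
+   objects, or all objects when TARGET_SDATA_NONE is set, fall back to
+   COMMON_ASM_OP.  */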
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if (! TARGET_SDATA_NONE \
+ && (SIZE) > 0 \
+ && (SIZE) <= (unsigned HOST_WIDE_INT) g_switch_value) \
+ switch_to_section (get_named_section (NULL, ".sbss", 0)); \
+ else \
+ switch_to_section (bss_section); \
+ ASM_OUTPUT_ALIGN (FILE, floor_log2 (ALIGN / BITS_PER_UNIT)); \
+ last_assemble_variable_decl = DECL; \
+ ASM_DECLARE_OBJECT_NAME (FILE, NAME, DECL); \
+ ASM_OUTPUT_SKIP (FILE, SIZE ? SIZE : 1); \
+ } \
+ while (0)
+
+/* Debugging information. */
+
+/* Generate DBX and DWARF debugging information. */
+#define DBX_DEBUGGING_INFO 1
+#define DWARF2_DEBUGGING_INFO 1
+
+/* Use DWARF2 debugging info by default. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Turn off splitting of long stabs. */
+#define DBX_CONTIN_LENGTH 0
+
+/* Miscellaneous. */
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE (flag_pic ? SImode : Pmode)
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, UNKNOWN if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Max number of bytes we can move from memory
+ to memory in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define this to be nonzero if shift instructions ignore all but the low-order
+ few bits. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+/* ??? The M32R doesn't have full 32-bit pointers, but making this PSImode has
+ its own problems (you have to add extendpsisi2 and truncsipsi2).
+ Try to avoid it. */
+#define Pmode SImode
+
+/* A function address in a call instruction. */
+#define FUNCTION_MODE SImode
+
+/* M32R function types. */
+enum m32r_function_type
+{
+ M32R_FUNCTION_UNKNOWN, M32R_FUNCTION_NORMAL, M32R_FUNCTION_INTERRUPT
+};
+
+#define M32R_INTERRUPT_P(TYPE) ((TYPE) == M32R_FUNCTION_INTERRUPT)
+
+/* The maximum number of bytes to copy using pairs of load/store instructions.
+ If a block is larger than this then a loop will be generated to copy
+ MAX_MOVE_BYTES chunks at a time. The value of 32 is a semi-arbitrary choice.
+ A customer uses Dhrystone as their benchmark, and Dhrystone has a 31-byte
+ string copy in it. */
+#define MAX_MOVE_BYTES 32
diff --git a/gcc-4.9/gcc/config/m32r/m32r.md b/gcc-4.9/gcc/config/m32r/m32r.md
new file mode 100644
index 000000000..47efb910d
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/m32r.md
@@ -0,0 +1,2276 @@
+;; Machine description of the Renesas M32R cpu for GNU C compiler
+;; Copyright (C) 1996-2014 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; UNSPEC_VOLATILE usage
+(define_constants
+ [(UNSPECV_BLOCKAGE 0)
+ (UNSPECV_FLUSH_ICACHE 1)])
+
+;; UNSPEC usage
+(define_constants
+ [(UNSPEC_LOAD_SDA_BASE 2)
+ (UNSPEC_SET_CBIT 3)
+ (UNSPEC_PIC_LOAD_ADDR 4)
+ (UNSPEC_GET_PC 5)
+ (UNSPEC_GOTOFF 6)
+ ])
+
+;; Insn type. Used to default other attribute values.
+(define_attr "type"
+ "int2,int4,load2,load4,load8,store2,store4,store8,shift2,shift4,mul2,div4,uncond_branch,branch,call,multi,misc"
+ (const_string "misc"))
+
+;; Length in bytes.
+(define_attr "length" ""
+ (cond [(eq_attr "type" "int2,load2,store2,shift2,mul2")
+ (const_int 2)
+
+ (eq_attr "type" "int4,load4,store4,shift4,div4")
+ (const_int 4)
+
+ (eq_attr "type" "multi")
+ (const_int 8)
+
+ (eq_attr "type" "uncond_branch,branch,call")
+ (const_int 4)]
+
+ (const_int 4)))
+
+;; The length here is the length of a single asm. Unfortunately it might be
+;; 2 or 4 so we must allow for 4. That's ok though.
+(define_asm_attributes
+ [(set_attr "length" "4")
+ (set_attr "type" "multi")])
+
+;; Whether an instruction is short (16-bit) or long (32-bit).
+(define_attr "insn_size" "short,long"
+ (if_then_else (eq_attr "type" "int2,load2,store2,shift2,mul2")
+ (const_string "short")
+ (const_string "long")))
+
+;; The target CPU we're compiling for.
+(define_attr "cpu" "m32r,m32r2,m32rx"
+ (cond [(match_test "TARGET_M32RX")
+ (const_string "m32rx")
+ (match_test "TARGET_M32R2")
+ (const_string "m32r2")]
+ (const_string "m32r")))
+
+;; Defines the pipeline an instruction can execute on.
+;; For the M32R, a short instruction can execute on either of the two pipes.
+;; For the M32Rx, the restrictions are modelled in the second
+;; condition of this attribute definition.
+(define_attr "m32r_pipeline" "either,s,o,long"
+ (cond [(and (eq_attr "cpu" "m32r")
+ (eq_attr "insn_size" "short"))
+ (const_string "either")
+ (eq_attr "insn_size" "!short")
+ (const_string "long")]
+ (cond [(eq_attr "type" "int2")
+ (const_string "either")
+ (eq_attr "type" "load2,store2,shift2,uncond_branch,branch,call")
+ (const_string "o")
+ (eq_attr "type" "mul2")
+ (const_string "s")]
+ (const_string "long"))))
+
+;; ::::::::::::::::::::
+;; ::
+;; :: Pipeline description
+;; ::
+;; ::::::::::::::::::::
+
+;; This model is based on Chapter 2, Appendix 3 and Appendix 4 of the
+;; "M32R-FPU Software Manual", Revision 1.01, plus additional information
+;; obtained by our best friend and mine, Google.
+;;
+;; The pipeline is modelled as a fetch unit plus a core containing a
+;; memory unit and two execution units: "fetch" models IF and D, "memory"
+;; models MEM1 and MEM2, and "EXEC" models E, E1, E2, EM, and EA.
+;; Writeback and bypasses are not modelled.
+(define_automaton "m32r")
+
+;; We pretend there are two short (16-bit) instruction fetchers. The
+;; "s" short fetcher cannot be reserved until the "o" short fetcher is
+;; reserved. Some instructions reserve both the left and right fetchers.
+;; These fetch units are a hack to get GCC to better pack the instructions
+;; for the M32Rx processor, which has two execution pipes.
+;;
+;; In reality there is only one decoder, which can decode either two 16-bit
+;; instructions, or a single 32-bit instruction.
+;;
+;; Note, "fetch" models both the IF and the D pipeline stages.
+;;
+;; The m32rx core has two execution pipes. We name them o_E and s_E.
+;; In addition, there's a memory unit.
+
+(define_cpu_unit "o_IF,s_IF,o_E,s_E,memory" "m32r")
+
+;; Prevent the s pipe from being reserved before the o pipe.
+(absence_set "s_IF" "o_IF")
+(absence_set "s_E" "o_E")
+
+;; On the M32Rx, long instructions execute on both pipes, so reserve
+;; both fetch slots and both pipes.
+(define_reservation "long_IF" "o_IF+s_IF")
+(define_reservation "long_E" "o_E+s_E")
+
+;; ::::::::::::::::::::
+
+;; Simple instructions do 4 stages: IF D E WB. WB is not modelled.
+;; Hence, ready latency is 1.
+(define_insn_reservation "short_left" 1
+ (and (eq_attr "m32r_pipeline" "o")
+ (and (eq_attr "insn_size" "short")
+ (eq_attr "type" "!load2")))
+ "o_IF,o_E")
+
+(define_insn_reservation "short_right" 1
+ (and (eq_attr "m32r_pipeline" "s")
+ (and (eq_attr "insn_size" "short")
+ (eq_attr "type" "!load2")))
+ "s_IF,s_E")
+
+(define_insn_reservation "short_either" 1
+ (and (eq_attr "m32r_pipeline" "either")
+ (and (eq_attr "insn_size" "short")
+ (eq_attr "type" "!load2")))
+ "o_IF|s_IF,o_E|s_E")
+
+(define_insn_reservation "long_m32r" 1
+ (and (eq_attr "cpu" "m32r")
+ (and (eq_attr "insn_size" "long")
+ (eq_attr "type" "!load4,load8")))
+ "long_IF,long_E")
+
+(define_insn_reservation "long_m32rx" 2
+ (and (eq_attr "m32r_pipeline" "long")
+ (and (eq_attr "insn_size" "long")
+ (eq_attr "type" "!load4,load8")))
+ "long_IF,long_E")
+
+;; Load/store instructions do 6 stages: IF D E MEM1 MEM2 WB.
+;; MEM1 may require more than one cycle depending on locality. We
+;; optimistically assume all memory is nearby, i.e. MEM1 takes only
+;; one cycle. Hence, ready latency is 3.
+
+;; The M32Rx can do short load/store only on the left pipe.
+(define_insn_reservation "short_load_left" 3
+ (and (eq_attr "m32r_pipeline" "o")
+ (and (eq_attr "insn_size" "short")
+ (eq_attr "type" "load2")))
+ "o_IF,o_E,memory*2")
+
+(define_insn_reservation "short_load" 3
+ (and (eq_attr "m32r_pipeline" "either")
+ (and (eq_attr "insn_size" "short")
+ (eq_attr "type" "load2")))
+ "s_IF|o_IF,s_E|o_E,memory*2")
+
+(define_insn_reservation "long_load" 3
+ (and (eq_attr "cpu" "m32r")
+ (and (eq_attr "insn_size" "long")
+ (eq_attr "type" "load4,load8")))
+ "long_IF,long_E,memory*2")
+
+(define_insn_reservation "long_load_m32rx" 3
+ (and (eq_attr "m32r_pipeline" "long")
+ (eq_attr "type" "load4,load8"))
+ "long_IF,long_E,memory*2")
+
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; Expand prologue as RTL
+(define_expand "prologue"
+ [(const_int 1)]
+ ""
+ "
+{
+ m32r_expand_prologue ();
+ DONE;
+}")
+
+;; Expand epilogue as RTL
+(define_expand "epilogue"
+ [(return)]
+ ""
+ "
+{
+ m32r_expand_epilogue ();
+ emit_jump_insn (gen_return_normal ());
+ DONE;
+}")
+
+;; Move instructions.
+;;
+;; For QI and HI moves, the register must contain the full properly
+;; sign-extended value. nonzero_bits assumes this [otherwise
+;; SHORT_IMMEDIATES_SIGN_EXTEND must be used, but the comment for it
+;; says it's a kludge and the .md files should be fixed instead].
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Fixup PIC cases. */
+ if (flag_pic)
+ {
+ if (symbolic_operand (operands[1], QImode))
+ {
+ if (reload_in_progress || reload_completed)
+ operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
+ else
+ operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
+ }
+ }
+
+ /* Everything except mem = const or mem = mem can be done easily.
+ Objects in the small data area are handled too. */
+
+ if (MEM_P (operands[0]))
+ operands[1] = force_reg (QImode, operands[1]);
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "move_dest_operand" "=r,r,r,r,r,T,m")
+ (match_operand:QI 1 "move_src_operand" "r,I,JQR,T,m,r,r"))]
+ "register_operand (operands[0], QImode) || register_operand (operands[1], QImode)"
+ "@
+ mv %0,%1
+ ldi %0,%#%1
+ ldi %0,%#%1
+ ldub %0,%1
+ ldub %0,%1
+ stb %1,%0
+ stb %1,%0"
+ [(set_attr "type" "int2,int2,int4,load2,load4,store2,store4")
+ (set_attr "length" "2,2,4,2,4,2,4")])
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Fixup PIC cases. */
+ if (flag_pic)
+ {
+ if (symbolic_operand (operands[1], HImode))
+ {
+ if (reload_in_progress || reload_completed)
+ operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
+ else
+ operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
+ }
+ }
+
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (MEM_P (operands[0]))
+ operands[1] = force_reg (HImode, operands[1]);
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "move_dest_operand" "=r,r,r,r,r,r,T,m")
+ (match_operand:HI 1 "move_src_operand" "r,I,JQR,K,T,m,r,r"))]
+ "register_operand (operands[0], HImode) || register_operand (operands[1], HImode)"
+ "@
+ mv %0,%1
+ ldi %0,%#%1
+ ldi %0,%#%1
+ ld24 %0,%#%1
+ lduh %0,%1
+ lduh %0,%1
+ sth %1,%0
+ sth %1,%0"
+ [(set_attr "type" "int2,int2,int4,int4,load2,load4,store2,store4")
+ (set_attr "length" "2,2,4,4,2,4,2,4")])
+
+(define_expand "movsi_push"
+ [(set (mem:SI (pre_dec:SI (match_operand:SI 0 "register_operand" "")))
+ (match_operand:SI 1 "register_operand" ""))]
+ ""
+ "")
+
+(define_expand "movsi_pop"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (mem:SI (post_inc:SI (match_operand:SI 1 "register_operand" ""))))]
+ ""
+ "")
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Fixup PIC cases. */
+ if (flag_pic)
+ {
+ if (symbolic_operand (operands[1], SImode))
+ {
+ if (reload_in_progress || reload_completed)
+ operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
+ else
+ operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
+ }
+ }
+
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (MEM_P (operands[0]))
+ operands[1] = force_reg (SImode, operands[1]);
+
+ /* Small Data Area reference? */
+ if (small_data_operand (operands[1], SImode))
+ {
+ emit_insn (gen_movsi_sda (operands[0], operands[1]));
+ DONE;
+ }
+
+ /* If medium or large code model, symbols have to be loaded with
+ seth/add3. */
+ if (addr32_operand (operands[1], SImode))
+ {
+ emit_insn (gen_movsi_addr32 (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+;; ??? Do we need a const_double constraint here for large unsigned values?
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "move_dest_operand" "=r,r,r,r,r,r,r,r,r,T,S,m")
+ (match_operand:SI 1 "move_src_operand" "r,I,J,MQ,L,n,T,U,m,r,r,r"))]
+ "register_operand (operands[0], SImode) || register_operand (operands[1], SImode)"
+ "*
+{
+ if (REG_P (operands[0]) || GET_CODE (operands[1]) == SUBREG)
+ {
+ switch (GET_CODE (operands[1]))
+ {
+ default:
+ break;
+
+ case REG:
+ case SUBREG:
+ return \"mv %0,%1\";
+
+ case MEM:
+ if (GET_CODE (XEXP (operands[1], 0)) == POST_INC
+ && XEXP (XEXP (operands[1], 0), 0) == stack_pointer_rtx)
+ return \"pop %0\";
+
+ return \"ld %0,%1\";
+
+ case CONST_INT:
+ if (satisfies_constraint_J (operands[1]))
+ return \"ldi %0,%#%1\\t; %X1\";
+
+ if (satisfies_constraint_M (operands[1]))
+ return \"ld24 %0,%#%1\\t; %X1\";
+
+ if (satisfies_constraint_L (operands[1]))
+ return \"seth %0,%#%T1\\t; %X1\";
+
+ return \"#\";
+
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ if (TARGET_ADDR24)
+ return \"ld24 %0,%#%1\";
+
+ return \"#\";
+ }
+ }
+
+ else if (MEM_P (operands[0])
+ && (REG_P (operands[1]) || GET_CODE (operands[1]) == SUBREG))
+ {
+ if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
+ && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx)
+ return \"push %1\";
+
+ return \"st %1,%0\";
+ }
+
+ gcc_unreachable ();
+}"
+ [(set_attr "type" "int2,int2,int4,int4,int4,multi,load2,load2,load4,store2,store2,store4")
+ (set_attr "length" "2,2,4,4,4,8,2,2,4,2,2,4")])
+
+; Try to use a four byte / two byte pair for constants not loadable with
+; ldi, ld24, seth.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "two_insn_const_operand" ""))]
+ ""
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (ior:SI (match_dup 0) (match_dup 3)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT tmp;
+ int shift;
+
+ /* In all cases we will emit two instructions. However we try to
+ use 2 byte instructions wherever possible. We can assume the
+ constant isn't loadable with any of ldi, ld24, or seth. */
+
+ /* See if we can load a 24-bit unsigned value and invert it. */
+ if (UINT24_P (~ val))
+ {
+ emit_insn (gen_movsi (operands[0], GEN_INT (~ val)));
+ emit_insn (gen_one_cmplsi2 (operands[0], operands[0]));
+ DONE;
+ }
+
+ /* See if we can load a 24-bit unsigned value and shift it into place.
+ 0x01fffffe is just beyond ld24's range. */
+ for (shift = 1, tmp = 0x01fffffe;
+ shift < 8;
+ ++shift, tmp <<= 1)
+ {
+ if ((val & ~tmp) == 0)
+ {
+ emit_insn (gen_movsi (operands[0], GEN_INT (val >> shift)));
+ emit_insn (gen_ashlsi3 (operands[0], operands[0], GEN_INT (shift)));
+ DONE;
+ }
+ }
+
+ /* Can't use any two-byte insn, fall back to seth/or3. Use ~0xffff instead
+ of 0xffff0000, since the latter fails on a 64-bit host. */
+ operands[2] = GEN_INT ((val) & ~0xffff);
+ operands[3] = GEN_INT ((val) & 0xffff);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "seth_add3_operand" ""))]
+ "TARGET_ADDR32"
+ [(set (match_dup 0)
+ (high:SI (match_dup 1)))
+ (set (match_dup 0)
+ (lo_sum:SI (match_dup 0)
+ (match_dup 1)))]
+ "")
+
+;; Small data area support.
+;; The address of _SDA_BASE_ is loaded into a register and all objects in
+;; the small data area are indexed off that. This is done for each reference
+;; but cse will clean things up for us. We let the compiler choose the
+;; register to use so we needn't allocate (and maybe even fix) a special
+;; register to use. Since the load and store insns have a 16-bit offset the
+;; total size of the data area can be 64K. However, if the data area lives
+;; above 16M (24 bits), _SDA_BASE_ will have to be loaded with seth/add3 which
+;; would then yield 3 instructions to reference an object [though there would
+;; be no net loss if two or more objects were referenced]. The 3 insns can be
+;; reduced back to 2 if the size of the small data area were reduced to 32K
+;; [then seth + ld/st would work for any object in the area]. Doing this
+;; would require special handling of _SDA_BASE_ (its value would be
+;; (.sdata + 32K) & 0xffff0000) and reloc computations would be different
+;; [I think]. What to do about this is deferred until later and for now we
+;; require .sdata to be in the first 16M.
+
+(define_expand "movsi_sda"
+ [(set (match_dup 2)
+ (unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lo_sum:SI (match_dup 2)
+ (match_operand:SI 1 "small_data_operand" "")))]
+ ""
+ "
+{
+ if (reload_in_progress || reload_completed)
+ operands[2] = operands[0];
+ else
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_insn "*load_sda_base_32"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))]
+ "TARGET_ADDR32"
+ "seth %0,%#shigh(_SDA_BASE_)\;add3 %0,%0,%#low(_SDA_BASE_)"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "*load_sda_base"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))]
+ ""
+ "ld24 %0,#_SDA_BASE_"
+ [(set_attr "type" "int4")
+ (set_attr "length" "4")])
+
+;; 32-bit address support.
+
+(define_expand "movsi_addr32"
+ [(set (match_dup 2)
+    ; addr32_operand isn't used because it's too restrictive;
+    ; seth_add3_operand is more general and thus safer.
+ (high:SI (match_operand:SI 1 "seth_add3_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lo_sum:SI (match_dup 2) (match_dup 1)))]
+ ""
+ "
+{
+ if (reload_in_progress || reload_completed)
+ operands[2] = operands[0];
+ else
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_insn "set_hi_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (match_operand 1 "symbolic_operand" "")))]
+ ""
+ "seth %0,%#shigh(%1)"
+ [(set_attr "type" "int4")
+ (set_attr "length" "4")])
+
+(define_insn "lo_sum_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")))]
+ ""
+ "add3 %0,%1,%#%B2"
+ [(set_attr "type" "int4")
+ (set_attr "length" "4")])
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Fixup PIC cases. */
+ if (flag_pic)
+ {
+ if (symbolic_operand (operands[1], DImode))
+ {
+ if (reload_in_progress || reload_completed)
+ operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
+ else
+ operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
+ }
+ }
+
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (MEM_P (operands[0]))
+ operands[1] = force_reg (DImode, operands[1]);
+}")
+
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "move_dest_operand" "=r,r,r,r,m")
+ (match_operand:DI 1 "move_double_src_operand" "r,nG,F,m,r"))]
+ "register_operand (operands[0], DImode) || register_operand (operands[1], DImode)"
+ "#"
+ [(set_attr "type" "multi,multi,multi,load8,store8")
+ (set_attr "length" "4,4,16,6,6")])
+
+(define_split
+ [(set (match_operand:DI 0 "move_dest_operand" "")
+ (match_operand:DI 1 "move_double_src_operand" ""))]
+ "reload_completed"
+ [(match_dup 2)]
+ "operands[2] = gen_split_move_double (operands);")
+
+;; Floating point move insns.
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Fixup PIC cases. */
+ if (flag_pic)
+ {
+ if (symbolic_operand (operands[1], SFmode))
+ {
+ if (reload_in_progress || reload_completed)
+ operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
+ else
+ operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
+ }
+ }
+
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (MEM_P (operands[0]))
+ operands[1] = force_reg (SFmode, operands[1]);
+}")
+
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "move_dest_operand" "=r,r,r,r,r,T,S,m")
+ (match_operand:SF 1 "move_src_operand" "r,F,U,S,m,r,r,r"))]
+ "register_operand (operands[0], SFmode) || register_operand (operands[1], SFmode)"
+ "@
+ mv %0,%1
+ #
+ ld %0,%1
+ ld %0,%1
+ ld %0,%1
+ st %1,%0
+ st %1,%0
+ st %1,%0"
+ ;; ??? Length of alternative 1 is either 2, 4 or 8.
+ [(set_attr "type" "int2,multi,load2,load2,load4,store2,store2,store4")
+ (set_attr "length" "2,8,2,2,4,2,2,4")])
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "const_double_operand" ""))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ operands[2] = operand_subword (operands[0], 0, 0, SFmode);
+ operands[3] = operand_subword (operands[1], 0, 0, SFmode);
+}")
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Fixup PIC cases. */
+ if (flag_pic)
+ {
+ if (symbolic_operand (operands[1], DFmode))
+ {
+ if (reload_in_progress || reload_completed)
+ operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
+ else
+ operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
+ }
+ }
+
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (MEM_P (operands[0]))
+ operands[1] = force_reg (DFmode, operands[1]);
+}")
+
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "move_dest_operand" "=r,r,r,m")
+ (match_operand:DF 1 "move_double_src_operand" "r,F,m,r"))]
+ "register_operand (operands[0], DFmode) || register_operand (operands[1], DFmode)"
+ "#"
+ [(set_attr "type" "multi,multi,load8,store8")
+ (set_attr "length" "4,16,6,6")])
+
+(define_split
+ [(set (match_operand:DF 0 "move_dest_operand" "")
+ (match_operand:DF 1 "move_double_src_operand" ""))]
+ "reload_completed"
+ [(match_dup 2)]
+ "operands[2] = gen_split_move_double (operands);")
+
+;; Zero extension instructions.
+
+(define_insn "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r")
+ (zero_extend:HI (match_operand:QI 1 "extend_operand" "r,T,m")))]
+ ""
+ "@
+ and3 %0,%1,%#255
+ ldub %0,%1
+ ldub %0,%1"
+ [(set_attr "type" "int4,load2,load4")
+ (set_attr "length" "4,2,4")])
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (zero_extend:SI (match_operand:QI 1 "extend_operand" "r,T,m")))]
+ ""
+ "@
+ and3 %0,%1,%#255
+ ldub %0,%1
+ ldub %0,%1"
+ [(set_attr "type" "int4,load2,load4")
+ (set_attr "length" "4,2,4")])
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (zero_extend:SI (match_operand:HI 1 "extend_operand" "r,T,m")))]
+ ""
+ "@
+ and3 %0,%1,%#65535
+ lduh %0,%1
+ lduh %0,%1"
+ [(set_attr "type" "int4,load2,load4")
+ (set_attr "length" "4,2,4")])
+
+;; Signed conversions from a smaller integer to a larger integer
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r")
+ (sign_extend:HI (match_operand:QI 1 "extend_operand" "0,T,m")))]
+ ""
+ "@
+ #
+ ldb %0,%1
+ ldb %0,%1"
+ [(set_attr "type" "multi,load2,load4")
+ (set_attr "length" "2,2,4")])
+
+(define_split
+ [(set (match_operand:HI 0 "register_operand" "")
+ (sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
+ "reload_completed"
+ [(match_dup 2)
+ (match_dup 3)]
+ "
+{
+ rtx op0 = gen_lowpart (SImode, operands[0]);
+ rtx shift = GEN_INT (24);
+
+ operands[2] = gen_ashlsi3 (op0, op0, shift);
+ operands[3] = gen_ashrsi3 (op0, op0, shift);
+}")
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (sign_extend:SI (match_operand:QI 1 "extend_operand" "0,T,m")))]
+ ""
+ "@
+ #
+ ldb %0,%1
+ ldb %0,%1"
+ [(set_attr "type" "multi,load2,load4")
+ (set_attr "length" "4,2,4")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
+ "reload_completed"
+ [(match_dup 2)
+ (match_dup 3)]
+ "
+{
+ rtx shift = GEN_INT (24);
+
+ operands[2] = gen_ashlsi3 (operands[0], operands[0], shift);
+ operands[3] = gen_ashrsi3 (operands[0], operands[0], shift);
+}")
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (sign_extend:SI (match_operand:HI 1 "extend_operand" "0,T,m")))]
+ ""
+ "@
+ #
+ ldh %0,%1
+ ldh %0,%1"
+ [(set_attr "type" "multi,load2,load4")
+ (set_attr "length" "4,2,4")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
+ "reload_completed"
+ [(match_dup 2)
+ (match_dup 3)]
+ "
+{
+ rtx shift = GEN_INT (16);
+
+ operands[2] = gen_ashlsi3 (operands[0], operands[0], shift);
+ operands[3] = gen_ashrsi3 (operands[0], operands[0], shift);
+}")
+
+;; Arithmetic instructions.
+
+; ??? Adding an alternative to split add3 of small constants into two
+; insns yields better instruction packing but slower code. Adds of small
+; values are done a lot.
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,I,J")))]
+ ""
+ "@
+ add %0,%2
+ addi %0,%#%2
+ add3 %0,%1,%#%2"
+ [(set_attr "type" "int2,int2,int4")
+ (set_attr "length" "2,2,4")])
+
+;(define_split
+; [(set (match_operand:SI 0 "register_operand" "")
+; (plus:SI (match_operand:SI 1 "register_operand" "")
+; (match_operand:SI 2 "int8_operand" "")))]
+; "reload_completed
+; && REGNO (operands[0]) != REGNO (operands[1])
+; && satisfies_constraint_I (operands[2])
+; && INTVAL (operands[2]) != 0"
+; [(set (match_dup 0) (match_dup 1))
+; (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 2)))]
+; "")
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC 17))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "6")])
+
+;; ??? The cmp clears the condition bit. Can we speed up somehow?
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (clobber (reg:CC 17))]
+ "reload_completed"
+ [(parallel [(set (reg:CC 17)
+ (const_int 0))
+ (use (match_dup 4))])
+ (parallel [(set (match_dup 4)
+ (plus:SI (match_dup 4)
+ (plus:SI (match_dup 5)
+ (ne:SI (reg:CC 17) (const_int 0)))))
+ (set (reg:CC 17)
+ (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])
+ (parallel [(set (match_dup 6)
+ (plus:SI (match_dup 6)
+ (plus:SI (match_dup 7)
+ (ne:SI (reg:CC 17) (const_int 0)))))
+ (set (reg:CC 17)
+ (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])]
+ "
+{
+ operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
+ operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
+ operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
+ operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
+}")
+
+(define_insn "*clear_c"
+ [(set (reg:CC 17)
+ (const_int 0))
+ (use (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "cmp %0,%0"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+(define_insn "*add_carry"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0")
+ (plus:SI (match_operand:SI 2 "register_operand" "r")
+ (ne:SI (reg:CC 17) (const_int 0)))))
+ (set (reg:CC 17)
+ (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))]
+ ""
+ "addx %0,%2"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "sub %0,%2"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC 17))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "6")])
+
+;; ??? The cmp clears the condition bit. Can we speed up somehow?
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (minus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (clobber (reg:CC 17))]
+ "reload_completed"
+ [(parallel [(set (reg:CC 17)
+ (const_int 0))
+ (use (match_dup 4))])
+ (parallel [(set (match_dup 4)
+ (minus:SI (match_dup 4)
+ (minus:SI (match_dup 5)
+ (ne:SI (reg:CC 17) (const_int 0)))))
+ (set (reg:CC 17)
+ (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])
+ (parallel [(set (match_dup 6)
+ (minus:SI (match_dup 6)
+ (minus:SI (match_dup 7)
+ (ne:SI (reg:CC 17) (const_int 0)))))
+ (set (reg:CC 17)
+ (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])]
+ "
+{
+ operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
+ operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
+ operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
+ operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
+}")
+
+(define_insn "*sub_carry"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "%0")
+ (minus:SI (match_operand:SI 2 "register_operand" "r")
+ (ne:SI (reg:CC 17) (const_int 0)))))
+ (set (reg:CC 17)
+ (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))]
+ ""
+ "subx %0,%2"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+; Multiply/Divide instructions.
+
+(define_insn "mulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "r"))
+ (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
+ ""
+ "mullo %1,%2\;mvfacmi %0"
+ [(set_attr "type" "multi")
+ (set_attr "length" "4")])
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "mul %0,%2"
+ [(set_attr "type" "mul2")
+ (set_attr "length" "2")])
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "div %0,%2"
+ [(set_attr "type" "div4")
+ (set_attr "length" "4")])
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "divu %0,%2"
+ [(set_attr "type" "div4")
+ (set_attr "length" "4")])
+
+(define_insn "modsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mod:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "rem %0,%2"
+ [(set_attr "type" "div4")
+ (set_attr "length" "4")])
+
+(define_insn "umodsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (umod:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "remu %0,%2"
+ [(set_attr "type" "div4")
+ (set_attr "length" "4")])
+
+;; Boolean instructions.
+;;
+;; We don't define the DImode versions as expand_binop does a good enough job.
+;; And if it doesn't it should be fixed.
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,r")
+ (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
+ ""
+ "*
+{
+ /* If we are worried about space, see if we can break this up into two
+ short instructions, which might eliminate a NOP being inserted. */
+ if (optimize_size
+ && m32r_not_same_reg (operands[0], operands[1])
+ && satisfies_constraint_I (operands[2]))
+ return \"#\";
+
+ else if (CONST_INT_P (operands[2]))
+ return \"and3 %0,%1,%#%X2\";
+
+ return \"and %0,%2\";
+}"
+ [(set_attr "type" "int2,int4")
+ (set_attr "length" "2,4")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "int8_operand" "")))]
+ "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (and:SI (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,r")
+ (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
+ ""
+ "*
+{
+ /* If we are worried about space, see if we can break this up into two
+ short instructions, which might eliminate a NOP being inserted. */
+ if (optimize_size
+ && m32r_not_same_reg (operands[0], operands[1])
+ && satisfies_constraint_I (operands[2]))
+ return \"#\";
+
+ else if (CONST_INT_P (operands[2]))
+ return \"or3 %0,%1,%#%X2\";
+
+ return \"or %0,%2\";
+}"
+ [(set_attr "type" "int2,int4")
+ (set_attr "length" "2,4")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "int8_operand" "")))]
+ "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (ior:SI (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0,r")
+ (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
+ ""
+ "*
+{
+ /* If we are worried about space, see if we can break this up into two
+ short instructions, which might eliminate a NOP being inserted. */
+ if (optimize_size
+ && m32r_not_same_reg (operands[0], operands[1])
+ && satisfies_constraint_I (operands[2]))
+ return \"#\";
+
+ else if (CONST_INT_P (operands[2]))
+ return \"xor3 %0,%1,%#%X2\";
+
+ return \"xor %0,%2\";
+}"
+ [(set_attr "type" "int2,int4")
+ (set_attr "length" "2,4")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "int8_operand" "")))]
+ "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (xor:SI (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "neg %0,%1"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "not %0,%1"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+;; Shift instructions.
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
+ ""
+ "@
+ sll %0,%2
+ slli %0,%#%2
+ sll3 %0,%1,%#%2"
+ [(set_attr "type" "shift2,shift2,shift4")
+ (set_attr "length" "2,2,4")])
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
+ ""
+ "@
+ sra %0,%2
+ srai %0,%#%2
+ sra3 %0,%1,%#%2"
+ [(set_attr "type" "shift2,shift2,shift4")
+ (set_attr "length" "2,2,4")])
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
+ ""
+ "@
+ srl %0,%2
+ srli %0,%#%2
+ srl3 %0,%1,%#%2"
+ [(set_attr "type" "shift2,shift2,shift4")
+ (set_attr "length" "2,2,4")])
+
+;; Compare instructions.
+;; This controls RTL generation and register allocation.
+
+;; We generate RTL for comparisons and branches by having the cmpxx
+;; patterns store away the operands. Then the bcc patterns
+;; emit RTL for both the compare and the branch.
+;;
+;; On the m32r it is more efficient to use the bxxz instructions and
+;; thus merge the compare and branch into one instruction, so they are
+;; preferred.
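+;;
+;; For example, an equality test between two registers becomes a single
+;; beq/bne via the *reg_branch_insn pattern below, and a signed compare
+;; against zero becomes one of the bxxz forms, instead of a separate
+;; cmp followed by bc/bnc.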
+
+(define_insn "cmp_eqsi_zero_insn"
+ [(set (reg:CC 17)
+ (eq:CC (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "reg_or_zero_operand" "r,P")))]
+ "TARGET_M32RX || TARGET_M32R2"
+ "@
+ cmpeq %0, %1
+ cmpz %0"
+ [(set_attr "type" "int4")
+ (set_attr "length" "4")])
+
+;; The cmp_xxx_insn patterns set the condition bit to the result of the
+;; comparison. There isn't a "compare equal" instruction so cmp_eqsi_insn
+;; is quite inefficient. However, it is rarely used.
+
+(define_insn "cmp_eqsi_insn"
+ [(set (reg:CC 17)
+ (eq:CC (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "reg_or_cmp_int16_operand" "r,P")))
+ (clobber (match_scratch:SI 2 "=&r,&r"))]
+ ""
+ "*
+{
+ if (which_alternative == 0)
+ {
+ return \"mv %2,%0\;sub %2,%1\;cmpui %2,#1\";
+ }
+ else
+ {
+ if (INTVAL (operands [1]) == 0)
+ return \"cmpui %0, #1\";
+ else if (REGNO (operands [2]) == REGNO (operands [0]))
+ return \"addi %0,%#%N1\;cmpui %2,#1\";
+ else
+ return \"add3 %2,%0,%#%N1\;cmpui %2,#1\";
+ }
+}"
+ [(set_attr "type" "multi,multi")
+ (set_attr "length" "8,8")])
+
+(define_insn "cmp_ltsi_insn"
+ [(set (reg:CC 17)
+ (lt:CC (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
+ ""
+ "@
+ cmp %0,%1
+ cmpi %0,%#%1"
+ [(set_attr "type" "int2,int4")
+ (set_attr "length" "2,4")])
+
+(define_insn "cmp_ltusi_insn"
+ [(set (reg:CC 17)
+ (ltu:CC (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
+ ""
+ "@
+ cmpu %0,%1
+ cmpui %0,%#%1"
+ [(set_attr "type" "int2,int4")
+ (set_attr "length" "2,4")])
+
+;; These control RTL generation for conditional jump insns.
+
+(define_expand "cbranchsi4"
+ ; the comparison is emitted by gen_compare if needed.
+ [(set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "reg_or_cmp_int16_operand" "")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[0] = gen_compare (GET_CODE (operands[0]), operands[1], operands[2], FALSE);
+ operands[1] = XEXP (operands[0], 0);
+ operands[2] = XEXP (operands[0], 1);
+}")
+
+;; Now match both normal and inverted jump.
+
+(define_insn "*branch_insn"
+ [(set (pc)
+ (if_then_else (match_operator 1 "eqne_comparison_operator"
+ [(reg 17) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ static char instruction[40];
+ sprintf (instruction, \"%s%s %%l0\",
+ (GET_CODE (operands[1]) == NE) ? \"bc\" : \"bnc\",
+ (get_attr_length (insn) == 2) ? \".s\" : \"\");
+ return instruction;
+}"
+ [(set_attr "type" "branch")
+ ; cf PR gcc/28508
+  ; We use 300/600 instead of 512/1024 to account for inaccurate insn
+  ; lengths and insn alignments that are complex to track.
+  ; It's not important that we be hyper-precise here. Precision may matter
+  ; more once the chip supports parallel execution, but until then this is
+  ; simple and suffices.
+ (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+ (const_int 300))
+ (const_int 600))
+ (const_int 2)
+ (const_int 4)))])
+
+(define_insn "*rev_branch_insn"
+ [(set (pc)
+ (if_then_else (match_operator 1 "eqne_comparison_operator"
+ [(reg 17) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ;"REVERSIBLE_CC_MODE (GET_MODE (XEXP (operands[1], 0)))"
+ ""
+ "*
+{
+ static char instruction[40];
+ sprintf (instruction, \"%s%s %%l0\",
+ (GET_CODE (operands[1]) == EQ) ? \"bc\" : \"bnc\",
+ (get_attr_length (insn) == 2) ? \".s\" : \"\");
+ return instruction;
+}"
+ [(set_attr "type" "branch")
+ ; cf PR gcc/28508
+  ; We use 300/600 instead of 512/1024 to account for inaccurate insn
+  ; lengths and insn alignments that are complex to track.
+  ; It's not important that we be hyper-precise here. Precision may matter
+  ; more once the chip supports parallel execution, but until then this is
+  ; simple and suffices.
+ (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+ (const_int 300))
+ (const_int 600))
+ (const_int 2)
+ (const_int 4)))])
+
+; reg/reg compare and branch insns
+
+(define_insn "*reg_branch_insn"
+ [(set (pc)
+ (if_then_else (match_operator 1 "eqne_comparison_operator"
+ [(match_operand:SI 2 "register_operand" "r")
+ (match_operand:SI 3 "register_operand" "r")])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ /* Is branch target reachable with beq/bne? */
+ if (get_attr_length (insn) == 4)
+ {
+ if (GET_CODE (operands[1]) == EQ)
+ return \"beq %2,%3,%l0\";
+ else
+ return \"bne %2,%3,%l0\";
+ }
+ else
+ {
+ if (GET_CODE (operands[1]) == EQ)
+ return \"bne %2,%3,1f\;bra %l0\;1:\";
+ else
+ return \"beq %2,%3,1f\;bra %l0\;1:\";
+ }
+}"
+ [(set_attr "type" "branch")
+  ; We use 25000/50000 instead of 32768/65536 to account for slot filling,
+  ; which is complex to track, and for inaccurate length specs.
+ (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+ (const_int 25000))
+ (const_int 50000))
+ (const_int 4)
+ (const_int 8)))])
+
+(define_insn "*rev_reg_branch_insn"
+ [(set (pc)
+ (if_then_else (match_operator 1 "eqne_comparison_operator"
+ [(match_operand:SI 2 "register_operand" "r")
+ (match_operand:SI 3 "register_operand" "r")])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ /* Is branch target reachable with beq/bne? */
+ if (get_attr_length (insn) == 4)
+ {
+ if (GET_CODE (operands[1]) == NE)
+ return \"beq %2,%3,%l0\";
+ else
+ return \"bne %2,%3,%l0\";
+ }
+ else
+ {
+ if (GET_CODE (operands[1]) == NE)
+ return \"bne %2,%3,1f\;bra %l0\;1:\";
+ else
+ return \"beq %2,%3,1f\;bra %l0\;1:\";
+ }
+}"
+ [(set_attr "type" "branch")
+   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
+   ; (which is complex to track) and for inaccurate length specs.
+ (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+ (const_int 25000))
+ (const_int 50000))
+ (const_int 4)
+ (const_int 8)))])
+
+; reg/zero compare and branch insns
+
+(define_insn "*zero_branch_insn"
+ [(set (pc)
+ (if_then_else (match_operator 1 "signed_comparison_operator"
+ [(match_operand:SI 2 "register_operand" "r")
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ const char *br,*invbr;
+ char asmtext[40];
+
+ switch (GET_CODE (operands[1]))
+ {
+ case EQ : br = \"eq\"; invbr = \"ne\"; break;
+ case NE : br = \"ne\"; invbr = \"eq\"; break;
+ case LE : br = \"le\"; invbr = \"gt\"; break;
+ case GT : br = \"gt\"; invbr = \"le\"; break;
+ case LT : br = \"lt\"; invbr = \"ge\"; break;
+ case GE : br = \"ge\"; invbr = \"lt\"; break;
+
+ default: gcc_unreachable ();
+ }
+
+ /* Is branch target reachable with bxxz? */
+ if (get_attr_length (insn) == 4)
+ {
+ sprintf (asmtext, \"b%sz %%2,%%l0\", br);
+ output_asm_insn (asmtext, operands);
+ }
+ else
+ {
+ sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", invbr);
+ output_asm_insn (asmtext, operands);
+ }
+ return \"\";
+}"
+ [(set_attr "type" "branch")
+   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
+   ; (which is complex to track) and for inaccurate length specs.
+ (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+ (const_int 25000))
+ (const_int 50000))
+ (const_int 4)
+ (const_int 8)))])
+
+(define_insn "*rev_zero_branch_insn"
+ [(set (pc)
+ (if_then_else (match_operator 1 "eqne_comparison_operator"
+ [(match_operand:SI 2 "register_operand" "r")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ const char *br,*invbr;
+ char asmtext[40];
+
+ switch (GET_CODE (operands[1]))
+ {
+ case EQ : br = \"eq\"; invbr = \"ne\"; break;
+ case NE : br = \"ne\"; invbr = \"eq\"; break;
+ case LE : br = \"le\"; invbr = \"gt\"; break;
+ case GT : br = \"gt\"; invbr = \"le\"; break;
+ case LT : br = \"lt\"; invbr = \"ge\"; break;
+ case GE : br = \"ge\"; invbr = \"lt\"; break;
+
+ default: gcc_unreachable ();
+ }
+
+ /* Is branch target reachable with bxxz? */
+ if (get_attr_length (insn) == 4)
+ {
+ sprintf (asmtext, \"b%sz %%2,%%l0\", invbr);
+ output_asm_insn (asmtext, operands);
+ }
+ else
+ {
+ sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", br);
+ output_asm_insn (asmtext, operands);
+ }
+ return \"\";
+}"
+ [(set_attr "type" "branch")
+   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
+   ; (which is complex to track) and for inaccurate length specs.
+ (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+ (const_int 25000))
+ (const_int 50000))
+ (const_int 4)
+ (const_int 8)))])
+
+;; S<cc> operations to set a register to 1/0 based on a comparison
+
+(define_expand "cstoresi4"
+ [(match_operand:SI 0 "register_operand" "")
+ (match_operator:SI 1 "ordered_comparison_operator"
+ [(match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "reg_or_cmp_int16_operand" "")])]
+ ""
+ "
+{
+ if (GET_MODE (operands[0]) != SImode)
+ FAIL;
+
+ if (!gen_cond_store (GET_CODE (operands[1]),
+ operands[0], operands[2], operands[3]))
+ FAIL;
+
+ DONE;
+}")
+
+(define_insn "seq_insn_m32rx"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (eq:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "reg_or_zero_operand" "rP")))
+ (clobber (reg:CC 17))]
+ "TARGET_M32RX || TARGET_M32R2"
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "6")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (eq:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "reg_or_zero_operand" "")))
+ (clobber (reg:CC 17))]
+ "TARGET_M32RX || TARGET_M32R2"
+ [(set (reg:CC 17)
+ (eq:CC (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))]
+ "")
+
+(define_insn "seq_zero_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC 17))]
+ "TARGET_M32R"
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "6")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0)))
+ (clobber (reg:CC 17))]
+ "TARGET_M32R"
+ [(match_dup 3)]
+ "
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+
+ start_sequence ();
+ emit_insn (gen_cmp_ltusi_insn (op1, const1_rtx));
+ emit_insn (gen_movcc_insn (op0));
+ operands[3] = get_insns ();
+ end_sequence ();
+}")
+
+(define_insn "seq_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,??r,r")
+ (eq:SI (match_operand:SI 1 "register_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_eq_int16_operand" "r,r,r,PK")))
+ (clobber (reg:CC 17))
+ (clobber (match_scratch:SI 3 "=1,2,&r,r"))]
+ "TARGET_M32R"
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8,8,10,10")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (eq:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "reg_or_eq_int16_operand" "")))
+ (clobber (reg:CC 17))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_M32R && reload_completed"
+ [(match_dup 4)]
+ "
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+ rtx op2 = operands[2];
+ rtx op3 = operands[3];
+ HOST_WIDE_INT value;
+
+ if (REG_P (op2) && REG_P (op3)
+ && REGNO (op2) == REGNO (op3))
+ {
+ op1 = operands[2];
+ op2 = operands[1];
+ }
+
+ start_sequence ();
+ if (REG_P (op1) && REG_P (op3)
+ && REGNO (op1) != REGNO (op3))
+ {
+ emit_move_insn (op3, op1);
+ op1 = op3;
+ }
+
+ if (satisfies_constraint_P (op2) && (value = INTVAL (op2)) != 0)
+ emit_insn (gen_addsi3 (op3, op1, GEN_INT (-value)));
+ else
+ emit_insn (gen_xorsi3 (op3, op1, op2));
+
+ emit_insn (gen_cmp_ltusi_insn (op3, const1_rtx));
+ emit_insn (gen_movcc_insn (op0));
+ operands[4] = get_insns ();
+ end_sequence ();
+}")
+
+(define_insn "sne_zero_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC 17))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "6")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0)))
+ (clobber (reg:CC 17))
+ (clobber (match_scratch:SI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (const_int 0))
+ (set (reg:CC 17)
+ (ltu:CC (match_dup 2)
+ (match_dup 1)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))]
+ "")
+
+(define_insn "slt_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (lt:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+ (clobber (reg:CC 17))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "4,6")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (lt:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "reg_or_int16_operand" "")))
+ (clobber (reg:CC 17))]
+ ""
+ [(set (reg:CC 17)
+ (lt:CC (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))]
+ "")
+
+(define_insn "sle_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (le:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))
+ (clobber (reg:CC 17))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (le:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (clobber (reg:CC 17))]
+ "!optimize_size"
+ [(set (reg:CC 17)
+ (lt:CC (match_dup 2)
+ (match_dup 1)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))
+ (set (match_dup 0)
+ (xor:SI (match_dup 0)
+ (const_int 1)))]
+ "")
+
+;; If optimizing for space, use -(reg - 1) rather than xor reg,reg,1 to
+;; invert the comparison result, since this can avoid having a NOP inserted.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (le:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (clobber (reg:CC 17))]
+ "optimize_size"
+ [(set (reg:CC 17)
+ (lt:CC (match_dup 2)
+ (match_dup 1)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))
+ (set (match_dup 0)
+ (neg:SI (match_dup 0)))]
+ "")
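+
+;; Note (worked example): for a 0/1 value v, -(v - 1) maps 0 to 1 and
+;; 1 to 0, so the add/neg pair above inverts the flag with two 16-bit
+;; insns; presumably this is what avoids the padding NOP that a 32-bit
+;; xor3 might otherwise require for alignment.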
+
+(define_insn "sge_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ge:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+ (clobber (reg:CC 17))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8,10")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ge:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "reg_or_int16_operand" "")))
+ (clobber (reg:CC 17))]
+ "!optimize_size"
+ [(set (reg:CC 17)
+ (lt:CC (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))
+ (set (match_dup 0)
+ (xor:SI (match_dup 0)
+ (const_int 1)))]
+ "")
+
+;; If optimizing for space, use -(reg - 1) rather than xor reg,reg,1 to
+;; invert the comparison result, since this can avoid having a NOP inserted.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ge:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "reg_or_int16_operand" "")))
+ (clobber (reg:CC 17))]
+ "optimize_size"
+ [(set (reg:CC 17)
+ (lt:CC (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))
+ (set (match_dup 0)
+ (neg:SI (match_dup 0)))]
+ "")
+
+(define_insn "sltu_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ltu:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+ (clobber (reg:CC 17))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "6,8")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ltu:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "reg_or_int16_operand" "")))
+ (clobber (reg:CC 17))]
+ ""
+ [(set (reg:CC 17)
+ (ltu:CC (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))]
+ "")
+
+(define_insn "sleu_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (leu:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))
+ (clobber (reg:CC 17))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (leu:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (clobber (reg:CC 17))]
+ "!optimize_size"
+ [(set (reg:CC 17)
+ (ltu:CC (match_dup 2)
+ (match_dup 1)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))
+ (set (match_dup 0)
+ (xor:SI (match_dup 0)
+ (const_int 1)))]
+ "")
+
+;; If optimizing for space, use -(reg - 1) rather than xor reg,reg,1 to
+;; invert the comparison result, since this can avoid having a NOP inserted.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (leu:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (clobber (reg:CC 17))]
+ "optimize_size"
+ [(set (reg:CC 17)
+ (ltu:CC (match_dup 2)
+ (match_dup 1)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))
+ (set (match_dup 0)
+ (neg:SI (match_dup 0)))]
+ "")
+
+(define_insn "sgeu_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (geu:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+ (clobber (reg:CC 17))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8,10")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (geu:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "reg_or_int16_operand" "")))
+ (clobber (reg:CC 17))]
+ "!optimize_size"
+ [(set (reg:CC 17)
+ (ltu:CC (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))
+ (set (match_dup 0)
+ (xor:SI (match_dup 0)
+ (const_int 1)))]
+ "")
+
+;; If optimizing for space, use -(reg - 1) rather than xor reg,reg,1 to
+;; invert the comparison result, since this can avoid having a NOP inserted.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (geu:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "reg_or_int16_operand" "")))
+ (clobber (reg:CC 17))]
+ "optimize_size"
+ [(set (reg:CC 17)
+ (ltu:CC (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (ne:SI (reg:CC 17) (const_int 0)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))
+ (set (match_dup 0)
+ (neg:SI (match_dup 0)))]
+ "")
+
+(define_insn "movcc_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ne:SI (reg:CC 17) (const_int 0)))]
+ ""
+ "mvfc %0, cbr"
+ [(set_attr "type" "misc")
+ (set_attr "length" "2")])
+
+
+;; Unconditional and other jump instructions.
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "bra %l0"
+ [(set_attr "type" "uncond_branch")
+ (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+ (const_int 400))
+ (const_int 800))
+ (const_int 2)
+ (const_int 4)))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
+ ""
+ "jmp %a0"
+ [(set_attr "type" "uncond_branch")
+ (set_attr "length" "2")])
+
+(define_insn "return_lr"
+ [(parallel [(return) (use (reg:SI 14))])]
+ ""
+ "jmp lr"
+ [(set_attr "type" "uncond_branch")
+ (set_attr "length" "2")])
+
+(define_insn "return_rte"
+ [(return)]
+ ""
+ "rte"
+ [(set_attr "type" "uncond_branch")
+ (set_attr "length" "2")])
+
+(define_expand "return"
+ [(return)]
+ "direct_return ()"
+ "
+{
+ emit_jump_insn (gen_return_lr ());
+ DONE;
+}")
+
+(define_expand "return_normal"
+ [(return)]
+ "!direct_return ()"
+ "
+{
+ enum m32r_function_type fn_type;
+
+ fn_type = m32r_compute_function_type (current_function_decl);
+ if (M32R_INTERRUPT_P (fn_type))
+ {
+ emit_jump_insn (gen_return_rte ());
+ DONE;
+ }
+
+ emit_jump_insn (gen_return_lr ());
+ DONE;
+}")
+
+(define_expand "tablejump"
+ [(parallel [(set (pc) (match_operand 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))])]
+ ""
+ "
+{
+ /* In pic mode, our address differences are against the base of the
+ table. Add that base value back in; CSE ought to be able to combine
+ the two address loads. */
+ if (flag_pic)
+ {
+ rtx tmp, tmp2;
+
+ tmp = gen_rtx_LABEL_REF (Pmode, operands[1]);
+ tmp2 = operands[0];
+ tmp = gen_rtx_PLUS (Pmode, tmp2, tmp);
+ operands[0] = memory_address (Pmode, tmp);
+ }
+}")
+
+(define_insn "*tablejump_insn"
+ [(set (pc) (match_operand:SI 0 "address_operand" "p"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jmp %a0"
+ [(set_attr "type" "uncond_branch")
+ (set_attr "length" "2")])
+
+(define_expand "call"
+ ;; operands[1] is stack_size_rtx
+ ;; operands[2] is next_arg_register
+ [(parallel [(call (match_operand:SI 0 "call_operand" "")
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 14))])]
+ ""
+ "
+{
+ if (flag_pic)
+ crtl->uses_pic_offset_table = 1;
+}")
+
+(define_insn "*call_via_reg"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "r"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 14))]
+ ""
+ "jl %0"
+ [(set_attr "type" "call")
+ (set_attr "length" "2")])
+
+(define_insn "*call_via_label"
+ [(call (mem:SI (match_operand:SI 0 "call_address_operand" ""))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+{
+ int call26_p = call26_operand (operands[0], FUNCTION_MODE);
+
+ if (! call26_p)
+ {
+      /* We may not be able to reach the target with a `bl' insn, so punt
+	 and leave it to the linker.
+	 We emit the sequence here, rather than using force_reg in the
+	 define_expand, so these insns won't be separated (say, by
+	 scheduling); that keeps things simple for the linker.  */
+ return \"seth r14,%T0\;add3 r14,r14,%B0\;jl r14\";
+ }
+ else
+ return \"bl %0\";
+}"
+ [(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (not (match_test "call26_operand (operands[0], FUNCTION_MODE)"))
+ (const_int 12) ; 10 + 2 for nop filler
+ ; The return address must be on a 4 byte boundary so
+ ; there's no point in using a value of 2 here. A 2 byte
+ ; insn may go in the left slot but we currently can't
+ ; use such knowledge.
+ (const_int 4)))])
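+
+;; Note: `bl' is pc-relative with a limited range (the large-model
+;; comment in t-m32r puts it at 26 signed bits), so the fallback above
+;; builds the full 32-bit address in r14 with seth/add3 and calls
+;; through it with `jl r14'.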
+
+(define_expand "call_value"
+ ;; operand 2 is stack_size_rtx
+ ;; operand 3 is next_arg_register
+ [(parallel [(set (match_operand 0 "register_operand" "=r")
+ (call (match_operand:SI 1 "call_operand" "")
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 14))])]
+ ""
+ "
+{
+ if (flag_pic)
+ crtl->uses_pic_offset_table = 1;
+}")
+
+(define_insn "*call_value_via_reg"
+ [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 14))]
+ ""
+ "jl %1"
+ [(set_attr "type" "call")
+ (set_attr "length" "2")])
+
+(define_insn "*call_value_via_label"
+ [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "call_address_operand" ""))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+{
+ int call26_p = call26_operand (operands[1], FUNCTION_MODE);
+
+ if (flag_pic)
+ crtl->uses_pic_offset_table = 1;
+
+ if (! call26_p)
+ {
+      /* We may not be able to reach the target with a `bl' insn, so punt
+	 and leave it to the linker.
+	 We emit the sequence here, rather than using force_reg in the
+	 define_expand, so these insns won't be separated (say, by
+	 scheduling); that keeps things simple for the linker.  */
+ return \"seth r14,%T1\;add3 r14,r14,%B1\;jl r14\";
+ }
+ else
+ return \"bl %1\";
+}"
+ [(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (not (match_test "call26_operand (operands[1], FUNCTION_MODE)"))
+ (const_int 12) ; 10 + 2 for nop filler
+ ; The return address must be on a 4 byte boundary so
+ ; there's no point in using a value of 2 here. A 2 byte
+ ; insn may go in the left slot but we currently can't
+ ; use such knowledge.
+ (const_int 4)))])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
+ ""
+ "")
+
+;; Special pattern to flush the icache.
+
+(define_insn "flush_icache"
+ [(unspec_volatile [(match_operand 0 "memory_operand" "m")]
+ UNSPECV_FLUSH_ICACHE)
+ (match_operand 1 "" "")
+ (clobber (reg:SI 17))]
+ ""
+ "* return \"trap %#%1 ; flush-icache\";"
+ [(set_attr "type" "int4")
+ (set_attr "length" "4")])
+
+;; Speed up fabs and provide correct sign handling for -0
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (abs:DF (match_operand:DF 1 "register_operand" "0")))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "4")])
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (abs:DF (match_operand:DF 1 "register_operand" "")))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (ashift:SI (match_dup 2)
+ (const_int 1)))
+ (set (match_dup 2)
+ (lshiftrt:SI (match_dup 2)
+ (const_int 1)))]
+ "operands[2] = gen_highpart (SImode, operands[0]);")
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (abs:SF (match_operand:SF 1 "register_operand" "0")))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "4")])
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (abs:SF (match_operand:SF 1 "register_operand" "")))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (ashift:SI (match_dup 2)
+ (const_int 1)))
+ (set (match_dup 2)
+ (lshiftrt:SI (match_dup 2)
+ (const_int 1)))]
+ "operands[2] = gen_highpart (SImode, operands[0]);")
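+
+;; Note: both splits clear the sign bit of the most significant word
+;; arithmetically: the left shift by one drops the sign bit and the
+;; logical right shift restores the remaining bits with a zero sign,
+;; which also gives -0.0 the correct positive sign.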
+
+;; Conditional move instructions
+;; Based on those done for the d10v
+
+(define_expand "movsicc"
+  [(set (match_operand:SI 0 "register_operand" "r")
+	(if_then_else:SI (match_operand 1 "" "")
+			 (match_operand:SI 2 "conditional_move_operand" "O")
+			 (match_operand:SI 3 "conditional_move_operand" "O")))]
+ ""
+ "
+{
+  if (! zero_and_one (operands[2], operands[3]))
+ FAIL;
+
+ /* Generate the comparison that will set the carry flag. */
+ operands[1] = gen_compare (GET_CODE (operands[1]), XEXP (operands[1], 0),
+ XEXP (operands[1], 1), TRUE);
+
+  /* Emit a blockage so that later passes cannot rearrange or separate
+     the comparison and the conditional move that consumes the carry
+     flag.  */
+ emit_insn (gen_blockage ());
+}")
+
+;; Generate the conditional instructions based on how the carry flag is examined.
+(define_insn "*movsicc_internal"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(if_then_else:SI (match_operand 1 "carry_compare_operand" "")
+			 (match_operand:SI 2 "conditional_move_operand" "O")
+			 (match_operand:SI 3 "conditional_move_operand" "O")))]
+  "zero_and_one (operands[2], operands[3])"
+  "* return emit_cond_move (operands, insn);"
+  [(set_attr "type" "multi")
+   (set_attr "length" "8")])
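+
+;; Note: only 0/1 arms are handled (zero_and_one), so the expander
+;; arranges for the comparison result to land in the carry bit and
+;; emit_cond_move (in m32r.c) materializes the 0/1 value from it,
+;; inverting when the arms are the other way around.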
+
+
+;; Block moves, see m32r.c for more details.
+;; Argument 0 is the destination
+;; Argument 1 is the source
+;; Argument 2 is the length
+;; Argument 3 is the alignment
+
+(define_expand "movmemsi"
+ [(parallel [(set (match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" ""))
+ (use (match_operand:SI 2 "immediate_operand" ""))
+ (use (match_operand:SI 3 "immediate_operand" ""))])]
+ ""
+ "
+{
+ if (operands[0]) /* Avoid unused code messages. */
+ {
+ if (m32r_expand_block_move (operands))
+ DONE;
+ else
+ FAIL;
+ }
+}")
+
+;; Insn generated by block moves
+
+(define_insn "movmemsi_internal"
+ [(set (mem:BLK (match_operand:SI 0 "register_operand" "r")) ;; destination
+ (mem:BLK (match_operand:SI 1 "register_operand" "r"))) ;; source
+ (use (match_operand:SI 2 "m32r_block_immediate_operand" "J"));; # bytes to move
+ (set (match_operand:SI 3 "register_operand" "=0")
+ (plus:SI (minus (match_dup 2) (const_int 4))
+ (match_dup 0)))
+ (set (match_operand:SI 4 "register_operand" "=1")
+ (plus:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_scratch:SI 5 "=&r")) ;; temp1
+ (clobber (match_scratch:SI 6 "=&r"))] ;; temp2
+ ""
+ "* m32r_output_block_move (insn, operands); return \"\"; "
+ [(set_attr "type" "store8")
+ (set_attr "length" "72")]) ;; Maximum
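+
+;; Note: besides the copy itself, the pattern records the final pointer
+;; values: operand 3 is set to dest + bytes - 4 and operand 4 to
+;; src + bytes, describing where the emitted sequence leaves the
+;; pointer registers.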
+
+;; PIC
+
+;; When generating pic, we need to load the symbol offset into a register.
+;; So that the optimizer does not confuse this with a normal symbol load,
+;; we use an unspec.  The offset will be loaded from a constant pool entry,
+;; since that is the only type of relocation we can use.
+
+(define_insn "pic_load_addr"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")] UNSPEC_PIC_LOAD_ADDR))]
+ "flag_pic"
+ "ld24 %0,%#%1"
+ [(set_attr "type" "int4")])
+
+(define_insn "gotoff_load_addr"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")] UNSPEC_GOTOFF))]
+ "flag_pic"
+ "seth %0, %#shigh(%1@GOTOFF)\;add3 %0, %0, low(%1@GOTOFF)"
+ [(set_attr "type" "int4")
+ (set_attr "length" "8")])
+
+;; Load program counter insns.
+
+(define_insn "get_pc"
+ [(clobber (reg:SI 14))
+ (set (match_operand 0 "register_operand" "=r,r")
+ (unspec [(match_operand 1 "" "")] UNSPEC_GET_PC))
+ (use (match_operand:SI 2 "immediate_operand" "W,i"))]
+ "flag_pic"
+ "@
+ bl.s .+4\;seth %0,%#shigh(%1)\;add3 %0,%0,%#low(%1+4)\;add %0,lr
+ bl.s .+4\;ld24 %0,%#%1\;add %0,lr"
+ [(set_attr "length" "12,8")])
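+
+;; Note: `bl.s .+4' branches to the immediately following insn, so its
+;; only lasting effect is to capture that insn's address in lr; adding
+;; lr to the assembled offset then yields the runtime address.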
+
+(define_expand "builtin_setjmp_receiver"
+ [(label_ref (match_operand 0 "" ""))]
+ "flag_pic"
+ "
+{
+ m32r_load_pic_register ();
+ DONE;
+}")
diff --git a/gcc-4.9/gcc/config/m32r/m32r.opt b/gcc-4.9/gcc/config/m32r/m32r.opt
new file mode 100644
index 000000000..64afd93b8
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/m32r.opt
@@ -0,0 +1,117 @@
+; Options for the Renesas M32R port of the compiler.
+
+; Copyright (C) 2005-2014 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+HeaderInclude
+config/m32r/m32r-opts.h
+
+; Selected code model.
+Variable
+enum m32r_model m32r_model_selected = M32R_MODEL_DEFAULT
+
+; Selected SDA support.
+Variable
+enum m32r_sdata m32r_sdata_selected = M32R_SDATA_DEFAULT
+
+m32rx
+Target Report RejectNegative Mask(M32RX)
+Compile for the m32rx
+
+m32r2
+Target Report RejectNegative Mask(M32R2)
+Compile for the m32r2
+
+m32r
+Target RejectNegative
+Compile for the m32r
+
+malign-loops
+Target Report Mask(ALIGN_LOOPS)
+Align all loops to a 32-byte boundary
+
+mbranch-cost=1
+Target Report RejectNegative Mask(BRANCH_COST)
+Prefer branches over conditional execution
+
+mbranch-cost=2
+Target Report RejectNegative InverseMask(BRANCH_COST)
+Give branches their default cost
+
+mdebug
+Target Mask(DEBUG)
+Display compile time statistics
+
+mflush-func=
+Target RejectNegative Joined Var(m32r_cache_flush_func) Init(CACHE_FLUSH_FUNC)
+Specify cache flush function
+
+mflush-trap=
+Target RejectNegative Joined UInteger Var(m32r_cache_flush_trap) Init(CACHE_FLUSH_TRAP)
+Specify cache flush trap number
+
+missue-rate=1
+Target Report RejectNegative Mask(LOW_ISSUE_RATE)
+Only issue one instruction per cycle
+
+missue-rate=2
+Target Report RejectNegative InverseMask(LOW_ISSUE_RATE)
+Allow two instructions to be issued per cycle
+
+mmodel=
+Target RejectNegative Joined Enum(m32r_model) Var(m32r_model_selected)
+Code size: small, medium or large
+
+Enum
+Name(m32r_model) Type(enum m32r_model)
+
+EnumValue
+Enum(m32r_model) String(small) Value(M32R_MODEL_SMALL)
+
+EnumValue
+Enum(m32r_model) String(medium) Value(M32R_MODEL_MEDIUM)
+
+EnumValue
+Enum(m32r_model) String(large) Value(M32R_MODEL_LARGE)
+
+mno-flush-func
+Target RejectNegative
+Don't call any cache flush functions
+
+mno-flush-trap
+Target RejectNegative Var(m32r_cache_flush_trap, -1)
+Don't use any cache flush trap
+
+; mrelax
+; Target Mask(RELAX)
+
+msdata=
+Target RejectNegative Joined Enum(m32r_sdata) Var(m32r_sdata_selected)
+Small data area: none, sdata, use
+
+Enum
+Name(m32r_sdata) Type(enum m32r_sdata)
+
+EnumValue
+Enum(m32r_sdata) String(none) Value(M32R_SDATA_NONE)
+
+EnumValue
+Enum(m32r_sdata) String(sdata) Value(M32R_SDATA_SDATA)
+
+EnumValue
+Enum(m32r_sdata) String(use) Value(M32R_SDATA_USE)
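+
+; Example usage (illustrative): `gcc -mmodel=medium -msdata=sdata'
+; selects the medium code model and places small data in the .sdata
+; section.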
diff --git a/gcc-4.9/gcc/config/m32r/predicates.md b/gcc-4.9/gcc/config/m32r/predicates.md
new file mode 100644
index 000000000..dbe11e8db
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/predicates.md
@@ -0,0 +1,440 @@
+;; Predicate definitions for Renesas M32R.
+;; Copyright (C) 2005-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Return true if OP is a register or the constant 0.
+
+(define_predicate "reg_or_zero_operand"
+ (match_code "reg,subreg,const_int")
+{
+ if (REG_P (op) || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+
+ if (!CONST_INT_P (op))
+ return 0;
+
+ return INTVAL (op) == 0;
+})
+
+;; Return nonzero if the operand is suitable for use in a conditional
+;; move sequence.
+
+(define_predicate "conditional_move_operand"
+ (match_code "reg,subreg,const_int")
+{
+ /* Only defined for simple integers so far... */
+ if (mode != SImode && mode != HImode && mode != QImode)
+ return FALSE;
+
+ /* At the moment we can handle moving registers and loading constants. */
+ /* To be added: Addition/subtraction/bitops/multiplication of registers. */
+
+ switch (GET_CODE (op))
+ {
+ case REG:
+ return 1;
+
+ case CONST_INT:
+ return satisfies_constraint_I (op);
+
+ default:
+#if 0
+ fprintf (stderr, "Test for cond move op of type: %s\n",
+ GET_RTX_NAME (GET_CODE (op)));
+#endif
+ return 0;
+ }
+})
+
+;; Return true if the code is a test of the carry bit.
+
+(define_predicate "carry_compare_operand"
+ (match_code "eq,ne")
+{
+ rtx x;
+
+ if (GET_MODE (op) != CCmode && GET_MODE (op) != VOIDmode)
+ return FALSE;
+
+ if (GET_CODE (op) != NE && GET_CODE (op) != EQ)
+ return FALSE;
+
+ x = XEXP (op, 0);
+ if (!REG_P (x) || REGNO (x) != CARRY_REGNUM)
+ return FALSE;
+
+ x = XEXP (op, 1);
+ if (!CONST_INT_P (x) || INTVAL (x) != 0)
+ return FALSE;
+
+ return TRUE;
+})
+
+;; Return 1 if OP is an EQ or NE comparison operator.
+
+(define_predicate "eqne_comparison_operator"
+ (match_code "eq,ne")
+{
+ enum rtx_code code = GET_CODE (op);
+
+ return (code == EQ || code == NE);
+})
+
+;; Return 1 if OP is a signed comparison operator.
+
+(define_predicate "signed_comparison_operator"
+ (match_code "eq,ne,lt,le,gt,ge")
+{
+ enum rtx_code code = GET_CODE (op);
+
+ return (COMPARISON_P (op)
+ && (code == EQ || code == NE
+ || code == LT || code == LE || code == GT || code == GE));
+})
+
+;; Return true if OP is an acceptable argument for a move destination.
+
+(define_predicate "move_dest_operand"
+ (match_code "reg,subreg,mem")
+{
+ switch (GET_CODE (op))
+ {
+ case REG :
+ return register_operand (op, mode);
+ case SUBREG :
+ /* (subreg (mem ...) ...) can occur here if the inner part was once a
+ pseudo-reg and is now a stack slot. */
+ if (MEM_P (SUBREG_REG (op)))
+ return address_operand (XEXP (SUBREG_REG (op), 0), mode);
+ else
+ return register_operand (op, mode);
+ case MEM :
+ if (GET_CODE (XEXP (op, 0)) == POST_INC)
+ return 0; /* stores can't do post inc */
+ return address_operand (XEXP (op, 0), mode);
+ default :
+ return 0;
+ }
+})
+
+;; Return true if OP is an acceptable argument for a single word move
+;; source.
+
+(define_predicate "move_src_operand"
+ (match_code "reg,subreg,mem,const_int,const_double,label_ref,const,symbol_ref")
+{
+ switch (GET_CODE (op))
+ {
+ case LABEL_REF :
+ case SYMBOL_REF :
+ case CONST :
+ return addr24_operand (op, mode);
+ case CONST_INT :
+      /* ??? We would get more cse opportunities if we only allowed
+	 constants loadable with one insn, and split the rest into two.
+	 The instances where this would help should be rare and the
+	 current way is simpler.  */
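+      /* On a 64-bit host, INTVAL (op) >> 31 is 0 for a nonnegative
+	 32-bit value and -1 for a negative one, so the test below
+	 accepts exactly the sign-extended 32-bit constants.  */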
+ if (HOST_BITS_PER_WIDE_INT > 32)
+ {
+ HOST_WIDE_INT rest = INTVAL (op) >> 31;
+ return (rest == 0 || rest == -1);
+ }
+ else
+ return 1;
+ case CONST_DOUBLE :
+ if (mode == SFmode)
+ return 1;
+ else if (mode == SImode)
+ {
+ /* Large unsigned constants are represented as const_double's. */
+ unsigned HOST_WIDE_INT low, high;
+
+ low = CONST_DOUBLE_LOW (op);
+ high = CONST_DOUBLE_HIGH (op);
+ return high == 0 && low <= (unsigned) 0xffffffff;
+ }
+ else
+ return 0;
+ case REG :
+ return register_operand (op, mode);
+ case SUBREG :
+ /* (subreg (mem ...) ...) can occur here if the inner part was once a
+ pseudo-reg and is now a stack slot. */
+ if (MEM_P (SUBREG_REG (op)))
+ return address_operand (XEXP (SUBREG_REG (op), 0), mode);
+ else
+ return register_operand (op, mode);
+ case MEM :
+ if (GET_CODE (XEXP (op, 0)) == PRE_INC
+ || GET_CODE (XEXP (op, 0)) == PRE_DEC)
+ return 0; /* loads can't do pre-{inc,dec} */
+ return address_operand (XEXP (op, 0), mode);
+ default :
+ return 0;
+ }
+})
+
+;; Return true if OP is an acceptable argument for a double word move
+;; source.
+
+(define_predicate "move_double_src_operand"
+ (match_code "reg,subreg,mem,const_int,const_double")
+{
+ switch (GET_CODE (op))
+ {
+ case CONST_INT :
+ case CONST_DOUBLE :
+ return 1;
+ case REG :
+ return register_operand (op, mode);
+ case SUBREG :
+ /* (subreg (mem ...) ...) can occur here if the inner part was once a
+ pseudo-reg and is now a stack slot. */
+ if (MEM_P (SUBREG_REG (op)))
+ return move_double_src_operand (SUBREG_REG (op), mode);
+ else
+ return register_operand (op, mode);
+ case MEM :
+ /* Disallow auto inc/dec for now. */
+ if (GET_CODE (XEXP (op, 0)) == PRE_DEC
+ || GET_CODE (XEXP (op, 0)) == PRE_INC)
+ return 0;
+ return address_operand (XEXP (op, 0), mode);
+ default :
+ return 0;
+ }
+})
+
+;; Return true if OP is a const_int requiring two instructions to
+;; load.
+
+(define_predicate "two_insn_const_operand"
+ (match_code "const_int")
+{
+ if (!CONST_INT_P (op))
+ return 0;
+ if (satisfies_constraint_J (op)
+ || satisfies_constraint_M (op)
+ || satisfies_constraint_L (op))
+ return 0;
+ return 1;
+})
+
+;; Return 1 if OP is a symbol reference.
+
+(define_predicate "symbolic_operand"
+ (match_code "symbol_ref,label_ref,const")
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST :
+ return 1;
+
+ default:
+ return 0;
+ }
+})
+
+;; Return true if OP is a signed 8-bit immediate value.
+
+(define_predicate "int8_operand"
+ (match_code "const_int")
+{
+ if (!CONST_INT_P (op))
+ return 0;
+ return satisfies_constraint_I (op);
+})
+
+;; Return true if OP is an unsigned 16-bit immediate value.
+
+(define_predicate "uint16_operand"
+ (match_code "const_int")
+{
+ if (!CONST_INT_P (op))
+ return 0;
+ return satisfies_constraint_K (op);
+})
+
+;; Return true if OP is a register or signed 16-bit value.
+
+(define_predicate "reg_or_int16_operand"
+ (match_code "reg,subreg,const_int")
+{
+ if (REG_P (op) || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+ if (!CONST_INT_P (op))
+ return 0;
+ return satisfies_constraint_J (op);
+})
+
+;; Return true if OP is a register or an unsigned 16-bit value.
+
+(define_predicate "reg_or_uint16_operand"
+ (match_code "reg,subreg,const_int")
+{
+ if (REG_P (op) || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+ if (!CONST_INT_P (op))
+ return 0;
+ return satisfies_constraint_K (op);
+})
+
+;; Return true if OP is a register or signed 16-bit value for
+;; compares.
+
+(define_predicate "reg_or_cmp_int16_operand"
+ (match_code "reg,subreg,const_int")
+{
+ if (REG_P (op) || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+ if (!CONST_INT_P (op))
+ return 0;
+ return satisfies_constraint_P (op);
+})
+
+;; Return true if OP is a register or an integer value that can be
+;; used in SEQ/SNE.  We can use either XOR of the value or ADD of the
+;; negative of the value for the constant.  Don't allow 0, because
+;; that is special cased.
+
+(define_predicate "reg_or_eq_int16_operand"
+ (match_code "reg,subreg,const_int")
+{
+ HOST_WIDE_INT value;
+
+ if (REG_P (op) || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+
+ if (!CONST_INT_P (op))
+ return 0;
+
+ value = INTVAL (op);
+ return (value != 0) && (UINT16_P (value) || CMP_INT16_P (-value));
+})
+
+;; Return true if OP is a signed 16-bit immediate value useful in
+;; comparisons.
+
+(define_predicate "cmp_int16_operand"
+ (match_code "const_int")
+{
+ if (!CONST_INT_P (op))
+ return 0;
+ return satisfies_constraint_P (op);
+})
+
+;; Acceptable arguments to the call insn.
+
+(define_predicate "call_address_operand"
+ (match_code "symbol_ref,label_ref,const")
+{
+  /* Constants and values in registers are not OK, because
+     the m32r BL instruction can only support PC relative branching.  */
+  return symbolic_operand (op, mode);
+})
+
+;; Return true if OP is an acceptable input argument for a zero/sign
+;; extend operation.
+
+(define_predicate "extend_operand"
+ (match_code "reg,subreg,mem")
+{
+ rtx addr;
+
+ switch (GET_CODE (op))
+ {
+ case REG :
+ case SUBREG :
+ return register_operand (op, mode);
+
+ case MEM :
+ addr = XEXP (op, 0);
+ if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
+ return 0; /* loads can't do pre inc/pre dec */
+
+ return address_operand (addr, mode);
+
+ default :
+ return 0;
+ }
+})
+
+;; Return nonzero if the operand is a small (2-byte) insn.  Allow
+;; const_int 0 as well, which is a placeholder for NOP slots.
+
+(define_predicate "small_insn_p"
+ (match_code "insn,call_insn,jump_insn")
+{
+ if (CONST_INT_P (op) && INTVAL (op) == 0)
+ return 1;
+
+ if (! INSN_P (op))
+ return 0;
+
+ return get_attr_length (op) == 2;
+})
+
+;; Return true if op is an integer constant, less than or equal to
+;; MAX_MOVE_BYTES.
+
+(define_predicate "m32r_block_immediate_operand"
+ (match_code "const_int")
+{
+ if (!CONST_INT_P (op)
+ || INTVAL (op) > MAX_MOVE_BYTES
+ || INTVAL (op) <= 0)
+ return 0;
+
+ return 1;
+})
+
+;; Return nonzero if the operand is a large insn (longer than 2 bytes).
+
+(define_predicate "large_insn_p"
+ (match_code "insn,call_insn,jump_insn")
+{
+ if (! INSN_P (op))
+ return 0;
+
+ return get_attr_length (op) != 2;
+})
+
+;; Return 1 if OP is an acceptable operand for seth/add3.
+
+(define_predicate "seth_add3_operand"
+ (match_code "symbol_ref,label_ref,const")
+{
+ if (flag_pic)
+ return 0;
+
+ if (GET_CODE (op) == SYMBOL_REF
+ || GET_CODE (op) == LABEL_REF)
+ return 1;
+
+ if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+ && satisfies_constraint_J (XEXP (XEXP (op, 0), 1)))
+ return 1;
+
+ return 0;
+})
diff --git a/gcc-4.9/gcc/config/m32r/rtems.h b/gcc-4.9/gcc/config/m32r/rtems.h
new file mode 100644
index 000000000..0fc47aad5
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/rtems.h
@@ -0,0 +1,33 @@
+/* Definitions for rtems targeting a M32R using ELF.
+ Copyright (C) 2009-2014 Free Software Foundation, Inc.
+ Contributed by Joel Sherrill (joel@OARcorp.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Target OS builtins. */
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__rtems__"); \
+ builtin_define ("__USE_INIT_FINI__"); \
+ builtin_assert ("system=rtems"); \
+ } \
+ while (0)
+
+/* Use the default.  */
+#undef LINK_GCC_C_SEQUENCE_SPEC
diff --git a/gcc-4.9/gcc/config/m32r/t-linux b/gcc-4.9/gcc/config/m32r/t-linux
new file mode 100644
index 000000000..3e1519997
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/t-linux
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Don't install "assert.h" in gcc. We use the one in glibc.
+INSTALL_ASSERT_H =
diff --git a/gcc-4.9/gcc/config/m32r/t-m32r b/gcc-4.9/gcc/config/m32r/t-m32r
new file mode 100644
index 000000000..dc016a8d3
--- /dev/null
+++ b/gcc-4.9/gcc/config/m32r/t-m32r
@@ -0,0 +1,31 @@
+# Copyright (C) 1997-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# -mmodel={small,medium} requires separate libraries.
+# We don't build libraries for the large model; instead we use the medium
+# libraries.  The only difference is that the large model can handle jumps
+# to targets more than 26 signed bits away.
+
+MULTILIB_OPTIONS = mmodel=small/mmodel=medium m32r/m32rx/m32r2
+MULTILIB_DIRNAMES = small medium m32r m32rx m32r2
+MULTILIB_MATCHES = mmodel?medium=mmodel?large
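+
+# Example (illustrative): a build with `-mmodel=large -m32rx' links
+# against the medium/m32rx multilib, since MULTILIB_MATCHES maps the
+# large model onto the medium libraries.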
+
+# Set MULTILIB_EXTRA_OPTS so shipped libraries have small data in .sdata and
+# SHN_M32R_SCOMMON.
+# This is important for objects referenced in system header files.
+MULTILIB_EXTRA_OPTS = msdata=sdata