about summary refs log tree commit diff stats
path: root/gcc-4.8/gcc/config/cris
diff options
context:
space:
mode:
author    Ben Cheng <bccheng@google.com>  2013-03-28 11:14:20 -0700
committer Ben Cheng <bccheng@google.com>  2013-03-28 12:40:33 -0700
commit    af0c51ac87ab2a87caa03fa108f0d164987a2764 (patch)
tree      4b8b470f7c5b69642fdab8d0aa1fbc148d02196b /gcc-4.8/gcc/config/cris
parent    d87cae247d39ebf4f5a6bf25c932a14d2fdb9384 (diff)
download  toolchain_gcc-af0c51ac87ab2a87caa03fa108f0d164987a2764.tar.gz
toolchain_gcc-af0c51ac87ab2a87caa03fa108f0d164987a2764.tar.bz2
toolchain_gcc-af0c51ac87ab2a87caa03fa108f0d164987a2764.zip
[GCC 4.8] Initial check-in of GCC 4.8.0
Change-Id: I0719d8a6d0f69b367a6ab6f10eb75622dbf12771
Diffstat (limited to 'gcc-4.8/gcc/config/cris')
-rw-r--r--  gcc-4.8/gcc/config/cris/constraints.md   164
-rw-r--r--  gcc-4.8/gcc/config/cris/cris-protos.h     67
-rw-r--r--  gcc-4.8/gcc/config/cris/cris.c          4345
-rw-r--r--  gcc-4.8/gcc/config/cris/cris.h          1081
-rw-r--r--  gcc-4.8/gcc/config/cris/cris.md         5156
-rw-r--r--  gcc-4.8/gcc/config/cris/cris.opt         202
-rw-r--r--  gcc-4.8/gcc/config/cris/elf.opt           25
-rw-r--r--  gcc-4.8/gcc/config/cris/linux.h          150
-rw-r--r--  gcc-4.8/gcc/config/cris/linux.opt         33
-rw-r--r--  gcc-4.8/gcc/config/cris/predicates.md    174
-rw-r--r--  gcc-4.8/gcc/config/cris/sync.md          314
-rw-r--r--  gcc-4.8/gcc/config/cris/t-cris            29
-rw-r--r--  gcc-4.8/gcc/config/cris/t-elfmulti        30
-rw-r--r--  gcc-4.8/gcc/config/cris/t-linux            5
14 files changed, 11775 insertions, 0 deletions
diff --git a/gcc-4.8/gcc/config/cris/constraints.md b/gcc-4.8/gcc/config/cris/constraints.md
new file mode 100644
index 000000000..4083bdff5
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/constraints.md
@@ -0,0 +1,164 @@
+;; Constraint definitions for CRIS.
+;; Copyright (C) 2011-2013 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Register constraints.
+(define_register_constraint "a" "ACR_REGS"
+ "@internal")
+
+(define_register_constraint "b" "GENNONACR_REGS"
+ "@internal")
+
+(define_register_constraint "h" "MOF_REGS"
+ "@internal")
+
+(define_register_constraint "x" "SPECIAL_REGS"
+ "@internal")
+
+(define_register_constraint "c" "CC0_REGS"
+ "@internal")
+
+;; Integer constraints.
+(define_constraint "I"
+ "MOVEQ, CMPQ, ANDQ, ORQ."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, -32, 31)")))
+
+(define_constraint "J"
+ "ADDQ, SUBQ."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 63)")))
+
+(define_constraint "Kc"
+ "ASRQ, BTSTQ, LSRQ, LSLQ."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 31)")))
+
+(define_constraint "Kp"
+ "A power of two."
+ (and (match_code "const_int")
+ (match_test "exact_log2 (ival) >= 0")))
+
+(define_constraint "L"
+ "A 16-bit signed number."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, -32768, 32767)")))
+
+(define_constraint "M"
+ "The constant 0 for CLEAR."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "N"
+ "A negative ADDQ or SUBQ."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, -63, -1)")))
+
+(define_constraint "O"
+ "Quickened ints, QI and HI."
+ (and (match_code "const_int")
+ (ior (match_test "IN_RANGE (ival, (65535 - 31), 65535)")
+ (match_test "IN_RANGE (ival, (255 - 31), 255)"))))
+
+(define_constraint "P"
+ "A 16-bit number signed *or* unsigned."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, -32768, 65535)")))
+
+;; Floating-point constant constraints.
+(define_constraint "G"
+ "The floating point zero constant"
+ (and (match_code "const_double")
+ (match_test "GET_MODE_CLASS (mode) == MODE_FLOAT")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; Memory constraints.
+
+;; Just an indirect register (happens to also be "all" slottable
+;; memory addressing modes not covered by other constraints, i.e. '>').
+(define_memory_constraint "Q"
+ "@internal"
+ (and (match_code "mem")
+ (match_test "cris_base_p (XEXP (op, 0), reload_in_progress
+ || reload_completed)")))
+
+;; Extra constraints.
+(define_constraint "R"
+ "An operand to BDAP or BIAP."
+ ;; A BIAP; r.S?
+ (ior (match_test "cris_biap_index_p (op, reload_in_progress
+ || reload_completed)")
+ ;; A [reg] or (int) [reg], maybe with post-increment.
+ (match_test "cris_bdap_index_p (op, reload_in_progress
+ || reload_completed)")
+ (match_test "cris_constant_index_p (op)")))
+
+(define_constraint "T"
+ "Memory three-address operand."
+ ;; All are indirect-memory:
+ (and (match_code "mem")
+ ;; Double indirect: [[reg]] or [[reg+]]?
+ (ior (and (match_code "mem" "0")
+ (match_test "cris_base_or_autoincr_p (XEXP (XEXP (op, 0), 0),
+ reload_in_progress
+ || reload_completed)"))
+ ;; Just an explicit indirect reference: [const]?
+ (match_test "CONSTANT_P (XEXP (op, 0))")
+ ;; Something that is indexed; [...+...]?
+ (and (match_code "plus" "0")
+ ;; A BDAP constant: [reg+(8|16|32)bit offset]?
+ (ior (and (match_test "cris_base_p (XEXP (XEXP (op, 0), 0),
+ reload_in_progress
+ || reload_completed)")
+ (match_test "cris_constant_index_p (XEXP (XEXP (op, 0), 1))"))
+ ;; A BDAP register: [reg+[reg(+)].S]?
+ (and (match_test "cris_base_p (XEXP (XEXP (op, 0), 0),
+ reload_in_progress
+ || reload_completed)")
+ (match_test "cris_bdap_index_p (XEXP (XEXP (op, 0), 1),
+ reload_in_progress
+ || reload_completed)"))
+ ;; Same, but with swapped arguments (no canonical
+ ;; ordering between e.g. REG and MEM as of LAST_UPDATED
+ ;; "Thu May 12 03:59:11 UTC 2005").
+ (and (match_test "cris_base_p (XEXP (XEXP (op, 0), 1),
+ reload_in_progress
+ || reload_completed)")
+ (match_test "cris_bdap_index_p (XEXP (XEXP (op, 0), 0),
+ reload_in_progress
+ || reload_completed)"))
+ ;; A BIAP: [reg+reg.S] (MULT comes first).
+ (and (match_test "cris_base_p (XEXP (XEXP (op, 0), 1),
+ reload_in_progress
+ || reload_completed)")
+ (match_test "cris_biap_index_p (XEXP (XEXP (op, 0), 0),
+ reload_in_progress
+ || reload_completed)")))))))
+
+(define_constraint "S"
+ "PIC-constructs for symbols."
+ (and (match_test "flag_pic")
+ (match_code "const")
+ (match_test "cris_valid_pic_const (op, false)")))
+
+(define_constraint "U"
+ "@internal"
+ (and (match_test "flag_pic")
+ (match_test "CONSTANT_P (op)")
+ (match_operand 0 "cris_nonmemory_operand_or_callable_symbol")))
+
diff --git a/gcc-4.8/gcc/config/cris/cris-protos.h b/gcc-4.8/gcc/config/cris/cris-protos.h
new file mode 100644
index 000000000..e5439228f
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/cris-protos.h
@@ -0,0 +1,67 @@
+/* Definitions for GCC. Part of the machine description for CRIS.
+ Copyright (C) 1998-2013 Free Software Foundation, Inc.
+ Contributed by Axis Communications.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Prototypes for the CRIS port. */
+
+extern bool cris_simple_epilogue (void);
+#ifdef RTX_CODE
+extern const char *cris_op_str (rtx);
+extern void cris_notice_update_cc (rtx, rtx);
+extern bool cris_reload_address_legitimized (rtx, enum machine_mode, int, int, int);
+extern int cris_side_effect_mode_ok (enum rtx_code, rtx *, int, int,
+ int, int, int);
+extern bool cris_cc0_user_requires_cmp (rtx);
+extern rtx cris_return_addr_rtx (int, rtx);
+extern rtx cris_split_movdx (rtx *);
+extern int cris_legitimate_pic_operand (rtx);
+extern enum cris_pic_symbol_type cris_pic_symbol_type_of (const_rtx);
+extern bool cris_valid_pic_const (const_rtx, bool);
+extern bool cris_constant_index_p (const_rtx);
+extern bool cris_base_p (const_rtx, bool);
+extern bool cris_base_or_autoincr_p (const_rtx, bool);
+extern bool cris_bdap_index_p (const_rtx, bool);
+extern bool cris_biap_index_p (const_rtx, bool);
+extern bool cris_legitimate_address_p (enum machine_mode, rtx, bool);
+extern bool cris_store_multiple_op_p (rtx);
+extern bool cris_movem_load_rest_p (rtx, int);
+extern void cris_asm_output_symbol_ref (FILE *, rtx);
+extern int cris_cfun_uses_pic_table (void);
+extern void cris_asm_output_case_end (FILE *, int, rtx);
+extern rtx cris_gen_movem_load (rtx, rtx, int);
+extern rtx cris_emit_movem_store (rtx, rtx, int, bool);
+extern void cris_expand_pic_call_address (rtx *);
+extern void cris_order_for_addsi3 (rtx *, int);
+extern void cris_emit_trap_for_misalignment (rtx);
+#endif /* RTX_CODE */
+extern void cris_asm_output_label_ref (FILE *, char *);
+extern void cris_asm_output_ident (const char *);
+extern void cris_expand_prologue (void);
+extern void cris_expand_epilogue (void);
+extern void cris_expand_return (bool);
+extern bool cris_return_address_on_stack_for_return (void);
+extern bool cris_return_address_on_stack (void);
+extern void cris_pragma_expand_mul (struct cpp_reader *);
+
+/* Need one that returns an int; usable in expressions. */
+extern int cris_fatal (char *);
+
+extern int cris_initial_elimination_offset (int, int);
+
+extern void cris_init_expanders (void);
diff --git a/gcc-4.8/gcc/config/cris/cris.c b/gcc-4.8/gcc/config/cris/cris.c
new file mode 100644
index 000000000..a9117d766
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/cris.c
@@ -0,0 +1,4345 @@
+/* Definitions for GCC. Part of the machine description for CRIS.
+ Copyright (C) 1998-2013 Free Software Foundation, Inc.
+ Contributed by Axis Communications. Written by Hans-Peter Nilsson.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+#include "except.h"
+#include "function.h"
+#include "diagnostic-core.h"
+#include "recog.h"
+#include "reload.h"
+#include "tm_p.h"
+#include "debug.h"
+#include "output.h"
+#include "tm-constrs.h"
+#include "target.h"
+#include "target-def.h"
+#include "ggc.h"
+#include "optabs.h"
+#include "df.h"
+#include "opts.h"
+#include "cgraph.h"
+
+/* Usable when we have an amount to add or subtract, and want the
+ optimal size of the insn. */
+#define ADDITIVE_SIZE_MODIFIER(size) \
+ ((size) <= 63 ? "q" : (size) <= 255 ? "u.b" : (size) <= 65535 ? "u.w" : ".d")
+
+#define LOSE_AND_RETURN(msgid, x) \
+ do \
+ { \
+ cris_operand_lossage (msgid, x); \
+ return; \
+ } while (0)
+
+enum cris_retinsn_type
+ { CRIS_RETINSN_UNKNOWN = 0, CRIS_RETINSN_RET, CRIS_RETINSN_JUMP };
+
+/* Per-function machine data. */
+struct GTY(()) machine_function
+ {
+ int needs_return_address_on_stack;
+
+ /* This is the number of registers we save in the prologue due to
+ stdarg. */
+ int stdarg_regs;
+
+ enum cris_retinsn_type return_type;
+ };
+
+/* This little fix suppresses the 'u' or 's' when '%e' in assembly
+ pattern. */
+static char cris_output_insn_is_bound = 0;
+
+/* In code for output macros, this is how we know whether e.g. constant
+ goes in code or in a static initializer. */
+static int in_code = 0;
+
+/* Fix for reg_overlap_mentioned_p. */
+static int cris_reg_overlap_mentioned_p (rtx, rtx);
+
+static enum machine_mode cris_promote_function_mode (const_tree, enum machine_mode,
+ int *, const_tree, int);
+
+static void cris_print_base (rtx, FILE *);
+
+static void cris_print_index (rtx, FILE *);
+
+static void cris_output_addr_const (FILE *, rtx);
+
+static struct machine_function * cris_init_machine_status (void);
+
+static rtx cris_struct_value_rtx (tree, int);
+
+static void cris_setup_incoming_varargs (cumulative_args_t, enum machine_mode,
+ tree type, int *, int);
+
+static int cris_initial_frame_pointer_offset (void);
+
+static void cris_operand_lossage (const char *, rtx);
+
+static int cris_reg_saved_in_regsave_area (unsigned int, bool);
+
+static void cris_print_operand (FILE *, rtx, int);
+
+static void cris_print_operand_address (FILE *, rtx);
+
+static bool cris_print_operand_punct_valid_p (unsigned char code);
+
+static bool cris_output_addr_const_extra (FILE *, rtx);
+
+static void cris_conditional_register_usage (void);
+
+static void cris_asm_output_mi_thunk
+ (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
+
+static void cris_file_start (void);
+static void cris_init_libfuncs (void);
+
+static reg_class_t cris_preferred_reload_class (rtx, reg_class_t);
+
+static int cris_register_move_cost (enum machine_mode, reg_class_t, reg_class_t);
+static int cris_memory_move_cost (enum machine_mode, reg_class_t, bool);
+static bool cris_rtx_costs (rtx, int, int, int, int *, bool);
+static int cris_address_cost (rtx, enum machine_mode, addr_space_t, bool);
+static bool cris_pass_by_reference (cumulative_args_t, enum machine_mode,
+ const_tree, bool);
+static int cris_arg_partial_bytes (cumulative_args_t, enum machine_mode,
+ tree, bool);
+static rtx cris_function_arg (cumulative_args_t, enum machine_mode,
+ const_tree, bool);
+static rtx cris_function_incoming_arg (cumulative_args_t,
+ enum machine_mode, const_tree, bool);
+static void cris_function_arg_advance (cumulative_args_t, enum machine_mode,
+ const_tree, bool);
+static tree cris_md_asm_clobbers (tree, tree, tree);
+
+static void cris_option_override (void);
+
+static bool cris_frame_pointer_required (void);
+
+static void cris_asm_trampoline_template (FILE *);
+static void cris_trampoline_init (rtx, tree, rtx);
+
+static rtx cris_function_value(const_tree, const_tree, bool);
+static rtx cris_libcall_value (enum machine_mode, const_rtx);
+static bool cris_function_value_regno_p (const unsigned int);
+static void cris_file_end (void);
+
+/* This is the parsed result of the "-max-stack-stackframe=" option. If
+ it (still) is zero, then there was no such option given. */
+int cris_max_stackframe = 0;
+
+/* This is the parsed result of the "-march=" option, if given. */
+int cris_cpu_version = CRIS_DEFAULT_CPU_VERSION;
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.dword\t"
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
+
+/* We need to define these, since the 2byte, 4byte, 8byte op:s are only
+ available in ELF. These "normal" pseudos do not have any alignment
+ constraints or side-effects. */
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
+
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
+
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
+
+#undef TARGET_PRINT_OPERAND
+#define TARGET_PRINT_OPERAND cris_print_operand
+#undef TARGET_PRINT_OPERAND_ADDRESS
+#define TARGET_PRINT_OPERAND_ADDRESS cris_print_operand_address
+#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
+#define TARGET_PRINT_OPERAND_PUNCT_VALID_P cris_print_operand_punct_valid_p
+#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
+#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA cris_output_addr_const_extra
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE cris_conditional_register_usage
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK cris_asm_output_mi_thunk
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START cris_file_start
+#undef TARGET_ASM_FILE_END
+#define TARGET_ASM_FILE_END cris_file_end
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS cris_init_libfuncs
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P cris_legitimate_address_p
+
+#undef TARGET_PREFERRED_RELOAD_CLASS
+#define TARGET_PREFERRED_RELOAD_CLASS cris_preferred_reload_class
+
+#undef TARGET_REGISTER_MOVE_COST
+#define TARGET_REGISTER_MOVE_COST cris_register_move_cost
+#undef TARGET_MEMORY_MOVE_COST
+#define TARGET_MEMORY_MOVE_COST cris_memory_move_cost
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS cris_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST cris_address_cost
+
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE cris_promote_function_mode
+
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX cris_struct_value_rtx
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS cris_setup_incoming_varargs
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE cris_pass_by_reference
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES cris_arg_partial_bytes
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG cris_function_arg
+#undef TARGET_FUNCTION_INCOMING_ARG
+#define TARGET_FUNCTION_INCOMING_ARG cris_function_incoming_arg
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE cris_function_arg_advance
+#undef TARGET_MD_ASM_CLOBBERS
+#define TARGET_MD_ASM_CLOBBERS cris_md_asm_clobbers
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED cris_frame_pointer_required
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE cris_option_override
+
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define TARGET_ASM_TRAMPOLINE_TEMPLATE cris_asm_trampoline_template
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT cris_trampoline_init
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE cris_function_value
+#undef TARGET_LIBCALL_VALUE
+#define TARGET_LIBCALL_VALUE cris_libcall_value
+#undef TARGET_FUNCTION_VALUE_REGNO_P
+#define TARGET_FUNCTION_VALUE_REGNO_P cris_function_value_regno_p
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+/* Helper for cris_load_multiple_op and cris_ret_movem_op. */
+
+bool
+cris_movem_load_rest_p (rtx op, int offs)
+{
+ unsigned int reg_count = XVECLEN (op, 0) - offs;
+ rtx src_addr;
+ int i;
+ rtx elt;
+ int setno;
+ int regno_dir = 1;
+ unsigned int regno = 0;
+
+ /* Perform a quick check so we don't blow up below. FIXME: Adjust for
+ other than (MEM reg). */
+ if (reg_count <= 1
+ || GET_CODE (XVECEXP (op, 0, offs)) != SET
+ || !REG_P (SET_DEST (XVECEXP (op, 0, offs)))
+ || !MEM_P (SET_SRC (XVECEXP (op, 0, offs))))
+ return false;
+
+ /* Check a possible post-inc indicator. */
+ if (GET_CODE (SET_SRC (XVECEXP (op, 0, offs + 1))) == PLUS)
+ {
+ rtx reg = XEXP (SET_SRC (XVECEXP (op, 0, offs + 1)), 0);
+ rtx inc = XEXP (SET_SRC (XVECEXP (op, 0, offs + 1)), 1);
+
+ reg_count--;
+
+ if (reg_count == 1
+ || !REG_P (reg)
+ || !REG_P (SET_DEST (XVECEXP (op, 0, offs + 1)))
+ || REGNO (reg) != REGNO (SET_DEST (XVECEXP (op, 0, offs + 1)))
+ || !CONST_INT_P (inc)
+ || INTVAL (inc) != (HOST_WIDE_INT) reg_count * 4)
+ return false;
+ i = offs + 2;
+ }
+ else
+ i = offs + 1;
+
+ if (!TARGET_V32)
+ {
+ regno_dir = -1;
+ regno = reg_count - 1;
+ }
+
+ elt = XVECEXP (op, 0, offs);
+ src_addr = XEXP (SET_SRC (elt), 0);
+
+ if (GET_CODE (elt) != SET
+ || !REG_P (SET_DEST (elt))
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != regno
+ || !MEM_P (SET_SRC (elt))
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || !memory_address_p (SImode, src_addr))
+ return false;
+
+ for (setno = 1; i < XVECLEN (op, 0); setno++, i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ regno += regno_dir;
+
+ if (GET_CODE (elt) != SET
+ || !REG_P (SET_DEST (elt))
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != regno
+ || !MEM_P (SET_SRC (elt))
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || !CONST_INT_P (XEXP (XEXP (SET_SRC (elt), 0), 1))
+ || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != setno * 4)
+ return false;
+ }
+
+ return true;
+}
+
+/* Worker function for predicate for the parallel contents in a movem
+ to-memory. */
+
+bool
+cris_store_multiple_op_p (rtx op)
+{
+ int reg_count = XVECLEN (op, 0);
+ rtx dest;
+ rtx dest_addr;
+ rtx dest_base;
+ int i;
+ rtx elt;
+ int setno;
+ int regno_dir = 1;
+ int regno = 0;
+ int offset = 0;
+
+ /* Perform a quick check so we don't blow up below. FIXME: Adjust for
+ other than (MEM reg) and (MEM (PLUS reg const)). */
+ if (reg_count <= 1)
+ return false;
+
+ elt = XVECEXP (op, 0, 0);
+
+ if (GET_CODE (elt) != SET)
+ return false;
+
+ dest = SET_DEST (elt);
+
+ if (!REG_P (SET_SRC (elt)) || !MEM_P (dest))
+ return false;
+
+ dest_addr = XEXP (dest, 0);
+
+ /* Check a possible post-inc indicator. */
+ if (GET_CODE (SET_SRC (XVECEXP (op, 0, 1))) == PLUS)
+ {
+ rtx reg = XEXP (SET_SRC (XVECEXP (op, 0, 1)), 0);
+ rtx inc = XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1);
+
+ reg_count--;
+
+ if (reg_count == 1
+ || !REG_P (reg)
+ || !REG_P (SET_DEST (XVECEXP (op, 0, 1)))
+ || REGNO (reg) != REGNO (SET_DEST (XVECEXP (op, 0, 1)))
+ || !CONST_INT_P (inc)
+ /* Support increment by number of registers, and by the offset
+ of the destination, if it has the form (MEM (PLUS reg
+ offset)). */
+ || !((REG_P (dest_addr)
+ && REGNO (dest_addr) == REGNO (reg)
+ && INTVAL (inc) == (HOST_WIDE_INT) reg_count * 4)
+ || (GET_CODE (dest_addr) == PLUS
+ && REG_P (XEXP (dest_addr, 0))
+ && REGNO (XEXP (dest_addr, 0)) == REGNO (reg)
+ && CONST_INT_P (XEXP (dest_addr, 1))
+ && INTVAL (XEXP (dest_addr, 1)) == INTVAL (inc))))
+ return false;
+
+ i = 2;
+ }
+ else
+ i = 1;
+
+ if (!TARGET_V32)
+ {
+ regno_dir = -1;
+ regno = reg_count - 1;
+ }
+
+ if (GET_CODE (elt) != SET
+ || !REG_P (SET_SRC (elt))
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != (unsigned int) regno
+ || !MEM_P (SET_DEST (elt))
+ || GET_MODE (SET_DEST (elt)) != SImode)
+ return false;
+
+ if (REG_P (dest_addr))
+ {
+ dest_base = dest_addr;
+ offset = 0;
+ }
+ else if (GET_CODE (dest_addr) == PLUS
+ && REG_P (XEXP (dest_addr, 0))
+ && CONST_INT_P (XEXP (dest_addr, 1)))
+ {
+ dest_base = XEXP (dest_addr, 0);
+ offset = INTVAL (XEXP (dest_addr, 1));
+ }
+ else
+ return false;
+
+ for (setno = 1; i < XVECLEN (op, 0); setno++, i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ regno += regno_dir;
+
+ if (GET_CODE (elt) != SET
+ || !REG_P (SET_SRC (elt))
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != (unsigned int) regno
+ || !MEM_P (SET_DEST (elt))
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_base)
+ || !CONST_INT_P (XEXP (XEXP (SET_DEST (elt), 0), 1))
+ || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != setno * 4 + offset)
+ return false;
+ }
+
+ return true;
+}
+
+/* The TARGET_CONDITIONAL_REGISTER_USAGE worker. */
+
+static void
+cris_conditional_register_usage (void)
+{
+ /* FIXME: This isn't nice. We should be able to use that register for
+ something else if the PIC table isn't needed. */
+ if (flag_pic)
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM]
+ = call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ /* Allow use of ACR (PC in pre-V32) and tweak order. */
+ if (TARGET_V32)
+ {
+ static const int reg_alloc_order_v32[] = REG_ALLOC_ORDER_V32;
+ unsigned int i;
+
+ fixed_regs[CRIS_ACR_REGNUM] = 0;
+
+ for (i = 0;
+ i < sizeof (reg_alloc_order_v32)/sizeof (reg_alloc_order_v32[0]);
+ i++)
+ reg_alloc_order[i] = reg_alloc_order_v32[i];
+ }
+
+ if (TARGET_HAS_MUL_INSNS)
+ fixed_regs[CRIS_MOF_REGNUM] = 0;
+
+ /* On early versions, we must use the 16-bit condition-code register,
+ which has another name. */
+ if (cris_cpu_version < 8)
+ reg_names[CRIS_CC0_REGNUM] = "ccr";
+}
+
+/* Return crtl->uses_pic_offset_table. For use in cris.md,
+ since some generated files do not include function.h. */
+
+int
+cris_cfun_uses_pic_table (void)
+{
+ return crtl->uses_pic_offset_table;
+}
+
+/* Given an rtx, return the text string corresponding to the CODE of X.
+ Intended for use in the assembly language output section of a
+ define_insn. */
+
+const char *
+cris_op_str (rtx x)
+{
+ cris_output_insn_is_bound = 0;
+ switch (GET_CODE (x))
+ {
+ case PLUS:
+ return "add";
+ break;
+
+ case MINUS:
+ return "sub";
+ break;
+
+ case MULT:
+ /* This function is for retrieving a part of an instruction name for
+ an operator, for immediate output. If that ever happens for
+ MULT, we need to apply TARGET_MUL_BUG in the caller. Make sure
+ we notice. */
+ internal_error ("MULT case in cris_op_str");
+ break;
+
+ case DIV:
+ return "div";
+ break;
+
+ case AND:
+ return "and";
+ break;
+
+ case IOR:
+ return "or";
+ break;
+
+ case XOR:
+ return "xor";
+ break;
+
+ case NOT:
+ return "not";
+ break;
+
+ case ASHIFT:
+ return "lsl";
+ break;
+
+ case LSHIFTRT:
+ return "lsr";
+ break;
+
+ case ASHIFTRT:
+ return "asr";
+ break;
+
+ case UMIN:
+ /* Used to control the sign/zero-extend character for the 'E' modifier.
+ BOUND has none. */
+ cris_output_insn_is_bound = 1;
+ return "bound";
+ break;
+
+ default:
+ return "Unknown operator";
+ break;
+ }
+}
+
+/* Emit an error message when we're in an asm, and a fatal error for
+ "normal" insns. Formatted output isn't easily implemented, since we
+ use output_operand_lossage to output the actual message and handle the
+ categorization of the error. */
+
+static void
+cris_operand_lossage (const char *msgid, rtx op)
+{
+ debug_rtx (op);
+ output_operand_lossage ("%s", msgid);
+}
+
+/* Print an index part of an address to file. */
+
+static void
+cris_print_index (rtx index, FILE *file)
+{
+ /* Make the index "additive" unless we'll output a negative number, in
+ which case the sign character is free (as in free beer). */
+ if (!CONST_INT_P (index) || INTVAL (index) >= 0)
+ putc ('+', file);
+
+ if (REG_P (index))
+ fprintf (file, "$%s.b", reg_names[REGNO (index)]);
+ else if (CONSTANT_P (index))
+ cris_output_addr_const (file, index);
+ else if (GET_CODE (index) == MULT)
+ {
+ fprintf (file, "$%s.",
+ reg_names[REGNO (XEXP (index, 0))]);
+
+ putc (INTVAL (XEXP (index, 1)) == 2 ? 'w' : 'd', file);
+ }
+ else if (GET_CODE (index) == SIGN_EXTEND && MEM_P (XEXP (index, 0)))
+ {
+ rtx inner = XEXP (index, 0);
+ rtx inner_inner = XEXP (inner, 0);
+
+ if (GET_CODE (inner_inner) == POST_INC)
+ {
+ fprintf (file, "[$%s+].",
+ reg_names[REGNO (XEXP (inner_inner, 0))]);
+ putc (GET_MODE (inner) == HImode ? 'w' : 'b', file);
+ }
+ else
+ {
+ fprintf (file, "[$%s].", reg_names[REGNO (inner_inner)]);
+
+ putc (GET_MODE (inner) == HImode ? 'w' : 'b', file);
+ }
+ }
+ else if (MEM_P (index))
+ {
+ rtx inner = XEXP (index, 0);
+ if (GET_CODE (inner) == POST_INC)
+ fprintf (file, "[$%s+].d", reg_names[REGNO (XEXP (inner, 0))]);
+ else
+ fprintf (file, "[$%s].d", reg_names[REGNO (inner)]);
+ }
+ else
+ cris_operand_lossage ("unexpected index-type in cris_print_index",
+ index);
+}
+
+/* Print a base rtx of an address to file. */
+
+static void
+cris_print_base (rtx base, FILE *file)
+{
+ if (REG_P (base))
+ fprintf (file, "$%s", reg_names[REGNO (base)]);
+ else if (GET_CODE (base) == POST_INC)
+ {
+ gcc_assert (REGNO (XEXP (base, 0)) != CRIS_ACR_REGNUM);
+ fprintf (file, "$%s+", reg_names[REGNO (XEXP (base, 0))]);
+ }
+ else
+ cris_operand_lossage ("unexpected base-type in cris_print_base",
+ base);
+}
+
+/* Usable as a guard in expressions. */
+
+int
+cris_fatal (char *arg)
+{
+ internal_error (arg);
+
+ /* We'll never get here; this is just to appease compilers. */
+ return 0;
+}
+
+/* Return nonzero if REGNO is an ordinary register that *needs* to be
+ saved together with other registers, possibly by a MOVEM instruction,
+ or is saved for target-independent reasons. There may be
+ target-dependent reasons to save the register anyway; this is just a
+ wrapper for a complicated conditional. */
+
+static int
+cris_reg_saved_in_regsave_area (unsigned int regno, bool got_really_used)
+{
+ return
+ (((df_regs_ever_live_p (regno)
+ && !call_used_regs[regno])
+ || (regno == PIC_OFFSET_TABLE_REGNUM
+ && (got_really_used
+ /* It is saved anyway, if there would be a gap. */
+ || (flag_pic
+ && df_regs_ever_live_p (regno + 1)
+ && !call_used_regs[regno + 1]))))
+ && (regno != FRAME_POINTER_REGNUM || !frame_pointer_needed)
+ && regno != CRIS_SRP_REGNUM)
+ || (crtl->calls_eh_return
+ && (regno == EH_RETURN_DATA_REGNO (0)
+ || regno == EH_RETURN_DATA_REGNO (1)
+ || regno == EH_RETURN_DATA_REGNO (2)
+ || regno == EH_RETURN_DATA_REGNO (3)));
+}
+
+/* The PRINT_OPERAND worker. */
+
+static void
+cris_print_operand (FILE *file, rtx x, int code)
+{
+ rtx operand = x;
+
+ /* Size-strings corresponding to MULT expressions. */
+ static const char *const mults[] = { "BAD:0", ".b", ".w", "BAD:3", ".d" };
+
+ /* New code entries should just be added to the switch below. If
+ handling is finished, just return. If handling was just a
+ modification of the operand, the modified operand should be put in
+ "operand", and then do a break to let default handling
+ (zero-modifier) output the operand. */
+
+ switch (code)
+ {
+ case 'b':
+ /* Print the unsigned supplied integer as if it were signed
+ and < 0, i.e print 255 or 65535 as -1, 254, 65534 as -2, etc. */
+ if (!satisfies_constraint_O (x))
+ LOSE_AND_RETURN ("invalid operand for 'b' modifier", x);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ INTVAL (x)| (INTVAL (x) <= 255 ? ~255 : ~65535));
+ return;
+
+ case 'x':
+ /* Print assembler code for operator. */
+ fprintf (file, "%s", cris_op_str (operand));
+ return;
+
+ case 'o':
+ {
+ /* A movem modifier working on a parallel; output the register
+ name. */
+ int regno;
+
+ if (GET_CODE (x) != PARALLEL)
+ LOSE_AND_RETURN ("invalid operand for 'o' modifier", x);
+
+ /* The second item can be (set reg (plus reg const)) to denote a
+ postincrement. */
+ regno
+ = (GET_CODE (SET_SRC (XVECEXP (x, 0, 1))) == PLUS
+ ? XVECLEN (x, 0) - 2
+ : XVECLEN (x, 0) - 1);
+
+ fprintf (file, "$%s", reg_names [regno]);
+ }
+ return;
+
+ case 'O':
+ {
+ /* A similar movem modifier; output the memory operand. */
+ rtx addr;
+
+ if (GET_CODE (x) != PARALLEL)
+ LOSE_AND_RETURN ("invalid operand for 'O' modifier", x);
+
+ /* The lowest mem operand is in the first item, but perhaps it
+ needs to be output as postincremented. */
+ addr = MEM_P (SET_SRC (XVECEXP (x, 0, 0)))
+ ? XEXP (SET_SRC (XVECEXP (x, 0, 0)), 0)
+ : XEXP (SET_DEST (XVECEXP (x, 0, 0)), 0);
+
+ /* The second item can be a (set reg (plus reg const)) to denote
+ a modification. */
+ if (GET_CODE (SET_SRC (XVECEXP (x, 0, 1))) == PLUS)
+ {
+ /* It's a post-increment, if the address is a naked (reg). */
+ if (REG_P (addr))
+ addr = gen_rtx_POST_INC (SImode, addr);
+ else
+ {
+ /* Otherwise, it's a side-effect; RN=RN+M. */
+ fprintf (file, "[$%s=$%s%s%d]",
+ reg_names [REGNO (SET_DEST (XVECEXP (x, 0, 1)))],
+ reg_names [REGNO (XEXP (addr, 0))],
+ INTVAL (XEXP (addr, 1)) < 0 ? "" : "+",
+ (int) INTVAL (XEXP (addr, 1)));
+ return;
+ }
+ }
+ output_address (addr);
+ }
+ return;
+
+ case 'p':
+ /* Adjust a power of two to its log2. */
+ if (!CONST_INT_P (x) || exact_log2 (INTVAL (x)) < 0 )
+ LOSE_AND_RETURN ("invalid operand for 'p' modifier", x);
+ fprintf (file, "%d", exact_log2 (INTVAL (x)));
+ return;
+
+ case 's':
+ /* For an integer, print 'b' or 'w' if <= 255 or <= 65535
+ respectively. This modifier also terminates the inhibiting
+ effects of the 'x' modifier. */
+ cris_output_insn_is_bound = 0;
+ if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
+ {
+ if (INTVAL (x) >= 0)
+ {
+ if (INTVAL (x) <= 255)
+ putc ('b', file);
+ else if (INTVAL (x) <= 65535)
+ putc ('w', file);
+ else
+ putc ('d', file);
+ }
+ else
+ putc ('d', file);
+ return;
+ }
+
+ /* For a non-integer, print the size of the operand. */
+ putc ((GET_MODE (x) == SImode || GET_MODE (x) == SFmode)
+ ? 'd' : GET_MODE (x) == HImode ? 'w'
+ : GET_MODE (x) == QImode ? 'b'
+ /* If none of the above, emit an erroneous size letter. */
+ : 'X',
+ file);
+ return;
+
+ case 'z':
+ /* Const_int: print b for -127 <= x <= 255,
+ w for -32768 <= x <= 65535, else die. */
+ if (!CONST_INT_P (x)
+ || INTVAL (x) < -32768 || INTVAL (x) > 65535)
+ LOSE_AND_RETURN ("invalid operand for 'z' modifier", x);
+ putc (INTVAL (x) >= -128 && INTVAL (x) <= 255 ? 'b' : 'w', file);
+ return;
+
+ case 'Z':
+ /* If this is a GOT-symbol, print the size-letter corresponding to
+ -fpic/-fPIC. For everything else, print "d". */
+ putc ((flag_pic == 1
+ && GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == UNSPEC
+ && XINT (XEXP (x, 0), 1) == CRIS_UNSPEC_GOTREAD)
+ ? 'w' : 'd', file);
+ return;
+
+ case '#':
+ /* Output a 'nop' if there's nothing for the delay slot.
+ This method stolen from the sparc files. */
+ if (dbr_sequence_length () == 0)
+ fputs ("\n\tnop", file);
+ return;
+
+ case '!':
+ /* Output directive for alignment padded with "nop" insns.
+ Optimizing for size, it's plain 4-byte alignment, otherwise we
+ align the section to a cache-line (32 bytes) and skip at max 2
+ bytes, i.e. we skip if it's the last insn on a cache-line. The
+ latter is faster by a small amount (for two test-programs 99.6%
+ and 99.9%) and larger by a small amount (ditto 100.1% and
+ 100.2%). This is supposed to be the simplest yet performance-
+ wise least intrusive way to make sure the immediately following
+ (supposed) muls/mulu insn isn't located at the end of a
+ cache-line. */
+ if (TARGET_MUL_BUG)
+ fputs (optimize_size
+ ? ".p2alignw 2,0x050f\n\t"
+ : ".p2alignw 5,0x050f,2\n\t", file);
+ return;
+
+ case ':':
+ /* The PIC register. */
+ if (! flag_pic)
+ internal_error ("invalid use of ':' modifier");
+ fprintf (file, "$%s", reg_names [PIC_OFFSET_TABLE_REGNUM]);
+ return;
+
+ case 'H':
+ /* Print high (most significant) part of something. */
+ switch (GET_CODE (operand))
+ {
+ case CONST_INT:
+ /* If we're having 64-bit HOST_WIDE_INTs, the whole (DImode)
+ value is kept here, and so may be other than 0 or -1. */
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ INTVAL (operand_subword (operand, 1, 0, DImode)));
+ return;
+
+ case CONST_DOUBLE:
+ /* High part of a long long constant. */
+ if (GET_MODE (operand) == VOIDmode)
+ {
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_HIGH (x));
+ return;
+ }
+ else
+ LOSE_AND_RETURN ("invalid operand for 'H' modifier", x);
+
+ case REG:
+ /* Print reg + 1. Check that there's not an attempt to print
+ high-parts of registers like stack-pointer or higher, except
+ for SRP (where the "high part" is MOF). */
+ if (REGNO (operand) > STACK_POINTER_REGNUM - 2
+ && (REGNO (operand) != CRIS_SRP_REGNUM
+ || CRIS_SRP_REGNUM + 1 != CRIS_MOF_REGNUM
+ || fixed_regs[CRIS_MOF_REGNUM] != 0))
+ LOSE_AND_RETURN ("bad register", operand);
+ fprintf (file, "$%s", reg_names[REGNO (operand) + 1]);
+ return;
+
+ case MEM:
+ /* Adjust memory address to high part. */
+ {
+ rtx adj_mem = operand;
+ int size
+ = GET_MODE_BITSIZE (GET_MODE (operand)) / BITS_PER_UNIT;
+
+ /* Adjust so we can use two SImode in DImode.
+ Calling adj_offsettable_operand will make sure it is an
+ offsettable address. Don't do this for a postincrement
+ though; it should remain as it was. */
+ if (GET_CODE (XEXP (adj_mem, 0)) != POST_INC)
+ adj_mem
+ = adjust_address (adj_mem, GET_MODE (adj_mem), size / 2);
+
+ output_address (XEXP (adj_mem, 0));
+ return;
+ }
+
+ default:
+ LOSE_AND_RETURN ("invalid operand for 'H' modifier", x);
+ }
+
+ case 'L':
+ /* Strip the MEM expression. */
+ operand = XEXP (operand, 0);
+ break;
+
+ case 'e':
+ /* Like 'E', but ignore state set by 'x'. FIXME: Use code
+ iterators and attributes in cris.md to avoid the need for %x
+ and %E (and %e) and state passed between those modifiers. */
+ cris_output_insn_is_bound = 0;
+ /* FALL THROUGH. */
+ case 'E':
+ /* Print 's' if operand is SIGN_EXTEND or 'u' if ZERO_EXTEND unless
+ cris_output_insn_is_bound is nonzero. */
+ if (GET_CODE (operand) != SIGN_EXTEND
+ && GET_CODE (operand) != ZERO_EXTEND
+ && !CONST_INT_P (operand))
+ LOSE_AND_RETURN ("invalid operand for 'e' modifier", x);
+
+ if (cris_output_insn_is_bound)
+ {
+ cris_output_insn_is_bound = 0;
+ return;
+ }
+
+ putc (GET_CODE (operand) == SIGN_EXTEND
+ || (CONST_INT_P (operand) && INTVAL (operand) < 0)
+ ? 's' : 'u', file);
+ return;
+
+ case 'm':
+ /* Print the size letter of the inner element. We can do it by
+ calling ourselves with the 's' modifier. */
+ if (GET_CODE (operand) != SIGN_EXTEND && GET_CODE (operand) != ZERO_EXTEND)
+ LOSE_AND_RETURN ("invalid operand for 'm' modifier", x);
+ cris_print_operand (file, XEXP (operand, 0), 's');
+ return;
+
+ case 'M':
+ /* Print the least significant part of operand. */
+ if (GET_CODE (operand) == CONST_DOUBLE)
+ {
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (x));
+ return;
+ }
+ else if (HOST_BITS_PER_WIDE_INT > 32 && CONST_INT_P (operand))
+ {
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ INTVAL (x) & ((unsigned int) 0x7fffffff * 2 + 1));
+ return;
+ }
+ /* Otherwise the least significant part equals the normal part,
+ so handle it normally. */
+ break;
+
+ case 'A':
+ /* When emitting an add for the high part of a DImode constant, we
+ want to use addq for 0 and adds.w for -1. */
+ if (!CONST_INT_P (operand))
+ LOSE_AND_RETURN ("invalid operand for 'A' modifier", x);
+ fprintf (file, INTVAL (operand) < 0 ? "adds.w" : "addq");
+ return;
+
+ case 'P':
+ /* For const_int operands, print the additive mnemonic and the
+ modified operand (byte-sized operands don't save anything):
+ N=MIN_INT..-65536: add.d N
+ -65535..-64: subu.w -N
+ -63..-1: subq -N
+ 0..63: addq N
+ 64..65535: addu.w N
+ 65536..MAX_INT: add.d N.
+ (Emitted mnemonics are capitalized to simplify testing.)
+ For anything else (N.B: only register is valid), print "add.d". */
+ if (REG_P (operand))
+ {
+ fprintf (file, "Add.d ");
+
+ /* Deal with printing the operand by dropping through to the
+ normal path. */
+ break;
+ }
+ else
+ {
+ int val;
+ gcc_assert (CONST_INT_P (operand));
+
+ val = INTVAL (operand);
+ if (!IN_RANGE (val, -65535, 65535))
+ fprintf (file, "Add.d %d", val);
+ else if (val <= -64)
+ fprintf (file, "Subu.w %d", -val);
+ else if (val <= -1)
+ fprintf (file, "Subq %d", -val);
+ else if (val <= 63)
+ fprintf (file, "Addq %d", val);
+ else if (val <= 65535)
+ fprintf (file, "Addu.w %d", val);
+ return;
+ }
+ break;
+
+ case 'q':
+ /* If the operand is an integer -31..31, print "q" else ".d". */
+ if (CONST_INT_P (operand) && IN_RANGE (INTVAL (operand), -31, 31))
+ fprintf (file, "q");
+ else
+ fprintf (file, ".d");
+ return;
+
+ case 'd':
+ /* If this is a GOT symbol, force it to be emitted as :GOT and
+ :GOTPLT regardless of -fpic (i.e. not as :GOT16, :GOTPLT16).
+ Avoid making this too much of a special case. */
+ if (flag_pic == 1 && CONSTANT_P (operand))
+ {
+ int flag_pic_save = flag_pic;
+
+ flag_pic = 2;
+ cris_output_addr_const (file, operand);
+ flag_pic = flag_pic_save;
+ return;
+ }
+ break;
+
+ case 'D':
+ /* When emitting an sub for the high part of a DImode constant, we
+ want to use subq for 0 and subs.w for -1. */
+ if (!CONST_INT_P (operand))
+ LOSE_AND_RETURN ("invalid operand for 'D' modifier", x);
+ fprintf (file, INTVAL (operand) < 0 ? "subs.w" : "subq");
+ return;
+
+ case 'S':
+ /* Print the operand as the index-part of an address.
+ Easiest way out is to use cris_print_index. */
+ cris_print_index (operand, file);
+ return;
+
+ case 'T':
+ /* Print the size letter for an operand to a MULT, which must be a
+ const_int with a suitable value. */
+ if (!CONST_INT_P (operand) || INTVAL (operand) > 4)
+ LOSE_AND_RETURN ("invalid operand for 'T' modifier", x);
+ fprintf (file, "%s", mults[INTVAL (operand)]);
+ return;
+
+ case 'u':
+ /* Print "u.w" if a GOT symbol and flag_pic == 1, else ".d". */
+ if (flag_pic == 1
+ && GET_CODE (operand) == CONST
+ && GET_CODE (XEXP (operand, 0)) == UNSPEC
+ && XINT (XEXP (operand, 0), 1) == CRIS_UNSPEC_GOTREAD)
+ fprintf (file, "u.w");
+ else
+ fprintf (file, ".d");
+ return;
+
+ case 0:
+ /* No code, print as usual. */
+ break;
+
+ default:
+ LOSE_AND_RETURN ("invalid operand modifier letter", x);
+ }
+
+ /* Print an operand as without a modifier letter. */
+ switch (GET_CODE (operand))
+ {
+ case REG:
+ if (REGNO (operand) > 15
+ && REGNO (operand) != CRIS_MOF_REGNUM
+ && REGNO (operand) != CRIS_SRP_REGNUM
+ && REGNO (operand) != CRIS_CC0_REGNUM)
+ internal_error ("internal error: bad register: %d", REGNO (operand));
+ fprintf (file, "$%s", reg_names[REGNO (operand)]);
+ return;
+
+ case MEM:
+ output_address (XEXP (operand, 0));
+ return;
+
+ case CONST_DOUBLE:
+ if (GET_MODE (operand) == VOIDmode)
+ /* A long long constant. */
+ output_addr_const (file, operand);
+ else
+ {
+ /* Only single precision is allowed as plain operands the
+ moment. FIXME: REAL_VALUE_FROM_CONST_DOUBLE isn't
+ documented. */
+ REAL_VALUE_TYPE r;
+ long l;
+
+ /* FIXME: Perhaps check overflow of the "single". */
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
+ REAL_VALUE_TO_TARGET_SINGLE (r, l);
+
+ fprintf (file, "0x%lx", l);
+ }
+ return;
+
+ case UNSPEC:
+ /* Fall through. */
+ case CONST:
+ cris_output_addr_const (file, operand);
+ return;
+
+ case MULT:
+ case ASHIFT:
+ {
+ /* For a (MULT (reg X) const_int) we output "rX.S". */
+ int i = CONST_INT_P (XEXP (operand, 1))
+ ? INTVAL (XEXP (operand, 1)) : INTVAL (XEXP (operand, 0));
+ rtx reg = CONST_INT_P (XEXP (operand, 1))
+ ? XEXP (operand, 0) : XEXP (operand, 1);
+
+ if (!REG_P (reg)
+ || (!CONST_INT_P (XEXP (operand, 0))
+ && !CONST_INT_P (XEXP (operand, 1))))
+ LOSE_AND_RETURN ("unexpected multiplicative operand", x);
+
+ cris_print_base (reg, file);
+ fprintf (file, ".%c",
+ i == 0 || (i == 1 && GET_CODE (operand) == MULT) ? 'b'
+ : i == 4 ? 'd'
+ : (i == 2 && GET_CODE (operand) == MULT) || i == 1 ? 'w'
+ : 'd');
+ return;
+ }
+
+ default:
+ /* No need to handle all strange variants, let output_addr_const
+ do it for us. */
+ if (CONSTANT_P (operand))
+ {
+ cris_output_addr_const (file, operand);
+ return;
+ }
+
+ LOSE_AND_RETURN ("unexpected operand", x);
+ }
+}
+
/* The PRINT_OPERAND_PUNCT_VALID_P worker: only '#', '!' and ':' are
   accepted as operand-less punctuation codes by cris_print_operand.  */

static bool
cris_print_operand_punct_valid_p (unsigned char code)
{
  switch (code)
    {
    case '#':
    case '!':
    case ':':
      return true;

    default:
      return false;
    }
}
+
/* The PRINT_OPERAND_ADDRESS worker.  Output the assembler syntax for
   address X (already stripped of its MEM wrapper) to FILE.  */

static void
cris_print_operand_address (FILE *file, rtx x)
{
  /* All these were inside MEM:s so output indirection characters.  */
  putc ('[', file);

  if (CONSTANT_ADDRESS_P (x))
    /* A plain constant address.  */
    cris_output_addr_const (file, x);
  else if (cris_base_or_autoincr_p (x, true))
    /* A base register, possibly post-incremented.  */
    cris_print_base (x, file);
  else if (GET_CODE (x) == PLUS)
    {
      /* Base plus index; either operand order is accepted.  */
      rtx x1, x2;

      x1 = XEXP (x, 0);
      x2 = XEXP (x, 1);
      if (cris_base_p (x1, true))
        {
          cris_print_base (x1, file);
          cris_print_index (x2, file);
        }
      else if (cris_base_p (x2, true))
        {
          cris_print_base (x2, file);
          cris_print_index (x1, file);
        }
      else
        LOSE_AND_RETURN ("unrecognized address", x);
    }
  else if (MEM_P (x))
    {
      /* A DIP.  Output more indirection characters.  */
      putc ('[', file);
      cris_print_base (XEXP (x, 0), file);
      putc (']', file);
    }
  else
    LOSE_AND_RETURN ("unrecognized address", x);

  putc (']', file);
}
+
+/* The RETURN_ADDR_RTX worker.
+ We mark that the return address is used, either by EH or
+ __builtin_return_address, for use by the function prologue and
+ epilogue. FIXME: This isn't optimal; we just use the mark in the
+ prologue and epilogue to say that the return address is to be stored
+ in the stack frame. We could return SRP for leaf-functions and use the
+ initial-value machinery. */
+
+rtx
+cris_return_addr_rtx (int count, rtx frameaddr ATTRIBUTE_UNUSED)
+{
+ cfun->machine->needs_return_address_on_stack = 1;
+
+ /* The return-address is stored just above the saved frame-pointer (if
+ present). Apparently we can't eliminate from the frame-pointer in
+ that direction, so use the incoming args (maybe pretended) pointer. */
+ return count == 0
+ ? gen_rtx_MEM (Pmode, plus_constant (Pmode, virtual_incoming_args_rtx, -4))
+ : NULL_RTX;
+}
+
+/* Accessor used in cris.md:return because cfun->machine isn't available
+ there. */
+
+bool
+cris_return_address_on_stack (void)
+{
+ return df_regs_ever_live_p (CRIS_SRP_REGNUM)
+ || cfun->machine->needs_return_address_on_stack;
+}
+
+/* Accessor used in cris.md:return because cfun->machine isn't available
+ there. */
+
+bool
+cris_return_address_on_stack_for_return (void)
+{
+ return cfun->machine->return_type == CRIS_RETINSN_RET ? false
+ : cris_return_address_on_stack ();
+}
+
+/* This used to be the INITIAL_FRAME_POINTER_OFFSET worker; now only
+ handles FP -> SP elimination offset. */
+
+static int
+cris_initial_frame_pointer_offset (void)
+{
+ int regno;
+
+ /* Initial offset is 0 if we don't have a frame pointer. */
+ int offs = 0;
+ bool got_really_used = false;
+
+ if (crtl->uses_pic_offset_table)
+ {
+ push_topmost_sequence ();
+ got_really_used
+ = reg_used_between_p (pic_offset_table_rtx, get_insns (),
+ NULL_RTX);
+ pop_topmost_sequence ();
+ }
+
+ /* And 4 for each register pushed. */
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (cris_reg_saved_in_regsave_area (regno, got_really_used))
+ offs += 4;
+
+ /* And then, last, we add the locals allocated. */
+ offs += get_frame_size ();
+
+ /* And more; the accumulated args size. */
+ offs += crtl->outgoing_args_size;
+
+ /* Then round it off, in case we use aligned stack. */
+ if (TARGET_STACK_ALIGN)
+ offs = TARGET_ALIGN_BY_32 ? (offs + 3) & ~3 : (offs + 1) & ~1;
+
+ return offs;
+}
+
+/* The INITIAL_ELIMINATION_OFFSET worker.
+ Calculate the difference between imaginary registers such as frame
+ pointer and the stack pointer. Used to eliminate the frame pointer
+ and imaginary arg pointer. */
+
+int
+cris_initial_elimination_offset (int fromreg, int toreg)
+{
+ int fp_sp_offset
+ = cris_initial_frame_pointer_offset ();
+
+ /* We should be able to use regs_ever_live and related prologue
+ information here, or alpha should not as well. */
+ bool return_address_on_stack = cris_return_address_on_stack ();
+
+ /* Here we act as if the frame-pointer were needed. */
+ int ap_fp_offset = 4 + (return_address_on_stack ? 4 : 0);
+
+ if (fromreg == ARG_POINTER_REGNUM
+ && toreg == FRAME_POINTER_REGNUM)
+ return ap_fp_offset;
+
+ /* Between the frame pointer and the stack are only "normal" stack
+ variables and saved registers. */
+ if (fromreg == FRAME_POINTER_REGNUM
+ && toreg == STACK_POINTER_REGNUM)
+ return fp_sp_offset;
+
+ /* We need to balance out the frame pointer here. */
+ if (fromreg == ARG_POINTER_REGNUM
+ && toreg == STACK_POINTER_REGNUM)
+ return ap_fp_offset + fp_sp_offset - 4;
+
+ gcc_unreachable ();
+}
+
/* Nonzero if X is a hard reg that can be used as a base; with !STRICT,
   pseudo-registers (non-hard registers) are also accepted.  */
static inline bool
reg_ok_for_base_p (const_rtx x, bool strict)
{
  return ((! strict && ! HARD_REGISTER_P (x))
	  || REGNO_OK_FOR_BASE_P (REGNO (x)));
}
+
/* Nonzero if X is a hard reg that can be used as an index; on CRIS the
   same registers that are valid as a base (see reg_ok_for_base_p).  */
static inline bool
reg_ok_for_index_p (const_rtx x, bool strict)
{
  return reg_ok_for_base_p (x, strict);
}
+
+/* No symbol can be used as an index (or more correct, as a base) together
+ with a register with PIC; the PIC register must be there. */
+
+bool
+cris_constant_index_p (const_rtx x)
+{
+ return (CONSTANT_P (x) && (!flag_pic || cris_valid_pic_const (x, true)));
+}
+
+/* True if X is a valid base register. */
+
+bool
+cris_base_p (const_rtx x, bool strict)
+{
+ return (REG_P (x) && reg_ok_for_base_p (x, strict));
+}
+
+/* True if X is a valid index register. */
+
+static inline bool
+cris_index_p (const_rtx x, bool strict)
+{
+ return (REG_P (x) && reg_ok_for_index_p (x, strict));
+}
+
+/* True if X is a valid base register with or without autoincrement. */
+
+bool
+cris_base_or_autoincr_p (const_rtx x, bool strict)
+{
+ return (cris_base_p (x, strict)
+ || (GET_CODE (x) == POST_INC
+ && cris_base_p (XEXP (x, 0), strict)
+ && REGNO (XEXP (x, 0)) != CRIS_ACR_REGNUM));
+}
+
+/* True if X is a valid (register) index for BDAP, i.e. [Rs].S or [Rs+].S. */
+
+bool
+cris_bdap_index_p (const_rtx x, bool strict)
+{
+ return ((MEM_P (x)
+ && GET_MODE (x) == SImode
+ && cris_base_or_autoincr_p (XEXP (x, 0), strict))
+ || (GET_CODE (x) == SIGN_EXTEND
+ && MEM_P (XEXP (x, 0))
+ && (GET_MODE (XEXP (x, 0)) == HImode
+ || GET_MODE (XEXP (x, 0)) == QImode)
+ && cris_base_or_autoincr_p (XEXP (XEXP (x, 0), 0), strict)));
+}
+
+/* True if X is a valid (register) index for BIAP, i.e. Rd.m. */
+
+bool
+cris_biap_index_p (const_rtx x, bool strict)
+{
+ return (cris_index_p (x, strict)
+ || (GET_CODE (x) == MULT
+ && cris_index_p (XEXP (x, 0), strict)
+ && cris_scale_int_operand (XEXP (x, 1), VOIDmode)));
+}
+
/* Worker function for TARGET_LEGITIMATE_ADDRESS_P.  MODE is the mode
   of the access, X the address, and STRICT nonzero when only hard
   registers are acceptable as base/index registers.

   A PIC operand looks like a normal symbol here.  At output we dress it
   in "[rPIC+symbol:GOT]" (global symbol) or "rPIC+symbol:GOTOFF" (local
   symbol) so we exclude all addressing modes where we can't replace a
   plain "symbol" with that.  A global PIC symbol does not fit anywhere
   here (but is thankfully a general_operand in itself).  A local PIC
   symbol is valid for the plain "symbol + offset" case.  */

bool
cris_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  const_rtx x1, x2;

  /* Plain register, or [reg]/[reg+] autoincrement.  */
  if (cris_base_or_autoincr_p (x, strict))
    return true;
  else if (TARGET_V32)
    /* Nothing else is valid then.  */
    return false;
  else if (cris_constant_index_p (x))
    return true;
  /* Indexed?  */
  else if (GET_CODE (x) == PLUS)
    {
      x1 = XEXP (x, 0);
      x2 = XEXP (x, 1);
      /* BDAP o, Rd.  */
      if ((cris_base_p (x1, strict) && cris_constant_index_p (x2))
	  || (cris_base_p (x2, strict) && cris_constant_index_p (x1))
	  /* BDAP Rs[+], Rd.  Only for accesses of at most word size.  */
	  || (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	      && ((cris_base_p (x1, strict)
		   && cris_bdap_index_p (x2, strict))
		  || (cris_base_p (x2, strict)
		      && cris_bdap_index_p (x1, strict))
		  /* BIAP.m Rs, Rd */
		  || (cris_base_p (x1, strict)
		      && cris_biap_index_p (x2, strict))
		  || (cris_base_p (x2, strict)
		      && cris_biap_index_p (x1, strict)))))
	return true;
    }
  else if (MEM_P (x))
    {
      /* DIP (Rs).  Reject [[reg+]] and [[reg]] for DImode (long long).  */
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	  && cris_base_or_autoincr_p (XEXP (x, 0), strict))
	return true;
    }

  return false;
}
+
/* Worker function for LEGITIMIZE_RELOAD_ADDRESS.

   Handles the case of a sign-extended byte/word memory index added to a
   register (a BDAP-style address), pushing reloads for the inner address
   register and/or the base register when they are pseudos or registers
   above CRIS_LAST_GENERAL_REGISTER.  Returns true iff a reload was
   pushed for X; false lets the generic reload code handle it.

   NOTE(review): OPNUM is marked ATTRIBUTE_UNUSED but is in fact used in
   the push_reload calls below; the marker is harmless but stale.  */

bool
cris_reload_address_legitimized (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum ATTRIBUTE_UNUSED,
				 int itype,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  enum reload_type type = (enum reload_type) itype;
  rtx op0, op1;
  rtx *op1p;

  if (GET_CODE (x) != PLUS)
    return false;

  /* No such addressing mode to fix up on V32.  */
  if (TARGET_V32)
    return false;

  op0 = XEXP (x, 0);
  op1 = XEXP (x, 1);
  op1p = &XEXP (x, 1);

  if (!REG_P (op1))
    return false;

  if (GET_CODE (op0) == SIGN_EXTEND && MEM_P (XEXP (op0, 0)))
    {
      rtx op00 = XEXP (op0, 0);
      rtx op000 = XEXP (op00, 0);
      rtx *op000p = &XEXP (op00, 0);

      if ((GET_MODE (op00) == HImode || GET_MODE (op00) == QImode)
	  && (REG_P (op000)
	      || (GET_CODE (op000) == POST_INC && REG_P (XEXP (op000, 0)))))
	{
	  bool something_reloaded = false;

	  if (GET_CODE (op000) == POST_INC
	      && REG_P (XEXP (op000, 0))
	      && REGNO (XEXP (op000, 0)) > CRIS_LAST_GENERAL_REGISTER)
	    /* No, this gets too complicated and is too rare to care
	       about trying to improve on the general code here.
	       As the return-value is an all-or-nothing indicator, we
	       punt on the other register too.  */
	    return false;

	  if ((REG_P (op000)
	       && REGNO (op000) > CRIS_LAST_GENERAL_REGISTER))
	    {
	      /* The address of the inner mem is a pseudo or wrong
		 reg: reload that.  */
	      push_reload (op000, NULL_RTX, op000p, NULL, GENERAL_REGS,
			   GET_MODE (x), VOIDmode, 0, 0, opnum, type);
	      something_reloaded = true;
	    }

	  if (REGNO (op1) > CRIS_LAST_GENERAL_REGISTER)
	    {
	      /* Base register is a pseudo or wrong reg: reload it.  */
	      push_reload (op1, NULL_RTX, op1p, NULL, GENERAL_REGS,
			   GET_MODE (x), VOIDmode, 0, 0,
			   opnum, type);
	      something_reloaded = true;
	    }

	  gcc_assert (something_reloaded);

	  return true;
	}
    }

  return false;
}
+
+
+/* Worker function for TARGET_PREFERRED_RELOAD_CLASS.
+
+ It seems like gcc (2.7.2 and 2.9x of 2000-03-22) may send "NO_REGS" as
+ the class for a constant (testcase: __Mul in arit.c). To avoid forcing
+ out a constant into the constant pool, we will trap this case and
+ return something a bit more sane. FIXME: Check if this is a bug.
+ Beware that we must not "override" classes that can be specified as
+ constraint letters, or else asm operands using them will fail when
+ they need to be reloaded. FIXME: Investigate whether that constitutes
+ a bug. */
+
+static reg_class_t
+cris_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
+{
+ if (rclass != ACR_REGS
+ && rclass != MOF_REGS
+ && rclass != MOF_SRP_REGS
+ && rclass != SRP_REGS
+ && rclass != CC0_REGS
+ && rclass != SPECIAL_REGS)
+ return GENERAL_REGS;
+
+ return rclass;
+}
+
+/* Worker function for TARGET_REGISTER_MOVE_COST. */
+
+static int
+cris_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+ reg_class_t from, reg_class_t to)
+{
+ /* Can't move to and from a SPECIAL_REGS register, so we have to say
+ their move cost within that class is higher. How about 7? That's 3
+ for a move to a GENERAL_REGS register, 3 for the move from the
+ GENERAL_REGS register, and 1 for the increased register pressure.
+ Also, it's higher than the memory move cost, as it should.
+ We also do this for ALL_REGS, since we don't want that class to be
+ preferred (even to memory) at all where GENERAL_REGS doesn't fit.
+ Whenever it's about to be used, it's for SPECIAL_REGS. If we don't
+ present a higher cost for ALL_REGS than memory, a SPECIAL_REGS may be
+ used when a GENERAL_REGS should be used, even if there are call-saved
+ GENERAL_REGS left to allocate. This is because the fall-back when
+ the most preferred register class isn't available, isn't the next
+ (or next good) wider register class, but the *most widest* register
+ class. FIXME: pre-IRA comment, perhaps obsolete now. */
+
+ if ((reg_classes_intersect_p (from, SPECIAL_REGS)
+ && reg_classes_intersect_p (to, SPECIAL_REGS))
+ || from == ALL_REGS || to == ALL_REGS)
+ return 7;
+
+ /* Make moves to/from SPECIAL_REGS slightly more expensive, as we
+ generally prefer GENERAL_REGS. */
+ if (reg_classes_intersect_p (from, SPECIAL_REGS)
+ || reg_classes_intersect_p (to, SPECIAL_REGS))
+ return 3;
+
+ return 2;
+}
+
+/* Worker function for TARGET_MEMORY_MOVE_COST.
+
+ This isn't strictly correct for v0..3 in buswidth-8bit mode, but should
+ suffice. */
+
+static int
+cris_memory_move_cost (enum machine_mode mode,
+ reg_class_t rclass ATTRIBUTE_UNUSED,
+ bool in ATTRIBUTE_UNUSED)
+{
+ if (mode == QImode
+ || mode == HImode)
+ return 4;
+ else
+ return 6;
+}
+
/* Worker for cris_notice_update_cc; handles the "normal" cases.
   EXP is the pattern of INSN (a SET or a PARALLEL of SETs).
   FIXME: this code is historical; its functionality should be
   refactored to look at insn attributes and moved to
   cris_notice_update_cc.  Except, we better lose cc0 entirely.  */

static void
cris_normal_notice_update_cc (rtx exp, rtx insn)
{
  /* "Normal" means, for:
     (set (cc0) (...)):
     CC is (...).

     (set (reg) (...)):
     CC is (reg) and (...) - unless (...) is 0 or reg is a special
     register or (v32 and (...) is -32..-1), then CC does not change.
     CC_NO_OVERFLOW unless (...) is reg or mem.

     (set (mem) (...)):
     CC does not change.

     (set (pc) (...)):
     CC does not change.

     (parallel
      (set (reg1) (mem (bdap/biap)))
      (set (reg2) (bdap/biap))):
     CC is (reg1) and (mem (reg2))

     (parallel
      (set (mem (bdap/biap)) (reg1)) [or 0]
      (set (reg2) (bdap/biap))):
     CC does not change.

     (where reg and mem includes strict_low_parts variants thereof)

     For all others, assume CC is clobbered.
     Note that we do not have to care about setting CC_NO_OVERFLOW,
     since the overflow flag is set to 0 (i.e. right) for
     instructions where it does not have any sane sense, but where
     other flags have meanings.  (This includes shifts; the carry is
     not set by them).

     Note that there are other parallel constructs we could match,
     but we don't do that yet.  */

  if (GET_CODE (exp) == SET)
    {
      /* FIXME: Check when this happens.  It looks like we should
	 actually do a CC_STATUS_INIT here to be safe.  */
      if (SET_DEST (exp) == pc_rtx)
	return;

      /* Record CC0 changes, so we do not have to output multiple
	 test insns.  */
      if (SET_DEST (exp) == cc0_rtx)
	{
	  CC_STATUS_INIT;

	  /* A compare against zero reduces to just the first operand.  */
	  if (GET_CODE (SET_SRC (exp)) == COMPARE
	      && XEXP (SET_SRC (exp), 1) == const0_rtx)
	    cc_status.value1 = XEXP (SET_SRC (exp), 0);
	  else
	    cc_status.value1 = SET_SRC (exp);

	  /* Handle flags for the special btstq on one bit.  */
	  if (GET_CODE (cc_status.value1) == ZERO_EXTRACT
	      && XEXP (cc_status.value1, 1) == const1_rtx)
	    {
	      if (CONST_INT_P (XEXP (cc_status.value1, 0)))
		/* Using cmpq.  */
		cc_status.flags = CC_INVERTED;
	      else
		/* A one-bit btstq.  */
		cc_status.flags = CC_Z_IN_NOT_N;
	    }

	  else if (GET_CODE (SET_SRC (exp)) == COMPARE)
	    {
	      if (!REG_P (XEXP (SET_SRC (exp), 0))
		  && XEXP (SET_SRC (exp), 1) != const0_rtx)
		/* For some reason gcc will not canonicalize compare
		   operations, reversing the sign by itself if
		   operands are in wrong order.  */
		/* (But NOT inverted; eq is still eq.)  */
		cc_status.flags = CC_REVERSED;

	      /* This seems to be overlooked by gcc.  FIXME: Check again.
		 FIXME: Is it really safe?  */
	      cc_status.value2
		= gen_rtx_MINUS (GET_MODE (SET_SRC (exp)),
				 XEXP (SET_SRC (exp), 0),
				 XEXP (SET_SRC (exp), 1));
	    }
	  return;
	}
      else if (REG_P (SET_DEST (exp))
	       || (GET_CODE (SET_DEST (exp)) == STRICT_LOW_PART
		   && REG_P (XEXP (SET_DEST (exp), 0))))
	{
	  /* A register is set; normally CC is set to show that no
	     test insn is needed.  Catch the exceptions.  */

	  /* If not to cc0, then no "set"s in non-natural mode give
	     ok cc0...  */
	  if (GET_MODE_SIZE (GET_MODE (SET_DEST (exp))) > UNITS_PER_WORD
	      || GET_MODE_CLASS (GET_MODE (SET_DEST (exp))) == MODE_FLOAT)
	    {
	      /* ... except add:s and sub:s in DImode.  */
	      if (GET_MODE (SET_DEST (exp)) == DImode
		  && (GET_CODE (SET_SRC (exp)) == PLUS
		      || GET_CODE (SET_SRC (exp)) == MINUS))
		{
		  CC_STATUS_INIT;
		  cc_status.value1 = SET_DEST (exp);
		  cc_status.value2 = SET_SRC (exp);

		  /* Can't track the source if it overlaps the (just
		     modified) destination.  */
		  if (cris_reg_overlap_mentioned_p (cc_status.value1,
						    cc_status.value2))
		    cc_status.value2 = 0;

		  /* Add and sub may set V, which gets us
		     unoptimizable results in "gt" and "le" condition
		     codes.  */
		  cc_status.flags |= CC_NO_OVERFLOW;

		  return;
		}
	    }
	  else if (SET_SRC (exp) == const0_rtx
		   || (REG_P (SET_SRC (exp))
		       && (REGNO (SET_SRC (exp))
			   > CRIS_LAST_GENERAL_REGISTER))
		   || (TARGET_V32
		       && REG_P (SET_DEST (exp))
		       && satisfies_constraint_I (SET_SRC (exp))))
	    {
	      /* There's no CC0 change for this case.  Just check
		 for overlap.  */
	      if (cc_status.value1
		  && modified_in_p (cc_status.value1, insn))
		cc_status.value1 = 0;

	      if (cc_status.value2
		  && modified_in_p (cc_status.value2, insn))
		cc_status.value2 = 0;

	      return;
	    }
	  else
	    {
	      CC_STATUS_INIT;
	      cc_status.value1 = SET_DEST (exp);
	      cc_status.value2 = SET_SRC (exp);

	      /* Can't track the source if it overlaps the (just
		 modified) destination.  */
	      if (cris_reg_overlap_mentioned_p (cc_status.value1,
						cc_status.value2))
		cc_status.value2 = 0;

	      /* Some operations may set V, which gets us
		 unoptimizable results in "gt" and "le" condition
		 codes.  */
	      if (GET_CODE (SET_SRC (exp)) == PLUS
		  || GET_CODE (SET_SRC (exp)) == MINUS
		  || GET_CODE (SET_SRC (exp)) == NEG)
		cc_status.flags |= CC_NO_OVERFLOW;

	      /* For V32, nothing with a register destination sets
		 C and V usefully.  */
	      if (TARGET_V32)
		cc_status.flags |= CC_NO_OVERFLOW;

	      return;
	    }
	}
      else if (MEM_P (SET_DEST (exp))
	       || (GET_CODE (SET_DEST (exp)) == STRICT_LOW_PART
		   && MEM_P (XEXP (SET_DEST (exp), 0))))
	{
	  /* When SET to MEM, then CC is not changed (except for
	     overlap).  */
	  if (cc_status.value1
	      && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;

	  if (cc_status.value2
	      && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;

	  return;
	}
    }
  else if (GET_CODE (exp) == PARALLEL)
    {
      if (GET_CODE (XVECEXP (exp, 0, 0)) == SET
	  && GET_CODE (XVECEXP (exp, 0, 1)) == SET
	  && REG_P (XEXP (XVECEXP (exp, 0, 1), 0)))
	{
	  if (REG_P (XEXP (XVECEXP (exp, 0, 0), 0))
	      && MEM_P (XEXP (XVECEXP (exp, 0, 0), 1)))
	    {
	      CC_STATUS_INIT;

	      /* For "move.S [rx=ry+o],rz", say CC reflects
		 value1=rz and value2=[rx] */
	      cc_status.value1 = XEXP (XVECEXP (exp, 0, 0), 0);
	      cc_status.value2
		= replace_equiv_address (XEXP (XVECEXP (exp, 0, 0), 1),
					 XEXP (XVECEXP (exp, 0, 1), 0));

	      /* Huh?  A side-effect cannot change the destination
		 register.  */
	      if (cris_reg_overlap_mentioned_p (cc_status.value1,
						cc_status.value2))
		internal_error ("internal error: sideeffect-insn affecting main effect");

	      /* For V32, moves to registers don't set C and V.  */
	      if (TARGET_V32)
		cc_status.flags |= CC_NO_OVERFLOW;
	      return;
	    }
	  else if ((REG_P (XEXP (XVECEXP (exp, 0, 0), 1))
		    || XEXP (XVECEXP (exp, 0, 0), 1) == const0_rtx)
		   && MEM_P (XEXP (XVECEXP (exp, 0, 0), 0)))
	    {
	      /* For "move.S rz,[rx=ry+o]" and "clear.S [rx=ry+o]",
		 say flags are not changed, except for overlap.  */
	      if (cc_status.value1
		  && modified_in_p (cc_status.value1, insn))
		cc_status.value1 = 0;

	      if (cc_status.value2
		  && modified_in_p (cc_status.value2, insn))
		cc_status.value2 = 0;

	      return;
	    }
	}
    }

  /* If we got here, the case wasn't covered by the code above.  */
  CC_STATUS_INIT;
}
+
/* This function looks into the pattern to see how this insn affects
   condition codes.  EXP is the insn pattern, INSN the insn itself.

   Used when to eliminate test insns before a condition-code user,
   such as a "scc" insn or a conditional branch.  This includes
   checking if the entities that cc was updated by, are changed by the
   operation.

   Currently a jumble of the old peek-inside-the-insn and the newer
   check-cc-attribute methods.  */

void
cris_notice_update_cc (rtx exp, rtx insn)
{
  enum attr_cc attrval = get_attr_cc (insn);

  /* Check if user specified "-mcc-init" as a bug-workaround.  Remember
     to still set CC_REVERSED as below, since that's required by some
     compare insn alternatives.  (FIXME: GCC should do this virtual
     operand swap by itself.)  A test-case that may otherwise fail is
     gcc.c-torture/execute/20000217-1.c -O0 and -O1.  */
  if (TARGET_CCINIT)
    {
      CC_STATUS_INIT;

      if (attrval == CC_REV)
	cc_status.flags = CC_REVERSED;
      return;
    }

  /* Slowly, we're converting to using attributes to control the setting
     of condition-code status.  */
  switch (attrval)
    {
    case CC_NONE:
      /* Even if it is "none", a setting may clobber a previous
	 cc-value, so check.  */
      if (GET_CODE (exp) == SET)
	{
	  if (cc_status.value1
	      && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;

	  if (cc_status.value2
	      && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      return;

    case CC_CLOBBER:
      CC_STATUS_INIT;
      return;

    case CC_REV:
    case CC_NOOV32:
    case CC_NORMAL:
      cris_normal_notice_update_cc (exp, insn);

      /* The "test" insn doesn't clear (carry and) overflow on V32.  We
	 can change bge => bpl and blt => bmi by passing on to the cc0
	 user that V should not be considered; bgt and ble are taken
	 care of by other methods (see {tst,cmp}{si,hi,qi}).  */
      if (attrval == CC_NOOV32 && TARGET_V32)
	cc_status.flags |= CC_NO_OVERFLOW;
      return;

    default:
      internal_error ("unknown cc_attr value");
    }

  /* NOTE(review): every switch case above returns and the default
     raises internal_error, so this appears to be unreachable defensive
     code — confirm before relying on it.  */
  CC_STATUS_INIT;
}
+
+/* Return != 0 if the return sequence for the current function is short,
+   like "ret" or "jump [sp+]".  Prior to reloading, we can't tell if
+   registers must be saved, so return 0 then.  */
+
+bool
+cris_simple_epilogue (void)
+{
+  unsigned int regno;
+  /* Only registers numbered below the stack pointer can live in the
+     register-save area, so that bounds the scan below.  */
+  unsigned int reglimit = STACK_POINTER_REGNUM;
+  bool got_really_used = false;
+
+  if (! reload_completed
+      || frame_pointer_needed
+      || get_frame_size () != 0
+      || crtl->args.pretend_args_size
+      || crtl->args.size
+      || crtl->outgoing_args_size
+      || crtl->calls_eh_return
+
+      /* If we're not supposed to emit prologue and epilogue, we must
+         not emit return-type instructions.  */
+      || !TARGET_PROLOGUE_EPILOGUE)
+    return false;
+
+  /* Can't return from stacked return address with v32.  */
+  if (TARGET_V32 && cris_return_address_on_stack ())
+    return false;
+
+  /* The PIC register only counts as saved if it is actually used;
+     a reference may have been optimized out.  */
+  if (crtl->uses_pic_offset_table)
+    {
+      push_topmost_sequence ();
+      got_really_used
+        = reg_used_between_p (pic_offset_table_rtx, get_insns (), NULL_RTX);
+      pop_topmost_sequence ();
+    }
+
+  /* No simple epilogue if there are saved registers.  */
+  for (regno = 0; regno < reglimit; regno++)
+    if (cris_reg_saved_in_regsave_area (regno, got_really_used))
+      return false;
+
+  return true;
+}
+
+/* Emit checking that MEM is aligned for an access in MODE, failing
+   that, executing a "break 8" (or call to abort, if "break 8" is
+   disabled).  */
+
+void
+cris_emit_trap_for_misalignment (rtx mem)
+{
+  rtx addr, reg, ok_label, andop, jmp;
+  int natural_alignment;
+  gcc_assert (MEM_P (mem));
+
+  /* The required alignment equals the access size in bytes.  */
+  natural_alignment = GET_MODE_SIZE (GET_MODE (mem));
+  addr = XEXP (mem, 0);
+  reg = force_reg (Pmode, addr);
+  ok_label = gen_label_rtx ();
+
+  /* This will yield a btstq without a separate register used, usually -
+     with the exception for PRE hoisting the "and" but not the branch
+     around the trap: see gcc.dg/target/cris/sync-3s.c.  */
+  andop = gen_rtx_AND (Pmode, reg, GEN_INT (natural_alignment - 1));
+  emit_cmp_and_jump_insns (force_reg (SImode, andop), const0_rtx, EQ,
+                           NULL_RTX, Pmode, 1, ok_label);
+  jmp = get_last_insn ();
+  gcc_assert (JUMP_P (jmp));
+
+  /* While this isn't mudflap, it is a similar kind of assertion.
+     If PRED_MUDFLAP stops working, use something else or introduce a
+     more suitable assertion predication type.  */
+  predict_insn_def (jmp, PRED_MUDFLAP, TAKEN);
+  expand_builtin_trap ();
+  emit_label (ok_label);
+}
+
+/* Expand a return insn (just one insn) marked as using SRP or stack
+   slot depending on parameter ON_STACK.  */
+
+void
+cris_expand_return (bool on_stack)
+{
+  /* FIXME: emit a parallel with a USE for SRP or the stack-slot, to
+     tell "ret" from "jump [sp+]".  Some, but not all, other parts of
+     GCC expect just (return) to do the right thing when optimizing, so
+     we do that until they're fixed.  Currently, all return insns in a
+     function must be the same (not really a limiting factor) so we need
+     to check that it doesn't change half-way through.  */
+  emit_jump_insn (ret_rtx);
+
+  /* Assert that the return flavor has not flipped since a previous
+     return in this function; mixing the two would emit wrong code for
+     one of them.  */
+  CRIS_ASSERT (cfun->machine->return_type != CRIS_RETINSN_RET || !on_stack);
+  CRIS_ASSERT (cfun->machine->return_type != CRIS_RETINSN_JUMP || on_stack);
+
+  /* Record the flavor used, for the assertions above on later returns.  */
+  cfun->machine->return_type
+    = on_stack ? CRIS_RETINSN_JUMP : CRIS_RETINSN_RET;
+}
+
+/* Compute a (partial) cost for rtx X.  Return true if the complete
+   cost has been computed, and false if subexpressions should be
+   scanned.  In either case, *TOTAL contains the cost result.
+
+   The TARGET_RTX_COSTS worker.  CODE is the rtx code of X, OUTER_CODE
+   the code of the containing expression, OPNO the operand index of X
+   within it, SPEED true when optimizing for speed rather than size.  */
+
+static bool
+cris_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
+                bool speed)
+{
+  switch (code)
+    {
+    case CONST_INT:
+      {
+        HOST_WIDE_INT val = INTVAL (x);
+
+        /* Cost roughly tracks the extra instruction words needed to
+           encode the constant; "quick" immediates are nearly free.  */
+        if (val == 0)
+          *total = 0;
+        else if (val < 32 && val >= -32)
+          *total = 1;
+        /* Eight or 16 bits are a word and cycle more expensive.  */
+        else if (val <= 32767 && val >= -32768)
+          *total = 2;
+        /* A 32-bit constant (or very seldom, unsigned 16 bits) costs
+           another word.  FIXME: This isn't linear to 16 bits.  */
+        else
+          *total = 4;
+        return true;
+      }
+
+    case LABEL_REF:
+      *total = 6;
+      return true;
+
+    case CONST:
+    case SYMBOL_REF:
+      /* Symbolic constants always need a full 32-bit word.  */
+      *total = 6;
+      return true;
+
+    case CONST_DOUBLE:
+      if (x != CONST0_RTX (GET_MODE (x) == VOIDmode ? DImode : GET_MODE (x)))
+        *total = 12;
+      else
+        /* Make 0.0 cheap, else test-insns will not be used.  */
+        *total = 0;
+      return true;
+
+    case MULT:
+      /* If we have one arm of an ADDI, make sure it gets the cost of
+         one insn, i.e. zero cost for this operand, and just the cost
+         of the PLUS, as the insn is created by combine from a PLUS
+         and an ASHIFT, and the MULT cost below would make the
+         combined value be larger than the separate insns.  The insn
+         validity is checked elsewhere by combine.
+
+         FIXME: this case is a stop-gap for 4.3 and 4.4, this whole
+         function should be rewritten.  */
+      if (outer_code == PLUS && cris_biap_index_p (x, false))
+        {
+          *total = 0;
+          return true;
+        }
+
+      /* Identify values that are no powers of two.  Powers of 2 are
+         taken care of already and those values should not be changed.
+         Fixed: exact_log2 returns -1 for a non-power-of-two, so the
+         "< 0" test must apply to its RESULT; the original mistakenly
+         called exact_log2 on the boolean (INTVAL (...) < 0).  */
+      if (!CONST_INT_P (XEXP (x, 1))
+          || exact_log2 (INTVAL (XEXP (x, 1))) < 0)
+        {
+          /* If we have a multiply insn, then the cost is between
+             1 and 2 "fast" instructions.  */
+          if (TARGET_HAS_MUL_INSNS)
+            {
+              *total = COSTS_N_INSNS (1) + COSTS_N_INSNS (1) / 2;
+              return true;
+            }
+
+          /* Estimate as 4 + 4 * #ofbits.  */
+          *total = COSTS_N_INSNS (132);
+          return true;
+        }
+      return false;
+
+    case UDIV:
+    case MOD:
+    case UMOD:
+    case DIV:
+      /* Same exact_log2 parenthesis fix as in the MULT case above.  */
+      if (!CONST_INT_P (XEXP (x, 1))
+          || exact_log2 (INTVAL (XEXP (x, 1))) < 0)
+        {
+          /* Estimate this as 4 + 8 * #of bits.  */
+          *total = COSTS_N_INSNS (260);
+          return true;
+        }
+      return false;
+
+    case AND:
+      if (CONST_INT_P (XEXP (x, 1))
+          /* Two constants may actually happen before optimization.  */
+          && !CONST_INT_P (XEXP (x, 0))
+          && !satisfies_constraint_I (XEXP (x, 1)))
+        {
+          /* Charge the inner operand plus a word for the constant.  */
+          *total
+            = (rtx_cost (XEXP (x, 0), (enum rtx_code) outer_code,
+                         opno, speed) + 2
+               + 2 * GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))));
+          return true;
+        }
+      return false;
+
+    case ZERO_EXTRACT:
+      /* Only cheap as a COMPARE operand; otherwise scan subexpressions.  */
+      if (outer_code != COMPARE)
+        return false;
+      /* fall through */
+
+    case ZERO_EXTEND: case SIGN_EXTEND:
+      /* Extension is folded into the operand access; charge only that.  */
+      *total = rtx_cost (XEXP (x, 0), (enum rtx_code) outer_code, opno, speed);
+      return true;
+
+    default:
+      return false;
+    }
+}
+
+/* The ADDRESS_COST worker.  Returns an estimated cost for using X as
+   a memory address; lower is cheaper.  */
+
+static int
+cris_address_cost (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
+                   addr_space_t as ATTRIBUTE_UNUSED,
+                   bool speed ATTRIBUTE_UNUSED)
+{
+  /* The metric to use for the cost-macros is unclear.
+     The metric used here is (the number of cycles needed) / 2,
+     where we consider equal a cycle for a word of code and a cycle to
+     read memory.  FIXME: Adding "+ 1" to all values would avoid
+     returning 0, as tree-ssa-loop-ivopts.c as of r128272 "normalizes"
+     0 to 1, thereby giving equal costs to [rN + rM] and [rN].
+     Unfortunately(?) such a hack would expose other pessimizations,
+     at least with g++.dg/tree-ssa/ivopts-1.C, adding insns to the
+     loop there, without apparent reason.  */
+
+  /* The cheapest addressing modes get 0, since nothing extra is needed.  */
+  if (cris_base_or_autoincr_p (x, false))
+    return 0;
+
+  /* An indirect mem must be a DIP.  This means two bytes extra for code,
+     and 4 bytes extra for memory read, i.e.  (2 + 4) / 2.  */
+  if (MEM_P (x))
+    return (2 + 4) / 2;
+
+  /* Assume (2 + 4) / 2 for a single constant; a dword, since it needs
+     an extra DIP prefix and 4 bytes of constant in most cases.  */
+  if (CONSTANT_P (x))
+    return (2 + 4) / 2;
+
+  /* Handle BIAP and BDAP prefixes.  */
+  if (GET_CODE (x) == PLUS)
+    {
+      /* tem1 is the candidate base (or MULT index), tem2 the offset.  */
+      rtx tem1 = XEXP (x, 0);
+      rtx tem2 = XEXP (x, 1);
+
+      /* Local extended canonicalization rule: the first operand must
+         be REG, unless it's an operation (MULT).  */
+      if (!REG_P (tem1) && GET_CODE (tem1) != MULT)
+        tem1 = tem2, tem2 = XEXP (x, 0);
+
+      /* We'll "assume" we have canonical RTX now.  */
+      gcc_assert (REG_P (tem1) || GET_CODE (tem1) == MULT);
+
+      /* A BIAP is 2 extra bytes for the prefix insn, nothing more.  We
+         recognize the typical MULT which is always in tem1 because of
+         insn canonicalization.  */
+      if ((GET_CODE (tem1) == MULT && cris_biap_index_p (tem1, false))
+          || REG_P (tem2))
+        return 2 / 2;
+
+      /* A BDAP (quick) is 2 extra bytes.  Any constant operand to the
+         PLUS is always found in tem2.  */
+      if (CONST_INT_P (tem2) && INTVAL (tem2) < 128 && INTVAL (tem2) >= -128)
+        return 2 / 2;
+
+      /* A BDAP -32768 .. 32767 is like BDAP quick, but with 2 extra
+         bytes.  */
+      if (satisfies_constraint_L (tem2))
+        return (2 + 2) / 2;
+
+      /* A BDAP with some other constant is 2 bytes extra.  */
+      if (CONSTANT_P (tem2))
+        return (2 + 2 + 2) / 2;
+
+      /* BDAP with something indirect should have a higher cost than
+         BIAP with register.  FIXME: Should it cost like a MEM or more?  */
+      return (2 + 2 + 2) / 2;
+    }
+
+  /* What else?  Return a high cost.  It matters only for valid
+     addressing modes.  */
+  return 10;
+}
+
+/* Check various objections to the side-effect.  Used in the test-part
+   of an anonymous insn describing an insn with a possible side-effect.
+   Returns nonzero if the implied side-effect is ok.
+
+   code     : PLUS or MULT
+   ops      : An array of rtx:es.  lreg, rreg, rval,
+              The variables multop and other_op are indexes into this,
+              or -1 if they are not applicable.
+   lreg     : The register that gets assigned in the side-effect.
+   rreg     : One register in the side-effect expression
+   rval     : The other register, or an int.
+   multop   : An integer to multiply rval with.
+   other_op : One of the entities of the main effect,
+              whose mode we must consider.  */
+
+int
+cris_side_effect_mode_ok (enum rtx_code code, rtx *ops,
+                          int lreg, int rreg, int rval,
+                          int multop, int other_op)
+{
+  /* Find what value to multiply with, for rx =ry + rz * n.  */
+  int mult = multop < 0 ? 1 : INTVAL (ops[multop]);
+
+  rtx reg_rtx = ops[rreg];
+  rtx val_rtx = ops[rval];
+
+  /* The operands may be swapped.  Canonicalize them in reg_rtx and
+     val_rtx, where reg_rtx always is a reg (for this constraint to
+     match).  */
+  if (! cris_base_p (reg_rtx, reload_in_progress || reload_completed))
+    reg_rtx = val_rtx, val_rtx = ops[rreg];
+
+  /* Don't forget to check that reg_rtx really is a reg.  If it isn't,
+     we have no business.  */
+  if (! cris_base_p (reg_rtx, reload_in_progress || reload_completed))
+    return 0;
+
+  /* Don't do this when -mno-split.  */
+  if (!TARGET_SIDE_EFFECT_PREFIXES)
+    return 0;
+
+  /* The mult expression may be hidden in lreg.  FIXME: Add more
+     commentary about that.  */
+  if (GET_CODE (val_rtx) == MULT)
+    {
+      mult = INTVAL (XEXP (val_rtx, 1));
+      val_rtx = XEXP (val_rtx, 0);
+      code = MULT;
+    }
+
+  /* First check the "other operand".  */
+  if (other_op >= 0)
+    {
+      /* Side-effect addressing handles at most word-sized accesses.  */
+      if (GET_MODE_SIZE (GET_MODE (ops[other_op])) > UNITS_PER_WORD)
+        return 0;
+
+      /* Check if the lvalue register is the same as the "other
+         operand".  If so, the result is undefined and we shouldn't do
+         this.  FIXME: Check again.  */
+      if ((cris_base_p (ops[lreg], reload_in_progress || reload_completed)
+           && cris_base_p (ops[other_op],
+                           reload_in_progress || reload_completed)
+           && REGNO (ops[lreg]) == REGNO (ops[other_op]))
+          || rtx_equal_p (ops[other_op], ops[lreg]))
+        return 0;
+    }
+
+  /* Do not accept frame_pointer_rtx as any operand.  */
+  if (ops[lreg] == frame_pointer_rtx || ops[rreg] == frame_pointer_rtx
+      || ops[rval] == frame_pointer_rtx
+      || (other_op >= 0 && ops[other_op] == frame_pointer_rtx))
+    return 0;
+
+  if (code == PLUS
+      && ! cris_base_p (val_rtx, reload_in_progress || reload_completed))
+    {
+
+      /* Do not allow rx = rx + n if a normal add or sub with same size
+         would do.  */
+      if (rtx_equal_p (ops[lreg], reg_rtx)
+          && CONST_INT_P (val_rtx)
+          && (INTVAL (val_rtx) <= 63 && INTVAL (val_rtx) >= -63))
+        return 0;
+
+      /* Check allowed cases, like [r(+)?].[bwd] and const.  */
+      if (CONSTANT_P (val_rtx))
+        return 1;
+
+      if (MEM_P (val_rtx)
+          && cris_base_or_autoincr_p (XEXP (val_rtx, 0),
+                                      reload_in_progress || reload_completed))
+        return 1;
+
+      if (GET_CODE (val_rtx) == SIGN_EXTEND
+          && MEM_P (XEXP (val_rtx, 0))
+          && cris_base_or_autoincr_p (XEXP (XEXP (val_rtx, 0), 0),
+                                      reload_in_progress || reload_completed))
+        return 1;
+
+      /* If we got here, it's not a valid addressing mode.  */
+      return 0;
+    }
+  else if (code == MULT
+           || (code == PLUS
+               && cris_base_p (val_rtx,
+                               reload_in_progress || reload_completed)))
+    {
+      /* Do not allow rx = rx + ry.S, since it doesn't give better code.  */
+      if (rtx_equal_p (ops[lreg], reg_rtx)
+          || (mult == 1 && rtx_equal_p (ops[lreg], val_rtx)))
+        return 0;
+
+      /* Do not allow bad multiply-values.  */
+      if (mult != 1 && mult != 2 && mult != 4)
+        return 0;
+
+      /* Only allow  r + ...  */
+      if (! cris_base_p (reg_rtx, reload_in_progress || reload_completed))
+        return 0;
+
+      /* If we got here, all seems ok.
+         (All checks need to be done above).  */
+      return 1;
+    }
+
+  /* If we get here, the caller got its initial tests wrong.
+     internal_error does not return.  */
+  internal_error ("internal error: cris_side_effect_mode_ok with bad operands");
+}
+
+/* Whether next_cc0_user of insn is LE or GT or requires a real compare
+   insn for other reasons.  */
+
+bool
+cris_cc0_user_requires_cmp (rtx insn)
+{
+  rtx cc0_user = NULL;
+  rtx body;
+  rtx set;
+
+  gcc_assert (insn != NULL);
+
+  /* Only V32 has the carry/overflow behavior that makes this matter.  */
+  if (!TARGET_V32)
+    return false;
+
+  cc0_user = next_cc0_user (insn);
+  if (cc0_user == NULL)
+    return false;
+
+  body = PATTERN (cc0_user);
+  set = single_set (cc0_user);
+
+  /* Users can be sCC and bCC.  */
+  if (JUMP_P (cc0_user)
+      && GET_CODE (body) == SET
+      && SET_DEST (body) == pc_rtx
+      && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
+      && XEXP (XEXP (SET_SRC (body), 0), 0) == cc0_rtx)
+    {
+      /* Conditional branch: only GT and LE need the real compare.  */
+      return
+        GET_CODE (XEXP (SET_SRC (body), 0)) == GT
+        || GET_CODE (XEXP (SET_SRC (body), 0)) == LE;
+    }
+  else if (set)
+    {
+      /* An sCC-style setter.  NOTE(review): this reads SET_SRC (body)
+         rather than SET_SRC (set); it appears to rely on single_set
+         only succeeding here when BODY is that very SET -- confirm
+         for patterns where BODY is a PARALLEL with clobbers.  */
+      return
+        GET_CODE (SET_SRC (body)) == GT
+        || GET_CODE (SET_SRC (body)) == LE;
+    }
+
+  /* Any other kind of cc0 user is unexpected.  */
+  gcc_unreachable ();
+}
+
+/* The function reg_overlap_mentioned_p in CVS (still as of 2001-05-16)
+   does not handle the case where the IN operand is strict_low_part; it
+   does handle it for X.  Test-case in Axis-20010516.  This function takes
+   care of that for THIS port.  FIXME: strict_low_part is going away
+   anyway.  */
+
+static int
+cris_reg_overlap_mentioned_p (rtx x, rtx in)
+{
+  rtx target = in;
+
+  /* Unwrap a STRICT_LOW_PART destination; reg_overlap_mentioned_p
+     copes with the wrapper on X but not on IN.  */
+  if (GET_CODE (target) == STRICT_LOW_PART)
+    target = XEXP (target, 0);
+
+  return reg_overlap_mentioned_p (x, target);
+}
+
+/* Return TRUE iff X is a CONST valid for e.g. indexing.
+   ANY_OPERAND is 0 if X is in a CALL_P insn or movsi, 1
+   elsewhere.  */
+
+bool
+cris_valid_pic_const (const_rtx x, bool any_operand)
+{
+  gcc_assert (flag_pic);
+
+  /* Plain numeric constants are always valid.  */
+  switch (GET_CODE (x))
+    {
+    case CONST_INT:
+    case CONST_DOUBLE:
+      return true;
+    default:
+      ;
+    }
+
+  if (GET_CODE (x) != CONST)
+    return false;
+
+  x = XEXP (x, 0);
+
+  /* Handle (const (plus (unspec .. UNSPEC_GOTREL) (const_int ...))).  */
+  if (GET_CODE (x) == PLUS
+      && GET_CODE (XEXP (x, 0)) == UNSPEC
+      && (XINT (XEXP (x, 0), 1) == CRIS_UNSPEC_GOTREL
+          || XINT (XEXP (x, 0), 1) == CRIS_UNSPEC_PCREL)
+      && CONST_INT_P (XEXP (x, 1)))
+    x = XEXP (x, 0);
+
+  if (GET_CODE (x) == UNSPEC)
+    switch (XINT (x, 1))
+      {
+      /* A PCREL operand is only valid for call and movsi.  */
+      case CRIS_UNSPEC_PLT_PCREL:
+      case CRIS_UNSPEC_PCREL:
+        return !any_operand;
+
+      case CRIS_UNSPEC_PLT_GOTREL:
+      case CRIS_UNSPEC_PLTGOTREAD:
+      case CRIS_UNSPEC_GOTREAD:
+      case CRIS_UNSPEC_GOTREL:
+        return true;
+      default:
+        gcc_unreachable ();
+      }
+
+  /* A bare (const ...) without a PIC unspec is only valid when it
+     holds no symbol needing PIC treatment.  */
+  return cris_pic_symbol_type_of (x) == cris_no_symbol;
+}
+
+/* Helper function to find the right PIC-type symbol to generate,
+   given the original (non-PIC) representation.  */
+
+enum cris_pic_symbol_type
+cris_pic_symbol_type_of (const_rtx x)
+{
+  switch (GET_CODE (x))
+    {
+    case SYMBOL_REF:
+      /* Local symbols are reachable PC-relatively; others go through
+         the GOT.  */
+      return SYMBOL_REF_LOCAL_P (x)
+        ? cris_rel_symbol : cris_got_symbol;
+
+    case LABEL_REF:
+      return cris_rel_symbol;
+
+    case CONST:
+      return cris_pic_symbol_type_of (XEXP (x, 0));
+
+    case PLUS:
+    case MINUS:
+      {
+        enum cris_pic_symbol_type t1 = cris_pic_symbol_type_of (XEXP (x, 0));
+        enum cris_pic_symbol_type t2 = cris_pic_symbol_type_of (XEXP (x, 1));
+
+        /* At most one side may be symbolic; symbol +- symbol is not
+           a valid constant.  */
+        gcc_assert (t1 == cris_no_symbol || t2 == cris_no_symbol);
+
+        /* Fixed: test T2 as well; the original tested T1 twice, so a
+           GOT symbol appearing as the second operand was missed.  */
+        if (t1 == cris_got_symbol || t2 == cris_got_symbol)
+          return cris_got_symbol_needing_fixup;
+
+        return t1 != cris_no_symbol ? t1 : t2;
+      }
+
+    case CONST_INT:
+    case CONST_DOUBLE:
+      return cris_no_symbol;
+
+    case UNSPEC:
+      /* Likely an offsettability-test attempting to add a constant to
+         a GOTREAD symbol, which can't be handled.  */
+      return cris_invalid_pic_symbol;
+
+    default:
+      fatal_insn ("unrecognized supposed constant", x);
+    }
+
+  /* Not reached: every case returns, and fatal_insn does not return.  */
+  gcc_unreachable ();
+}
+
+/* The LEGITIMATE_PIC_OPERAND_P worker.  */
+
+int
+cris_legitimate_pic_operand (rtx x)
+{
+  /* Symbols are not valid PIC operands as-is; only suitably wrapped
+     constants pass.  X is treated as a general operand here.  */
+  return cris_valid_pic_const (x, true) ? 1 : 0;
+}
+
+/* Queue an .ident string in the queue of top-level asm statements.
+   If the front-end is done, we must be being called from toplev.c.
+   In that case, do nothing.  */
+void
+cris_asm_output_ident (const char *string)
+{
+  /* Only emit while the front-end is still parsing; later on the
+     directive would be misplaced.  */
+  if (cgraph_state == CGRAPH_STATE_PARSING)
+    default_asm_output_ident_directive (string);
+}
+
+/* The ASM_OUTPUT_CASE_END worker.  Emits the trailing offset word for
+   the default label of a casejump table NUM on STREAM.  */
+
+void
+cris_asm_output_case_end (FILE *stream, int num, rtx table)
+{
+  /* Step back, over the label for the table, to the actual casejump and
+     assert that we find only what's expected.  */
+  rtx whole_jump_insn = prev_nonnote_nondebug_insn (table);
+  gcc_assert (whole_jump_insn != NULL_RTX && LABEL_P (whole_jump_insn));
+  whole_jump_insn = prev_nonnote_nondebug_insn (whole_jump_insn);
+  gcc_assert (whole_jump_insn != NULL_RTX
+              && (JUMP_P (whole_jump_insn)
+                  || (TARGET_V32 && INSN_P (whole_jump_insn)
+                      && GET_CODE (PATTERN (whole_jump_insn)) == SEQUENCE)));
+  /* Get the pattern of the casejump, so we can extract the default label.  */
+  whole_jump_insn = PATTERN (whole_jump_insn);
+
+  if (TARGET_V32)
+    {
+      /* This can be a SEQUENCE, meaning the delay-slot of the jump is
+         filled.  We also output the offset word a little differently.  */
+      rtx parallel_jump
+        = (GET_CODE (whole_jump_insn) == SEQUENCE
+           ? PATTERN (XVECEXP (whole_jump_insn, 0, 0)) : whole_jump_insn);
+
+      /* The default label is dug out of the casejump's IF_THEN_ELSE;
+         for V32 the offset is relative to this very word (".").  */
+      asm_fprintf (stream,
+                   "\t.word %LL%d-.%s\n",
+                   CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (XVECEXP
+                                                        (parallel_jump, 0, 0),
+                                                        1), 2), 0)),
+                   (TARGET_PDEBUG ? "; default" : ""));
+      return;
+    }
+
+  /* Non-V32: the offset is relative to the table label NUM.  */
+  asm_fprintf (stream,
+               "\t.word %LL%d-%LL%d%s\n",
+               CODE_LABEL_NUMBER (XEXP
+                                  (XEXP
+                                   (XEXP (XVECEXP (whole_jump_insn, 0, 0), 1),
+                                    2), 0)),
+               num,
+               (TARGET_PDEBUG ? "; default" : ""));
+}
+
+/* The TARGET_OPTION_OVERRIDE worker.
+   As is the norm, this also parses -mfoo=bar type parameters.  */
+
+static void
+cris_option_override (void)
+{
+  if (cris_max_stackframe_str)
+    {
+      cris_max_stackframe = atoi (cris_max_stackframe_str);
+
+      /* Do some sanity checking.  */
+      if (cris_max_stackframe < 0 || cris_max_stackframe > 0x20000000)
+        internal_error ("-max-stackframe=%d is not usable, not between 0 and %d",
+                        cris_max_stackframe, 0x20000000);
+    }
+
+  /* Let "-metrax4" and "-metrax100" change the cpu version.  */
+  if (TARGET_SVINTO && cris_cpu_version < CRIS_CPU_SVINTO)
+    cris_cpu_version = CRIS_CPU_SVINTO;
+  else if (TARGET_ETRAX4_ADD && cris_cpu_version < CRIS_CPU_ETRAX4)
+    cris_cpu_version = CRIS_CPU_ETRAX4;
+
+  /* Parse -march=... and its synonym, the deprecated -mcpu=...  */
+  if (cris_cpu_str)
+    {
+      /* "vN" spells version N directly; the named spellings are
+         checked below; anything else gives -1 and an error.  */
+      cris_cpu_version
+        = (*cris_cpu_str == 'v' ? atoi (cris_cpu_str + 1) : -1);
+
+      if (strcmp ("etrax4", cris_cpu_str) == 0)
+        cris_cpu_version = 3;
+
+      if (strcmp ("svinto", cris_cpu_str) == 0
+          || strcmp ("etrax100", cris_cpu_str) == 0)
+        cris_cpu_version = 8;
+
+      if (strcmp ("ng", cris_cpu_str) == 0
+          || strcmp ("etrax100lx", cris_cpu_str) == 0)
+        cris_cpu_version = 10;
+
+      if (cris_cpu_version < 0 || cris_cpu_version > 32)
+        error ("unknown CRIS version specification in -march= or -mcpu= : %s",
+               cris_cpu_str);
+
+      /* Set the target flags.  */
+      if (cris_cpu_version >= CRIS_CPU_ETRAX4)
+        target_flags |= MASK_ETRAX4_ADD;
+
+      /* If this is Svinto or higher, align for 32 bit accesses.  */
+      if (cris_cpu_version >= CRIS_CPU_SVINTO)
+        target_flags
+          |= (MASK_SVINTO | MASK_ALIGN_BY_32
+              | MASK_STACK_ALIGN | MASK_CONST_ALIGN
+              | MASK_DATA_ALIGN);
+
+      /* Note that we do not add new flags when it can be completely
+         described with a macro that uses -mcpu=X.  So
+         TARGET_HAS_MUL_INSNS is (cris_cpu_version >= CRIS_CPU_NG).  */
+    }
+
+  if (cris_tune_str)
+    {
+      /* -mtune accepts the same spellings as -march/-mcpu.  */
+      int cris_tune
+        = (*cris_tune_str == 'v' ? atoi (cris_tune_str + 1) : -1);
+
+      if (strcmp ("etrax4", cris_tune_str) == 0)
+        cris_tune = 3;
+
+      if (strcmp ("svinto", cris_tune_str) == 0
+          || strcmp ("etrax100", cris_tune_str) == 0)
+        cris_tune = 8;
+
+      if (strcmp ("ng", cris_tune_str) == 0
+          || strcmp ("etrax100lx", cris_tune_str) == 0)
+        cris_tune = 10;
+
+      if (cris_tune < 0 || cris_tune > 32)
+        error ("unknown CRIS cpu version specification in -mtune= : %s",
+               cris_tune_str);
+
+      if (cris_tune >= CRIS_CPU_SVINTO)
+        /* We have currently nothing more to tune than alignment for
+           memory accesses.  */
+        target_flags
+          |= (MASK_STACK_ALIGN | MASK_CONST_ALIGN
+              | MASK_DATA_ALIGN | MASK_ALIGN_BY_32);
+    }
+
+  /* V32 drops the side-effect addressing prefixes and the mul bug.  */
+  if (cris_cpu_version >= CRIS_CPU_V32)
+    target_flags &= ~(MASK_SIDE_EFFECT_PREFIXES|MASK_MUL_BUG);
+
+  if (flag_pic)
+    {
+      /* Use error rather than warning, so invalid use is easily
+         detectable.  Still change to the values we expect, to avoid
+         further errors.  */
+      if (! TARGET_LINUX)
+        {
+          error ("-fPIC and -fpic are not supported in this configuration");
+          flag_pic = 0;
+        }
+
+      /* Turn off function CSE.  We need to have the addresses reach the
+         call expanders to get PLT-marked, as they could otherwise be
+         compared against zero directly or indirectly.  After visiting the
+         call expanders they will then be cse:ed, as the call expanders
+         force_reg the addresses, effectively forcing flag_no_function_cse
+         to 0.  */
+      flag_no_function_cse = 1;
+    }
+
+  /* Set the per-function-data initializer.  */
+  init_machine_status = cris_init_machine_status;
+}
+
+/* The TARGET_ASM_OUTPUT_MI_THUNK worker.  Emits a thunk that adjusts
+   the first argument register by DELTA and then jumps to FUNCDECL.
+   VCALL_OFFSET is unused (not supported by this implementation).  */
+
+static void
+cris_asm_output_mi_thunk (FILE *stream,
+                          tree thunkdecl ATTRIBUTE_UNUSED,
+                          HOST_WIDE_INT delta,
+                          HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
+                          tree funcdecl)
+{
+  /* Make sure unwind info is emitted for the thunk if needed.  */
+  final_start_function (emit_barrier (), stream, 1);
+
+  /* Adjust the "this" pointer in place; nothing emitted for DELTA == 0.  */
+  if (delta > 0)
+    fprintf (stream, "\tadd%s " HOST_WIDE_INT_PRINT_DEC ",$%s\n",
+             ADDITIVE_SIZE_MODIFIER (delta), delta,
+             reg_names[CRIS_FIRST_ARG_REG]);
+  else if (delta < 0)
+    fprintf (stream, "\tsub%s " HOST_WIDE_INT_PRINT_DEC ",$%s\n",
+             ADDITIVE_SIZE_MODIFIER (-delta), -delta,
+             reg_names[CRIS_FIRST_ARG_REG]);
+
+  if (flag_pic)
+    {
+      const char *name = XSTR (XEXP (DECL_RTL (funcdecl), 0), 0);
+
+      name = (* targetm.strip_name_encoding) (name);
+
+      if (TARGET_V32)
+        {
+          fprintf (stream, "\tba ");
+          assemble_name (stream, name);
+          fprintf (stream, "%s\n", CRIS_PLT_PCOFFSET_SUFFIX);
+        }
+      else
+        {
+          /* NOTE(review): unlike the "ba" above, "add.d" here and
+             "jump" below lack a leading tab -- confirm this is
+             intended and accepted by the assembler.  */
+          fprintf (stream, "add.d ");
+          assemble_name (stream, name);
+          fprintf (stream, "%s,$pc\n", CRIS_PLT_PCOFFSET_SUFFIX);
+        }
+    }
+  else
+    {
+      fprintf (stream, "jump ");
+      assemble_name (stream, XSTR (XEXP (DECL_RTL (funcdecl), 0), 0));
+      fprintf (stream, "\n");
+
+      /* V32 has a delay slot after the jump; fill it with a nop.  */
+      if (TARGET_V32)
+        fprintf (stream, "\tnop\n");
+    }
+
+  final_end_function ();
+}
+
+/* Boilerplate emitted at start of file.
+
+   NO_APP *only at file start* means faster assembly.  It also means
+   comments are not allowed.  In some cases comments will be output
+   for debugging purposes.  Make sure they are allowed then.  */
+static void
+cris_file_start (void)
+{
+  /* These expressions can vary at run time, so we cannot put
+     them into TARGET_INITIALIZER.  */
+  bool comments_wanted = TARGET_PDEBUG || flag_print_asm_name;
+
+  targetm.asm_file_start_app_off = !comments_wanted;
+
+  default_file_start ();
+}
+
+/* Output that goes at the end of the file, similarly.  */
+
+static void
+cris_file_end (void)
+{
+  /* For CRIS, the default is to assume *no* executable stack, so the
+     executable-stack note is emitted only when trampolines force it,
+     and only on Linux targets.  */
+  if (!TARGET_LINUX || !trampolines_created)
+    return;
+
+  file_end_indicate_exec_stack ();
+}
+
+/* Rename the function calls for integer multiply and divide.  */
+static void
+cris_init_libfuncs (void)
+{
+  /* Table of the SImode arithmetic helpers and their CRIS names.  */
+  static const struct { optab op; const char *name; } si_funcs[] =
+    {
+      { smul_optab, "__Mul" },
+      { sdiv_optab, "__Div" },
+      { udiv_optab, "__Udiv" },
+      { smod_optab, "__Mod" },
+      { umod_optab, "__Umod" },
+    };
+  size_t i;
+
+  for (i = 0; i < sizeof (si_funcs) / sizeof (si_funcs[0]); i++)
+    set_optab_libfunc (si_funcs[i].op, SImode, si_funcs[i].name);
+
+  /* Atomic data being unaligned is unfortunately a reality.
+     Deal with it.  */
+  if (!TARGET_ATOMICS_MAY_CALL_LIBFUNCS)
+    return;
+
+  set_optab_libfunc (sync_compare_and_swap_optab, SImode,
+                     "__cris_atcmpxchgr32");
+  set_optab_libfunc (sync_compare_and_swap_optab, HImode,
+                     "__cris_atcmpxchgr16");
+}
+
+/* The INIT_EXPANDERS worker sets the per-function-data initializer and
+   mark functions.  */
+
+void
+cris_init_expanders (void)
+{
+  /* Deliberately empty for now; kept as the INIT_EXPANDERS hook so
+     per-function expander state can be added later without touching
+     other files.  */
+}
+
+/* Zero initialization is OK for all current fields.  */
+
+static struct machine_function *
+cris_init_machine_status (void)
+{
+  /* Allocate in GC space, cleared, so every field starts out zero.  */
+  struct machine_function *machine = ggc_alloc_cleared_machine_function ();
+
+  return machine;
+}
+
+/* Split a 2 word move (DI or presumably DF) into component parts.
+   Originally a copy of gen_split_move_double in m32r.c.
+
+   OPERANDS[0] is the destination and OPERANDS[1] the source, both
+   two-word operands.  Returns the emitted insn sequence implementing
+   the move as two word-sized moves.  */
+
+rtx
+cris_split_movdx (rtx *operands)
+{
+  enum machine_mode mode = GET_MODE (operands[0]);
+  rtx dest = operands[0];
+  rtx src = operands[1];
+  rtx val;
+
+  /* We used to have to handle (SUBREG (MEM)) here, but that should no
+     longer happen; after reload there are no SUBREGs any more, and we're
+     only called after reload.  */
+  CRIS_ASSERT (GET_CODE (dest) != SUBREG && GET_CODE (src) != SUBREG);
+
+  start_sequence ();
+  if (REG_P (dest))
+    {
+      int dregno = REGNO (dest);
+
+      /* Reg-to-reg copy.  */
+      if (REG_P (src))
+        {
+          int sregno = REGNO (src);
+
+          /* Nonzero when the destination's low word would clobber the
+             source's high word if copied first.  */
+          int reverse = (dregno == sregno + 1);
+
+          /* We normally copy the low-numbered register first.  However, if
+             the first register operand 0 is the same as the second register of
+             operand 1, we must copy in the opposite order.  */
+          emit_insn (gen_rtx_SET (VOIDmode,
+                                  operand_subword (dest, reverse, TRUE, mode),
+                                  operand_subword (src, reverse, TRUE, mode)));
+
+          emit_insn (gen_rtx_SET (VOIDmode,
+                                  operand_subword (dest, !reverse, TRUE, mode),
+                                  operand_subword (src, !reverse, TRUE, mode)));
+        }
+      /* Constant-to-reg copy.  */
+      else if (CONST_INT_P (src) || GET_CODE (src) == CONST_DOUBLE)
+        {
+          rtx words[2];
+          split_double (src, &words[0], &words[1]);
+          emit_insn (gen_rtx_SET (VOIDmode,
+                                  operand_subword (dest, 0, TRUE, mode),
+                                  words[0]));
+
+          emit_insn (gen_rtx_SET (VOIDmode,
+                                  operand_subword (dest, 1, TRUE, mode),
+                                  words[1]));
+        }
+      /* Mem-to-reg copy.  */
+      else if (MEM_P (src))
+        {
+          /* If the high-address word is used in the address, we must load it
+             last.  Otherwise, load it first.  */
+          rtx addr = XEXP (src, 0);
+          int reverse
+            = (refers_to_regno_p (dregno, dregno + 1, addr, NULL) != 0);
+
+          /* The original code implies that we can't do
+             move.x [rN+],rM  move.x [rN],rM+1
+             when rN is dead, because of REG_NOTES damage.  That is
+             consistent with what I've seen, so don't try it.
+
+             We have two different cases here; if the addr is POST_INC,
+             just pass it through, otherwise add constants.  */
+
+          if (GET_CODE (addr) == POST_INC)
+            {
+              rtx mem;
+              rtx insn;
+
+              /* Whenever we emit insns with post-incremented
+                 addresses ourselves, we must add a post-inc note
+                 manually.  */
+              mem = change_address (src, SImode, addr);
+              insn
+                = gen_rtx_SET (VOIDmode,
+                               operand_subword (dest, 0, TRUE, mode), mem);
+              insn = emit_insn (insn);
+              if (GET_CODE (XEXP (mem, 0)) == POST_INC)
+                REG_NOTES (insn)
+                  = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0),
+                                     REG_NOTES (insn));
+
+              /* Copy the MEM so the two loads don't share rtl.  */
+              mem = copy_rtx (mem);
+              insn
+                = gen_rtx_SET (VOIDmode,
+                               operand_subword (dest, 1, TRUE, mode), mem);
+              insn = emit_insn (insn);
+              if (GET_CODE (XEXP (mem, 0)) == POST_INC)
+                REG_NOTES (insn)
+                  = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0),
+                                     REG_NOTES (insn));
+            }
+          else
+            {
+              /* Make sure we don't get any other addresses with
+                 embedded postincrements.  They should be stopped in
+                 GO_IF_LEGITIMATE_ADDRESS, but we're here for your
+                 safety.  */
+              if (side_effects_p (addr))
+                fatal_insn ("unexpected side-effects in address", addr);
+
+              emit_insn (gen_rtx_SET
+                         (VOIDmode,
+                          operand_subword (dest, reverse, TRUE, mode),
+                          change_address
+                          (src, SImode,
+                           plus_constant (Pmode, addr,
+                                          reverse * UNITS_PER_WORD))));
+              emit_insn (gen_rtx_SET
+                         (VOIDmode,
+                          operand_subword (dest, ! reverse, TRUE, mode),
+                          change_address
+                          (src, SImode,
+                           plus_constant (Pmode, addr,
+                                          (! reverse) *
+                                          UNITS_PER_WORD))));
+            }
+        }
+      else
+        internal_error ("unknown src");
+    }
+  /* Reg-to-mem copy or clear mem.  */
+  else if (MEM_P (dest)
+           && (REG_P (src)
+               || src == const0_rtx
+               || src == CONST0_RTX (DFmode)))
+    {
+      rtx addr = XEXP (dest, 0);
+
+      if (GET_CODE (addr) == POST_INC)
+        {
+          rtx mem;
+          rtx insn;
+
+          /* Whenever we emit insns with post-incremented addresses
+             ourselves, we must add a post-inc note manually.  */
+          mem = change_address (dest, SImode, addr);
+          insn
+            = gen_rtx_SET (VOIDmode,
+                           mem, operand_subword (src, 0, TRUE, mode));
+          insn = emit_insn (insn);
+          if (GET_CODE (XEXP (mem, 0)) == POST_INC)
+            REG_NOTES (insn)
+              = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0),
+                                 REG_NOTES (insn));
+
+          /* Copy the MEM so the two stores don't share rtl.  */
+          mem = copy_rtx (mem);
+          insn
+            = gen_rtx_SET (VOIDmode,
+                           mem,
+                           operand_subword (src, 1, TRUE, mode));
+          insn = emit_insn (insn);
+          if (GET_CODE (XEXP (mem, 0)) == POST_INC)
+            REG_NOTES (insn)
+              = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0),
+                                 REG_NOTES (insn));
+        }
+      else
+        {
+          /* Make sure we don't get any other addresses with embedded
+             postincrements.  They should be stopped in
+             GO_IF_LEGITIMATE_ADDRESS, but we're here for your safety.  */
+          if (side_effects_p (addr))
+            fatal_insn ("unexpected side-effects in address", addr);
+
+          emit_insn (gen_rtx_SET
+                     (VOIDmode,
+                      change_address (dest, SImode, addr),
+                      operand_subword (src, 0, TRUE, mode)));
+
+          emit_insn (gen_rtx_SET
+                     (VOIDmode,
+                      change_address (dest, SImode,
+                                      plus_constant (Pmode, addr,
+                                                     UNITS_PER_WORD)),
+                      operand_subword (src, 1, TRUE, mode)));
+        }
+    }
+
+  else
+    internal_error ("unknown dest");
+
+  /* Hand back the collected insn sequence.  */
+  val = get_insns ();
+  end_sequence ();
+  return val;
+}
+
+/* The expander for the prologue pattern name.
+
+   Emits the frame-building insns, top of frame first: pretend-args
+   stores (stdarg registers), saved SRP, saved frame pointer, local
+   variables, saved call-saved registers (a contiguous r0..N run via a
+   single movem when possible), then outgoing-argument space, and
+   finally the PIC register load when the GOT pointer is live.  Insns
+   that must be visible to the dwarf2 frame machinery are marked
+   RTX_FRAME_RELATED_P.  */
+
+void
+cris_expand_prologue (void)
+{
+  int regno;
+  int size = get_frame_size ();
+  /* Shorten the used name for readability. */
+  int cfoa_size = crtl->outgoing_args_size;
+  /* Highest register of the contiguous r0..N run saved with movem;
+     -1 while no such run has been found.  */
+  int last_movem_reg = -1;
+  /* Running byte count of frame built so far, checked against
+     -mmax-stackframe at the end.  */
+  int framesize = 0;
+  rtx mem, insn;
+  int return_address_on_stack = cris_return_address_on_stack ();
+  int got_really_used = false;
+  int n_movem_regs = 0;
+  int pretend = crtl->args.pretend_args_size;
+
+  /* Don't do anything if no prologues or epilogues are wanted. */
+  if (!TARGET_PROLOGUE_EPILOGUE)
+    return;
+
+  CRIS_ASSERT (size >= 0);
+
+  if (crtl->uses_pic_offset_table)
+    {
+      /* A reference may have been optimized out (like the abort () in
+	 fde_split in unwind-dw2-fde.c, at least 3.2.1) so check that
+	 it's still used. */
+      push_topmost_sequence ();
+      got_really_used
+	= reg_used_between_p (pic_offset_table_rtx, get_insns (), NULL_RTX);
+      pop_topmost_sequence ();
+    }
+
+  /* Align the size to what's best for the CPU model. */
+  if (TARGET_STACK_ALIGN)
+    size = TARGET_ALIGN_BY_32 ? (size + 3) & ~3 : (size + 1) & ~1;
+
+  if (pretend)
+    {
+      /* See also cris_setup_incoming_varargs where
+	 cfun->machine->stdarg_regs is set.  There are other setters of
+	 crtl->args.pretend_args_size than stdarg handling, like
+	 for an argument passed with parts in R13 and stack.  We must
+	 not store R13 into the pretend-area for that case, as GCC does
+	 that itself.  "Our" store would be marked as redundant and GCC
+	 will attempt to remove it, which will then be flagged as an
+	 internal error; trying to remove a frame-related insn. */
+      int stdarg_regs = cfun->machine->stdarg_regs;
+
+      framesize += pretend;
+
+      /* Push the anonymous-arg registers, highest-numbered first, so
+	 they end up in argument order in memory.  */
+      for (regno = CRIS_FIRST_ARG_REG + CRIS_MAX_ARGS_IN_REGS - 1;
+	   stdarg_regs > 0;
+	   regno--, pretend -= 4, stdarg_regs--)
+	{
+	  insn = emit_insn (gen_rtx_SET (VOIDmode,
+					 stack_pointer_rtx,
+					 plus_constant (Pmode,
+							stack_pointer_rtx,
+							-4)));
+	  /* FIXME: When dwarf2 frame output and unless asynchronous
+	     exceptions, make dwarf2 bundle together all stack
+	     adjustments like it does for registers between stack
+	     adjustments. */
+	  RTX_FRAME_RELATED_P (insn) = 1;
+
+	  mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
+	  set_mem_alias_set (mem, get_varargs_alias_set ());
+	  insn = emit_move_insn (mem, gen_rtx_raw_REG (SImode, regno));
+
+	  /* Note the absence of RTX_FRAME_RELATED_P on the above insn:
+	     the value isn't restored, so we don't want to tell dwarf2
+	     that it's been stored to stack, else EH handling info would
+	     get confused. */
+	}
+
+      /* For other setters of crtl->args.pretend_args_size, we
+	 just adjust the stack by leaving the remaining size in
+	 "pretend", handled below. */
+    }
+
+  /* Save SRP if not a leaf function. */
+  if (return_address_on_stack)
+    {
+      insn = emit_insn (gen_rtx_SET (VOIDmode,
+				     stack_pointer_rtx,
+				     plus_constant (Pmode, stack_pointer_rtx,
+						    -4 - pretend)));
+      pretend = 0;
+      RTX_FRAME_RELATED_P (insn) = 1;
+
+      mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
+      set_mem_alias_set (mem, get_frame_alias_set ());
+      insn = emit_move_insn (mem, gen_rtx_raw_REG (SImode, CRIS_SRP_REGNUM));
+      RTX_FRAME_RELATED_P (insn) = 1;
+      framesize += 4;
+    }
+
+  /* Set up the frame pointer, if needed. */
+  if (frame_pointer_needed)
+    {
+      insn = emit_insn (gen_rtx_SET (VOIDmode,
+				     stack_pointer_rtx,
+				     plus_constant (Pmode, stack_pointer_rtx,
+						    -4 - pretend)));
+      pretend = 0;
+      RTX_FRAME_RELATED_P (insn) = 1;
+
+      mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
+      set_mem_alias_set (mem, get_frame_alias_set ());
+      insn = emit_move_insn (mem, frame_pointer_rtx);
+      RTX_FRAME_RELATED_P (insn) = 1;
+
+      insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
+      RTX_FRAME_RELATED_P (insn) = 1;
+
+      framesize += 4;
+    }
+
+  /* Between frame-pointer and saved registers lie the area for local
+     variables.  If we get here with "pretended" size remaining, count
+     it into the general stack size. */
+  size += pretend;
+
+  /* Get a contiguous sequence of registers, starting with R0, that need
+     to be saved. */
+  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+    {
+      if (cris_reg_saved_in_regsave_area (regno, got_really_used))
+	{
+	  n_movem_regs++;
+
+	  /* Check if movem may be used for registers so far. */
+	  if (regno == last_movem_reg + 1)
+	    /* Yes, update next expected register. */
+	    last_movem_reg = regno;
+	  else
+	    {
+	      /* We cannot use movem for all registers.  We have to flush
+		 any movem:ed registers we got so far. */
+	      if (last_movem_reg != -1)
+		{
+		  int n_saved
+		    = (n_movem_regs == 1) ? 1 : last_movem_reg + 1;
+
+		  /* It is a win to use a side-effect assignment for
+		     64 <= size <= 128.  But side-effect on movem was
+		     not usable for CRIS v0..3.  Also only do it if
+		     side-effects insns are allowed. */
+		  if ((last_movem_reg + 1) * 4 + size >= 64
+		      && (last_movem_reg + 1) * 4 + size <= 128
+		      && (cris_cpu_version >= CRIS_CPU_SVINTO || n_saved == 1)
+		      && TARGET_SIDE_EFFECT_PREFIXES)
+		    {
+		      mem
+			= gen_rtx_MEM (SImode,
+				       plus_constant (Pmode, stack_pointer_rtx,
+						      -(n_saved * 4 + size)));
+		      set_mem_alias_set (mem, get_frame_alias_set ());
+		      insn
+			= cris_emit_movem_store (mem, GEN_INT (n_saved),
+						 -(n_saved * 4 + size),
+						 true);
+		    }
+		  else
+		    {
+		      insn
+			= gen_rtx_SET (VOIDmode,
+				       stack_pointer_rtx,
+				       plus_constant (Pmode, stack_pointer_rtx,
+						      -(n_saved * 4 + size)));
+		      insn = emit_insn (insn);
+		      RTX_FRAME_RELATED_P (insn) = 1;
+
+		      mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
+		      set_mem_alias_set (mem, get_frame_alias_set ());
+		      insn = cris_emit_movem_store (mem, GEN_INT (n_saved),
+						    0, true);
+		    }
+
+		  framesize += n_saved * 4 + size;
+		  last_movem_reg = -1;
+		  size = 0;
+		}
+
+	      /* Push this register on its own; it broke the contiguous
+		 r0..N run that movem could have covered.  */
+	      insn = emit_insn (gen_rtx_SET (VOIDmode,
+					     stack_pointer_rtx,
+					     plus_constant (Pmode,
+							    stack_pointer_rtx,
+							    -4 - size)));
+	      RTX_FRAME_RELATED_P (insn) = 1;
+
+	      mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
+	      set_mem_alias_set (mem, get_frame_alias_set ());
+	      insn = emit_move_insn (mem, gen_rtx_raw_REG (SImode, regno));
+	      RTX_FRAME_RELATED_P (insn) = 1;
+
+	      framesize += 4 + size;
+	      size = 0;
+	    }
+	}
+    }
+
+  /* Check after, if we could movem all registers.  This is the normal case. */
+  if (last_movem_reg != -1)
+    {
+      int n_saved
+	= (n_movem_regs == 1) ? 1 : last_movem_reg + 1;
+
+      /* Side-effect on movem was not usable for CRIS v0..3.  Also only
+	 do it if side-effects insns are allowed. */
+      if ((last_movem_reg + 1) * 4 + size >= 64
+	  && (last_movem_reg + 1) * 4 + size <= 128
+	  && (cris_cpu_version >= CRIS_CPU_SVINTO || n_saved == 1)
+	  && TARGET_SIDE_EFFECT_PREFIXES)
+	{
+	  mem
+	    = gen_rtx_MEM (SImode,
+			   plus_constant (Pmode, stack_pointer_rtx,
+					  -(n_saved * 4 + size)));
+	  set_mem_alias_set (mem, get_frame_alias_set ());
+	  insn = cris_emit_movem_store (mem, GEN_INT (n_saved),
+					-(n_saved * 4 + size), true);
+	}
+      else
+	{
+	  insn
+	    = gen_rtx_SET (VOIDmode,
+			   stack_pointer_rtx,
+			   plus_constant (Pmode, stack_pointer_rtx,
+					  -(n_saved * 4 + size)));
+	  insn = emit_insn (insn);
+	  RTX_FRAME_RELATED_P (insn) = 1;
+
+	  mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
+	  set_mem_alias_set (mem, get_frame_alias_set ());
+	  insn = cris_emit_movem_store (mem, GEN_INT (n_saved), 0, true);
+	}
+
+      framesize += n_saved * 4 + size;
+      /* We have to put outgoing argument space after regs. */
+      if (cfoa_size)
+	{
+	  insn = emit_insn (gen_rtx_SET (VOIDmode,
+					 stack_pointer_rtx,
+					 plus_constant (Pmode,
+							stack_pointer_rtx,
+							-cfoa_size)));
+	  RTX_FRAME_RELATED_P (insn) = 1;
+	  framesize += cfoa_size;
+	}
+    }
+  else if ((size + cfoa_size) > 0)
+    {
+      /* No register-save area at all: allocate locals plus outgoing
+	 argument space in one adjustment.  */
+      insn = emit_insn (gen_rtx_SET (VOIDmode,
+				     stack_pointer_rtx,
+				     plus_constant (Pmode,
+						    stack_pointer_rtx,
+						    -(cfoa_size + size))));
+      RTX_FRAME_RELATED_P (insn) = 1;
+      framesize += size + cfoa_size;
+    }
+
+  /* Set up the PIC register, if it is used. */
+  if (got_really_used)
+    {
+      rtx got
+	= gen_rtx_UNSPEC (SImode, gen_rtvec (1, const0_rtx), CRIS_UNSPEC_GOT);
+      emit_move_insn (pic_offset_table_rtx, got);
+
+      /* FIXME: This is a cover-up for flow2 messing up; it doesn't
+	 follow exceptional paths and tries to delete the GOT load as
+	 unused, if it isn't used on the non-exceptional paths.  Other
+	 ports have similar or other cover-ups, or plain bugs marking
+	 the GOT register load as maybe-dead.  To see this, remove the
+	 line below and try libsupc++/vec.cc or a trivial
+	 "static void y (); void x () {try {y ();} catch (...) {}}". */
+      emit_use (pic_offset_table_rtx);
+    }
+
+  if (cris_max_stackframe && framesize > cris_max_stackframe)
+    warning (0, "stackframe too big: %d bytes", framesize);
+}
+
+/* The expander for the epilogue pattern.
+
+   Undoes cris_expand_prologue in reverse order: deallocates the
+   outgoing-argument area, restores individually-pushed registers,
+   restores the movem-saved r0..N run, restores the frame pointer or
+   deallocates locals, then restores SRP/pretend-args as needed and
+   finally expands the return.  Register restores use post-increment
+   addressing, so each such insn gets a manual REG_INC note.  */
+
+void
+cris_expand_epilogue (void)
+{
+  int regno;
+  int size = get_frame_size ();
+  int last_movem_reg = -1;
+  int argspace_offset = crtl->outgoing_args_size;
+  int pretend =	 crtl->args.pretend_args_size;
+  rtx mem;
+  bool return_address_on_stack = cris_return_address_on_stack ();
+  /* A reference may have been optimized out
+     (like the abort () in fde_split in unwind-dw2-fde.c, at least 3.2.1)
+     so check that it's still used. */
+  int got_really_used = false;
+  int n_movem_regs = 0;
+
+  if (!TARGET_PROLOGUE_EPILOGUE)
+    return;
+
+  if (crtl->uses_pic_offset_table)
+    {
+      /* A reference may have been optimized out (like the abort () in
+	 fde_split in unwind-dw2-fde.c, at least 3.2.1) so check that
+	 it's still used. */
+      push_topmost_sequence ();
+      got_really_used
+	= reg_used_between_p (pic_offset_table_rtx, get_insns (), NULL_RTX);
+      pop_topmost_sequence ();
+    }
+
+  /* Align byte count of stack frame. */
+  if (TARGET_STACK_ALIGN)
+    size = TARGET_ALIGN_BY_32 ? (size + 3) & ~3 : (size + 1) & ~1;
+
+  /* Check how many saved regs we can movem.  They start at r0 and must
+     be contiguous. */
+  for (regno = 0;
+       regno < FIRST_PSEUDO_REGISTER;
+       regno++)
+    if (cris_reg_saved_in_regsave_area (regno, got_really_used))
+      {
+	n_movem_regs++;
+
+	if (regno == last_movem_reg + 1)
+	  last_movem_reg = regno;
+	else
+	  break;
+      }
+
+  /* If there was only one register that really needed to be saved
+     through movem, don't use movem. */
+  if (n_movem_regs == 1)
+    last_movem_reg = -1;
+
+  /* Now emit "normal" move insns for all regs higher than the movem
+     regs. */
+  for (regno = FIRST_PSEUDO_REGISTER - 1;
+       regno > last_movem_reg;
+       regno--)
+    if (cris_reg_saved_in_regsave_area (regno, got_really_used))
+      {
+	rtx insn;
+
+	if (argspace_offset)
+	  {
+	    /* There is an area for outgoing parameters located before
+	       the saved registers.  We have to adjust for that. */
+	    emit_insn (gen_rtx_SET (VOIDmode,
+				    stack_pointer_rtx,
+				    plus_constant (Pmode, stack_pointer_rtx,
+						   argspace_offset)));
+	    /* Make sure we only do this once. */
+	    argspace_offset = 0;
+	  }
+
+	mem = gen_rtx_MEM (SImode, gen_rtx_POST_INC (SImode,
+						     stack_pointer_rtx));
+	set_mem_alias_set (mem, get_frame_alias_set ());
+	insn = emit_move_insn (gen_rtx_raw_REG (SImode, regno), mem);
+
+	/* Whenever we emit insns with post-incremented addresses
+	   ourselves, we must add a post-inc note manually. */
+	REG_NOTES (insn)
+	  = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx, REG_NOTES (insn));
+      }
+
+  /* If we have any movem-restore, do it now. */
+  if (last_movem_reg != -1)
+    {
+      rtx insn;
+
+      if (argspace_offset)
+	{
+	  emit_insn (gen_rtx_SET (VOIDmode,
+				  stack_pointer_rtx,
+				  plus_constant (Pmode, stack_pointer_rtx,
+						 argspace_offset)));
+	  argspace_offset = 0;
+	}
+
+      mem = gen_rtx_MEM (SImode,
+			 gen_rtx_POST_INC (SImode, stack_pointer_rtx));
+      set_mem_alias_set (mem, get_frame_alias_set ());
+      insn
+	= emit_insn (cris_gen_movem_load (mem,
+					  GEN_INT (last_movem_reg + 1), 0));
+      /* Whenever we emit insns with post-incremented addresses
+	 ourselves, we must add a post-inc note manually. */
+      if (side_effects_p (PATTERN (insn)))
+	REG_NOTES (insn)
+	  = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx, REG_NOTES (insn));
+    }
+
+  /* If we don't clobber all of the allocated stack area (we've already
+     deallocated saved registers), GCC might want to schedule loads from
+     the stack to *after* the stack-pointer restore, which introduces an
+     interrupt race condition.  This happened for the initial-value
+     SRP-restore for g++.dg/eh/registers1.C (noticed by inspection of
+     other failure for that test).  It also happened for the stack slot
+     for the return value in (one version of)
+     linux/fs/dcache.c:__d_lookup, at least with "-O2
+     -fno-omit-frame-pointer". */
+
+  /* Restore frame pointer if necessary. */
+  if (frame_pointer_needed)
+    {
+      rtx insn;
+
+      /* Blocker insn; see the comment above about scheduling loads
+	 past the stack-pointer restore.  */
+      emit_insn (gen_cris_frame_deallocated_barrier ());
+
+      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
+      mem = gen_rtx_MEM (SImode, gen_rtx_POST_INC (SImode,
+						   stack_pointer_rtx));
+      set_mem_alias_set (mem, get_frame_alias_set ());
+      insn = emit_move_insn (frame_pointer_rtx, mem);
+
+      /* Whenever we emit insns with post-incremented addresses
+	 ourselves, we must add a post-inc note manually. */
+      REG_NOTES (insn)
+	= alloc_EXPR_LIST (REG_INC, stack_pointer_rtx, REG_NOTES (insn));
+    }
+  else if ((size + argspace_offset) != 0)
+    {
+      emit_insn (gen_cris_frame_deallocated_barrier ());
+
+      /* If there was no frame-pointer to restore sp from, we must
+	 explicitly deallocate local variables. */
+
+      /* Handle space for outgoing parameters that hasn't been handled
+	 yet. */
+      size += argspace_offset;
+
+      emit_insn (gen_rtx_SET (VOIDmode,
+			      stack_pointer_rtx,
+			      plus_constant (Pmode, stack_pointer_rtx, size)));
+    }
+
+  /* If this function has no pushed register parameters
+     (stdargs/varargs), and if it is not a leaf function, then we have
+     the return address on the stack. */
+  if (return_address_on_stack && pretend == 0)
+    {
+      if (TARGET_V32 || crtl->calls_eh_return)
+	{
+	  rtx mem;
+	  rtx insn;
+	  rtx srpreg = gen_rtx_raw_REG (SImode, CRIS_SRP_REGNUM);
+	  mem = gen_rtx_MEM (SImode,
+			     gen_rtx_POST_INC (SImode,
+					       stack_pointer_rtx));
+	  set_mem_alias_set (mem, get_frame_alias_set ());
+	  insn = emit_move_insn (srpreg, mem);
+
+	  /* Whenever we emit insns with post-incremented addresses
+	     ourselves, we must add a post-inc note manually. */
+	  REG_NOTES (insn)
+	    = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx, REG_NOTES (insn));
+
+	  if (crtl->calls_eh_return)
+	    emit_insn (gen_addsi3 (stack_pointer_rtx,
+				   stack_pointer_rtx,
+				   gen_rtx_raw_REG (SImode,
+						    CRIS_STACKADJ_REG)));
+	  cris_expand_return (false);
+	}
+      else
+	cris_expand_return (true);
+
+      return;
+    }
+
+  /* If we pushed some register parameters, then adjust the stack for
+     them. */
+  if (pretend != 0)
+    {
+      /* If SRP is stored on the way, we need to restore it first. */
+      if (return_address_on_stack)
+	{
+	  rtx mem;
+	  rtx srpreg = gen_rtx_raw_REG (SImode, CRIS_SRP_REGNUM);
+	  rtx insn;
+
+	  mem = gen_rtx_MEM (SImode,
+			     gen_rtx_POST_INC (SImode,
+					       stack_pointer_rtx));
+	  set_mem_alias_set (mem, get_frame_alias_set ());
+	  insn = emit_move_insn (srpreg, mem);
+
+	  /* Whenever we emit insns with post-incremented addresses
+	     ourselves, we must add a post-inc note manually. */
+	  REG_NOTES (insn)
+	    = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx, REG_NOTES (insn));
+	}
+
+      emit_insn (gen_rtx_SET (VOIDmode,
+			      stack_pointer_rtx,
+			      plus_constant (Pmode, stack_pointer_rtx,
+					     pretend)));
+    }
+
+  /* Perform the "physical" unwinding that the EH machinery calculated. */
+  if (crtl->calls_eh_return)
+    emit_insn (gen_addsi3 (stack_pointer_rtx,
+			   stack_pointer_rtx,
+			   gen_rtx_raw_REG (SImode,
+					    CRIS_STACKADJ_REG)));
+  cris_expand_return (false);
+}
+
+/* Worker function for generating movem from mem for load_multiple.
+
+   SRC is the memory source (its address may be a POST_INC of the base
+   register), NREGS_RTX the number of registers to load starting at r0,
+   and NPREFIX the number of vector slots to leave free at the front of
+   the returned PARALLEL for the caller to fill in.  Returns an
+   unemitted pattern: a plain SET (via gen_movsi) for a single
+   register, otherwise a PARALLEL of the register SETs plus, for a
+   POST_INC address, a SET advancing the base register.  */
+
+rtx
+cris_gen_movem_load (rtx src, rtx nregs_rtx, int nprefix)
+{
+  int nregs = INTVAL (nregs_rtx);
+  rtvec vec;
+  /* Next free PARALLEL slot past NPREFIX; starts at 1 because slot
+     NPREFIX always holds the first register's SET.  */
+  int eltno = 1;
+  int i;
+  rtx srcreg = XEXP (src, 0);
+  /* movem register order differs between v32 and older cores: first
+     element is r0 on v32, r(nregs-1) otherwise.  */
+  unsigned int regno = nregs - 1;
+  int regno_inc = -1;
+
+  if (TARGET_V32)
+    {
+      regno = 0;
+      regno_inc = 1;
+    }
+
+  if (GET_CODE (srcreg) == POST_INC)
+    srcreg = XEXP (srcreg, 0);
+
+  CRIS_ASSERT (REG_P (srcreg));
+
+  /* Don't use movem for just one insn.  The insns are equivalent except
+     for the pipeline hazard (on v32); movem does not forward the loaded
+     registers so there's a three cycles penalty for their use. */
+  if (nregs == 1)
+    return gen_movsi (gen_rtx_REG (SImode, 0), src);
+
+  vec = rtvec_alloc (nprefix + nregs
+		     + (GET_CODE (XEXP (src, 0)) == POST_INC));
+
+  if (GET_CODE (XEXP (src, 0)) == POST_INC)
+    {
+      /* Slot nprefix+1 holds the base-register advance; the register
+	 SETs for i >= 1 then start at slot nprefix+2.  */
+      RTVEC_ELT (vec, nprefix + 1)
+	= gen_rtx_SET (VOIDmode, srcreg,
+		       plus_constant (Pmode, srcreg, nregs * 4));
+      eltno++;
+    }
+
+  src = replace_equiv_address (src, srcreg);
+  RTVEC_ELT (vec, nprefix)
+    = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, regno), src);
+  regno += regno_inc;
+
+  for (i = 1; i < nregs; i++, eltno++)
+    {
+      RTVEC_ELT (vec, nprefix + eltno)
+	= gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, regno),
+		       adjust_address_nv (src, SImode, i * 4));
+      regno += regno_inc;
+    }
+
+  return gen_rtx_PARALLEL (VOIDmode, vec);
+}
+
+/* Worker function for generating movem to mem.  If FRAME_RELATED, notes
+   are added that the dwarf2 machinery understands.
+
+   DEST is the memory destination (its address may be a POST_INC or a
+   PLUS over the base register), NREGS_RTX the number of registers to
+   store starting at r0, and INCREMENT a stack-pointer-style adjustment
+   of the base register folded into the same insn (0 for none).  Emits
+   the insn and returns it.  */
+
+rtx
+cris_emit_movem_store (rtx dest, rtx nregs_rtx, int increment,
+		       bool frame_related)
+{
+  int nregs = INTVAL (nregs_rtx);
+  rtvec vec;
+  int eltno = 1;
+  int i;
+  rtx insn;
+  rtx destreg = XEXP (dest, 0);
+  /* movem register order differs between v32 and older cores: first
+     element is r0 on v32, r(nregs-1) otherwise.  */
+  unsigned int regno = nregs - 1;
+  int regno_inc = -1;
+
+  if (TARGET_V32)
+    {
+      regno = 0;
+      regno_inc = 1;
+    }
+
+  if (GET_CODE (destreg) == POST_INC)
+    increment += nregs * 4;
+
+  if (GET_CODE (destreg) == POST_INC || GET_CODE (destreg) == PLUS)
+    destreg = XEXP (destreg, 0);
+
+  CRIS_ASSERT (REG_P (destreg));
+
+  /* Don't use movem for just one insn.  The insns are equivalent except
+     for the pipeline hazard (on v32); movem does not forward the loaded
+     registers so there's a three cycles penalty for use. */
+  if (nregs == 1)
+    {
+      rtx mov = gen_rtx_SET (VOIDmode, dest, gen_rtx_REG (SImode, 0));
+
+      if (increment == 0)
+	{
+	  insn = emit_insn (mov);
+	  if (frame_related)
+	    RTX_FRAME_RELATED_P (insn) = 1;
+	  return insn;
+	}
+
+      /* If there was a request for a side-effect, create the ordinary
+         parallel. */
+      vec = rtvec_alloc (2);
+
+      RTVEC_ELT (vec, 0) = mov;
+      RTVEC_ELT (vec, 1) = gen_rtx_SET (VOIDmode, destreg,
+					plus_constant (Pmode, destreg,
+						       increment));
+      if (frame_related)
+	{
+	  RTX_FRAME_RELATED_P (mov) = 1;
+	  RTX_FRAME_RELATED_P (RTVEC_ELT (vec, 1)) = 1;
+	}
+    }
+  else
+    {
+      vec = rtvec_alloc (nregs + (increment != 0 ? 1 : 0));
+      RTVEC_ELT (vec, 0)
+	= gen_rtx_SET (VOIDmode,
+		       replace_equiv_address (dest,
+					      plus_constant (Pmode, destreg,
+							     increment)),
+		       gen_rtx_REG (SImode, regno));
+      regno += regno_inc;
+
+      /* The dwarf2 info wants this mark on each component in a parallel
+	 that's part of the prologue (though it's optional on the first
+	 component). */
+      if (frame_related)
+	RTX_FRAME_RELATED_P (RTVEC_ELT (vec, 0)) = 1;
+
+      if (increment != 0)
+	{
+	  /* NOTE(review): the "increment != 0" test in the ternary
+	     below is redundant -- this branch is only reached when
+	     increment != 0, so the "nregs * 4" arm is dead.  */
+	  RTVEC_ELT (vec, 1)
+	    = gen_rtx_SET (VOIDmode, destreg,
+			   plus_constant (Pmode, destreg,
+					  increment != 0
+					  ? increment : nregs * 4));
+	  eltno++;
+
+	  if (frame_related)
+	    RTX_FRAME_RELATED_P (RTVEC_ELT (vec, 1)) = 1;
+
+	  /* Don't call adjust_address_nv on a post-incremented address if
+	     we can help it. */
+	  if (GET_CODE (XEXP (dest, 0)) == POST_INC)
+	    dest = replace_equiv_address (dest, destreg);
+	}
+
+      for (i = 1; i < nregs; i++, eltno++)
+	{
+	  RTVEC_ELT (vec, eltno)
+	    = gen_rtx_SET (VOIDmode, adjust_address_nv (dest, SImode, i * 4),
+			   gen_rtx_REG (SImode, regno));
+	  if (frame_related)
+	    RTX_FRAME_RELATED_P (RTVEC_ELT (vec, eltno)) = 1;
+	  regno += regno_inc;
+	}
+    }
+
+  insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, vec));
+
+  /* Because dwarf2out.c handles the insns in a parallel as a sequence,
+     we need to keep the stack adjustment separate, after the
+     MEM-setters.  Else the stack-adjustment in the second component of
+     the parallel would be mishandled; the offsets for the SETs that
+     follow it would be wrong.  We prepare for this by adding a
+     REG_FRAME_RELATED_EXPR with the MEM-setting parts in a SEQUENCE
+     followed by the increment.  Note that we have FRAME_RELATED_P on
+     all the SETs, including the original stack adjustment SET in the
+     parallel. */
+  if (frame_related)
+    {
+      if (increment != 0)
+	{
+	  rtx seq = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nregs + 1));
+	  XVECEXP (seq, 0, 0) = copy_rtx (XVECEXP (PATTERN (insn), 0, 0));
+	  for (i = 1; i < nregs; i++)
+	    XVECEXP (seq, 0, i)
+	      = copy_rtx (XVECEXP (PATTERN (insn), 0, i + 1));
+	  XVECEXP (seq, 0, nregs) = copy_rtx (XVECEXP (PATTERN (insn), 0, 1));
+	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, seq);
+	}
+
+      RTX_FRAME_RELATED_P (insn) = 1;
+    }
+
+  return insn;
+}
+
+/* Worker function for expanding the address for PIC function calls.
+
+   *OPP is the MEM operand of a call.  For a constant (non-integer)
+   call address, rewrite the address in place for PIC: local symbols
+   become a plain register or a PC-relative unspec (v32), PLT-capable
+   symbols become PLT references -- either GOT-avoiding (PLT_PCREL /
+   PLT_GOTREL) or an indirect load through the PLTGOT entry.  */
+
+void
+cris_expand_pic_call_address (rtx *opp)
+{
+  rtx op = *opp;
+
+  gcc_assert (MEM_P (op));
+  op = XEXP (op, 0);
+
+  /* It might be that code can be generated that jumps to 0 (or to a
+     specific address).  Don't die on that.  (There is a
+     testcase.) */
+  if (CONSTANT_ADDRESS_P (op) && !CONST_INT_P (op))
+    {
+      enum cris_pic_symbol_type t = cris_pic_symbol_type_of (op);
+
+      CRIS_ASSERT (can_create_pseudo_p ());
+
+      /* For local symbols (non-PLT), just get the plain symbol
+	 reference into a register.  For symbols that can be PLT, make
+	 them PLT. */
+      if (t == cris_rel_symbol)
+	{
+	  /* For v32, we're fine as-is; just PICify the symbol.  Forcing
+	     into a register caused performance regression for 3.2.1,
+	     observable in __floatdidf and elsewhere in libgcc. */
+	  if (TARGET_V32)
+	    {
+	      rtx sym = GET_CODE (op) != CONST ? op : get_related_value (op);
+	      HOST_WIDE_INT offs = get_integer_term (op);
+
+	      /* We can't get calls to sym+N, N integer, can we? */
+	      gcc_assert (offs == 0);
+
+	      op = gen_rtx_CONST (Pmode,
+				  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, sym),
+						  CRIS_UNSPEC_PCREL));
+	    }
+	  else
+	    op = force_reg (Pmode, op);
+	}
+      else if (t == cris_got_symbol)
+	{
+	  if (TARGET_AVOID_GOTPLT)
+	    {
+	      /* Change a "jsr sym" into (allocate register rM, rO)
+		 "move.d (const (unspec [sym rPIC] CRIS_UNSPEC_PLT_GOTREL)),rM"
+		 "add.d rPIC,rM,rO", "jsr rO" for pre-v32 and
+		 "jsr (const (unspec [sym rPIC] CRIS_UNSPEC_PLT_PCREL))"
+		 for v32. */
+	      rtx tem, rm, ro;
+	      gcc_assert (can_create_pseudo_p ());
+	      crtl->uses_pic_offset_table = 1;
+	      tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op),
+				    TARGET_V32
+				    ? CRIS_UNSPEC_PLT_PCREL
+				    : CRIS_UNSPEC_PLT_GOTREL);
+	      tem = gen_rtx_CONST (Pmode, tem);
+	      if (TARGET_V32)
+		op = tem;
+	      else
+		{
+		  rm = gen_reg_rtx (Pmode);
+		  emit_move_insn (rm, tem);
+		  ro = gen_reg_rtx (Pmode);
+		  if (expand_binop (Pmode, add_optab, rm,
+				    pic_offset_table_rtx,
+				    ro, 0, OPTAB_LIB_WIDEN) != ro)
+		    internal_error ("expand_binop failed in movsi got");
+		  op = ro;
+		}
+	    }
+	  else
+	    {
+	      /* Change a "jsr sym" into (allocate register rM, rO)
+		 "move.d (const (unspec [sym] CRIS_UNSPEC_PLTGOTREAD)),rM"
+		 "add.d rPIC,rM,rO" "jsr [rO]" with the memory access
+		 marked as not trapping and not aliasing.  No "move.d
+		 [rO],rP" as that would invite to re-use of a value
+		 that should not be reused.  FIXME: Need a peephole2
+		 for cases when this is cse:d from the call, to change
+		 back to just get the PLT entry address, so we don't
+		 resolve the same symbol over and over (the memory
+		 access of the PLTGOT isn't constant). */
+	      rtx tem, mem, rm, ro;
+
+	      gcc_assert (can_create_pseudo_p ());
+	      crtl->uses_pic_offset_table = 1;
+	      tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op),
+				    CRIS_UNSPEC_PLTGOTREAD);
+	      rm = gen_reg_rtx (Pmode);
+	      emit_move_insn (rm, gen_rtx_CONST (Pmode, tem));
+	      ro = gen_reg_rtx (Pmode);
+	      if (expand_binop (Pmode, add_optab, rm,
+				pic_offset_table_rtx,
+				ro, 0, OPTAB_LIB_WIDEN) != ro)
+		internal_error ("expand_binop failed in movsi got");
+	      mem = gen_rtx_MEM (Pmode, ro);
+
+	      /* This MEM doesn't alias anything.  Whether it aliases
+		 other same symbols is unimportant. */
+	      set_mem_alias_set (mem, new_alias_set ());
+	      MEM_NOTRAP_P (mem) = 1;
+	      op = mem;
+	    }
+	}
+      else
+	/* Can't possibly get a GOT-needing-fixup for a function-call,
+	   right? */
+	fatal_insn ("unidentifiable call op", op);
+
+      *opp = replace_equiv_address (*opp, op);
+    }
+}
+
+/* Make sure operands are in the right order for an addsi3 insn as
+   generated by a define_split.  Nothing but REG_P as the first
+   operand is recognized by addsi3 after reload.  OPERANDS contains
+   the operands, with the first at OPERANDS[N] and the second at
+   OPERANDS[N+1].  */
+
+void
+cris_order_for_addsi3 (rtx *operands, int n)
+{
+  rtx first = operands[n];
+
+  /* Nothing to do when a register is already in the first slot.  */
+  if (REG_P (first))
+    return;
+
+  /* Otherwise swap the pair so the register ends up first.  */
+  operands[n] = operands[n + 1];
+  operands[n + 1] = first;
+}
+
+/* Use from within code, from e.g. PRINT_OPERAND and
+   PRINT_OPERAND_ADDRESS.  Macros used in output_addr_const need to emit
+   different things depending on whether code operand or constant is
+   emitted. */
+
+static void
+cris_output_addr_const (FILE *file, rtx x)
+{
+  /* Bump in_code so cris_asm_output_symbol_ref/label_ref know X is
+     being printed as a code operand, then defer to the generic
+     printer.  A counter (not a bool) so nested calls unwind safely.  */
+  in_code++;
+  output_addr_const (file, x);
+  in_code--;
+}
+
+/* Worker function for ASM_OUTPUT_SYMBOL_REF.
+
+   Emits the assembler name for symbol X; for a symbol printed as part
+   of a code operand in PIC, the platform-specific name encoding is
+   stripped first and the PIC register setup is sanity-checked.  */
+
+void
+cris_asm_output_symbol_ref (FILE *file, rtx x)
+{
+  gcc_assert (GET_CODE (x) == SYMBOL_REF);
+
+  if (!(flag_pic && in_code > 0))
+    {
+      /* Plain (non-PIC-code) reference: emit the name as-is.  */
+      assemble_name (file, XSTR (x, 0));
+      return;
+    }
+
+  /* PIC code operand: emit the stripped name.  */
+  assemble_name (file, (* targetm.strip_name_encoding) (XSTR (x, 0)));
+
+  /* Sanity check. */
+  if (!TARGET_V32 && !crtl->uses_pic_offset_table)
+    output_operand_lossage ("PIC register isn't set up");
+}
+
+/* Worker function for ASM_OUTPUT_LABEL_REF.
+
+   Emits label BUF; when it appears inside a code operand under PIC,
+   additionally verify that the PIC register has been set up.  */
+
+void
+cris_asm_output_label_ref (FILE *file, char *buf)
+{
+  /* The label name itself is emitted identically in both cases.  */
+  assemble_name (file, buf);
+
+  /* Sanity check, only meaningful for PIC code operands. */
+  if (flag_pic && in_code > 0
+      && !TARGET_V32 && !crtl->uses_pic_offset_table)
+    internal_error ("emitting PIC operand, but PIC register "
+		    "isn%'t set up");
+}
+
+/* Worker function for TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
+
+   Prints CRIS PIC unspecs: the wrapped symbol followed by the
+   relocation suffix selected by the unspec number (":PLT", ":PLTG",
+   ":GOTOFF", ":GOT"/":GOT16", or the PLTGOT suffix).  Returns true if
+   XCONST was handled, false to let the generic code try.  */
+
+static bool
+cris_output_addr_const_extra (FILE *file, rtx xconst)
+{
+  switch (GET_CODE (xconst))
+    {
+      rtx x;
+
+    case UNSPEC:
+      x = XVECEXP (xconst, 0, 0);
+      CRIS_ASSERT (GET_CODE (x) == SYMBOL_REF
+		   || GET_CODE (x) == LABEL_REF
+		   || GET_CODE (x) == CONST);
+      output_addr_const (file, x);
+      /* The unspec number picks the relocation suffix.  */
+      switch (XINT (xconst, 1))
+	{
+	case CRIS_UNSPEC_PCREL:
+	  /* We only get this with -fpic/PIC to tell it apart from an
+	     invalid symbol.  We can't tell here, but it should only
+	     be the operand of a call or movsi. */
+	  gcc_assert (TARGET_V32 && flag_pic);
+	  break;
+
+	case CRIS_UNSPEC_PLT_PCREL:
+	  gcc_assert (TARGET_V32);
+	  fprintf (file, ":PLT");
+	  break;
+
+	case CRIS_UNSPEC_PLT_GOTREL:
+	  gcc_assert (!TARGET_V32);
+	  fprintf (file, ":PLTG");
+	  break;
+
+	case CRIS_UNSPEC_GOTREL:
+	  gcc_assert (!TARGET_V32);
+	  fprintf (file, ":GOTOFF");
+	  break;
+
+	case CRIS_UNSPEC_GOTREAD:
+	  /* -fpic (1) uses 16-bit GOT offsets; -fPIC (2) 32-bit.  */
+	  if (flag_pic == 1)
+	    fprintf (file, ":GOT16");
+	  else
+	    fprintf (file, ":GOT");
+	  break;
+
+	case CRIS_UNSPEC_PLTGOTREAD:
+	  if (flag_pic == 1)
+	    fprintf (file, CRIS_GOTPLT_SUFFIX "16");
+	  else
+	    fprintf (file, CRIS_GOTPLT_SUFFIX);
+	  break;
+
+	default:
+	  gcc_unreachable ();
+	}
+      return true;
+
+    default:
+      return false;
+    }
+}
+
+/* Worker function for TARGET_STRUCT_VALUE_RTX.
+
+   The address of a returned-in-memory aggregate is always passed in
+   the same fixed register, regardless of direction or function
+   type.  */
+
+static rtx
+cris_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
+		       int incoming ATTRIBUTE_UNUSED)
+{
+  rtx reg = gen_rtx_REG (Pmode, CRIS_STRUCT_VALUE_REGNUM);
+
+  return reg;
+}
+
+/* Worker function for TARGET_SETUP_INCOMING_VARARGS.
+
+   Record how many argument registers remain unconsumed by the named
+   arguments; cris_expand_prologue stores them into the pretend-arg
+   area so va_arg can find them on the stack.  */
+
+static void
+cris_setup_incoming_varargs (cumulative_args_t ca_v,
+			     enum machine_mode mode ATTRIBUTE_UNUSED,
+			     tree type ATTRIBUTE_UNUSED,
+			     int *pretend_arg_size,
+			     int second_time)
+{
+  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
+
+  if (ca->regs < CRIS_MAX_ARGS_IN_REGS)
+    {
+      /* Each leftover register contributes one 4-byte slot.  */
+      cfun->machine->stdarg_regs = CRIS_MAX_ARGS_IN_REGS - ca->regs;
+      *pretend_arg_size = cfun->machine->stdarg_regs * 4;
+    }
+
+  if (TARGET_PDEBUG)
+    fprintf (asm_out_file,
+	     "\n; VA:: ANSI: %d args before, anon @ #%d, %dtime\n",
+	     ca->regs, *pretend_arg_size, second_time);
+}
+
+/* Return true if TYPE must be passed by invisible reference.
+   For cris, we pass <= 8 bytes by value, others by reference.  */
+
+static bool
+cris_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
+			enum machine_mode mode, const_tree type,
+			bool named ATTRIBUTE_UNUSED)
+{
+  /* Anything the target says must go on the stack is passed by
+     reference.  */
+  if (targetm.calls.must_pass_in_stack (mode, type))
+    return true;
+
+  /* So is anything wider than two words (8 bytes).  */
+  return CRIS_FUNCTION_ARG_SIZE (mode, type) > 8;
+}
+
+/* A combination of defining TARGET_PROMOTE_FUNCTION_MODE, promoting arguments
+   and *not* defining TARGET_PROMOTE_PROTOTYPES or PROMOTE_MODE gives the
+   best code size and speed for gcc, ipps and products in gcc-2.7.2.  */
+
+enum machine_mode
+cris_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
+			    enum machine_mode mode,
+			    int *punsignedp ATTRIBUTE_UNUSED,
+			    const_tree fntype ATTRIBUTE_UNUSED,
+			    int for_return)
+{
+  /* Defining PROMOTE_FUNCTION_RETURN in gcc-2.7.2 uncovered bug 981110 (even
+     when modifying TARGET_FUNCTION_VALUE to return the promoted mode).
+     Maybe pointless as of now, but let's keep the old behavior: only
+     arguments are promoted; return values keep their mode.  */
+  return (for_return == 1
+	  ? mode
+	  : CRIS_PROMOTED_MODE (mode, *punsignedp, type));
+}
+
+/* Let's assume all functions return in r[CRIS_FIRST_ARG_REG] for the
+   time being.  */
+
+static rtx
+cris_function_value(const_tree type,
+		    const_tree func ATTRIBUTE_UNUSED,
+		    bool outgoing ATTRIBUTE_UNUSED)
+{
+  /* The value comes back in the first argument register, in the
+     natural mode of TYPE.  */
+  enum machine_mode value_mode = TYPE_MODE (type);
+
+  return gen_rtx_REG (value_mode, CRIS_FIRST_ARG_REG);
+}
+
+/* Let's assume all functions return in r[CRIS_FIRST_ARG_REG] for the
+   time being.  */
+
+static rtx
+cris_libcall_value (enum machine_mode mode,
+		    const_rtx fun ATTRIBUTE_UNUSED)
+{
+  /* Library calls follow the same convention as ordinary functions.  */
+  rtx value_reg = gen_rtx_REG (mode, CRIS_FIRST_ARG_REG);
+
+  return value_reg;
+}
+
+/* Let's assume all functions return in r[CRIS_FIRST_ARG_REG] for the
+   time being.  */
+
+static bool
+cris_function_value_regno_p (const unsigned int regno)
+{
+  /* Only the first argument register ever carries a return value.  */
+  return regno == CRIS_FIRST_ARG_REG;
+}
+
+/* Worker for TARGET_ARG_PARTIAL_BYTES: number of bytes of an argument
+   passed in registers when the rest goes on the stack.  Only the last
+   argument register can hold the first word of a 5..8-byte value that
+   isn't forced onto the stack; everything else is all-register or
+   all-stack.  */
+
+static int
+cris_arg_partial_bytes (cumulative_args_t ca, enum machine_mode mode,
+			tree type, bool named ATTRIBUTE_UNUSED)
+{
+  int arg_size = CRIS_FUNCTION_ARG_SIZE (mode, type);
+
+  /* A split can only happen at the last register.  */
+  if (get_cumulative_args (ca)->regs != CRIS_MAX_ARGS_IN_REGS - 1)
+    return 0;
+
+  /* Stack-forced arguments are never split.  */
+  if (targetm.calls.must_pass_in_stack (mode, type))
+    return 0;
+
+  return (arg_size > 4 && arg_size <= 8) ? UNITS_PER_WORD : 0;
+}
+
+/* Common worker for TARGET_FUNCTION_ARG and
+   TARGET_FUNCTION_INCOMING_ARG: the register for the next argument, or
+   NULL_RTX when it is passed on the stack.  INCOMING distinguishes the
+   callee's view from the caller's.  */
+
+static rtx
+cris_function_arg_1 (cumulative_args_t ca_v,
+		     enum machine_mode mode ATTRIBUTE_UNUSED,
+		     const_tree type ATTRIBUTE_UNUSED,
+		     bool named, bool incoming)
+{
+  const CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
+
+  /* On the callee side, unnamed (stdarg) arguments are found on the
+     stack (in the pretend-arg area), never in a register.  */
+  if (incoming && !named)
+    return NULL_RTX;
+
+  /* No register left for this argument.  */
+  if (ca->regs >= CRIS_MAX_ARGS_IN_REGS)
+    return NULL_RTX;
+
+  return gen_rtx_REG (mode, CRIS_FIRST_ARG_REG + ca->regs);
+}
+
+/* Worker function for TARGET_FUNCTION_ARG.
+   The void_type_node is sent as a "closing" call.
+
+   Thin wrapper: the outgoing (caller-side) case of
+   cris_function_arg_1 (incoming == false).  */
+
+static rtx
+cris_function_arg (cumulative_args_t ca, enum machine_mode mode,
+		   const_tree type, bool named)
+{
+  return cris_function_arg_1 (ca, mode, type, named, false);
+}
+
+/* Worker function for TARGET_FUNCTION_INCOMING_ARG.
+
+   The differences between this and the previous, is that this one checks
+   that an argument is named, since incoming stdarg/varargs arguments are
+   pushed onto the stack, and we don't have to check against the "closing"
+   void_type_node TYPE parameter.
+
+   Thin wrapper: the incoming (callee-side) case of
+   cris_function_arg_1 (incoming == true).  */
+
+static rtx
+cris_function_incoming_arg (cumulative_args_t ca, enum machine_mode mode,
+			    const_tree type, bool named)
+{
+  return cris_function_arg_1 (ca, mode, type, named, true);
+}
+
+/* Worker function for TARGET_FUNCTION_ARG_ADVANCE.  Consumes the
+   argument registers used by the argument just processed.  */
+
+static void
+cris_function_arg_advance (cumulative_args_t ca_v, enum machine_mode mode,
+			   const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
+  /* Each argument occupies a whole number of 4-byte registers:
+     round its size up.  */
+  int reg_slots = (3 + CRIS_FUNCTION_ARG_SIZE (mode, type)) / 4;
+
+  ca->regs += reg_slots;
+}
+
+/* Worker function for TARGET_MD_ASM_CLOBBERS.
+
+   Extends IN_CLOBBERS for an asm statement: always adds the condition
+   code register, and adds MOF unless the asm's constraints or operands
+   already mention MOF (in which case adding the clobber would make the
+   asm's constraints impossible).  */
+
+static tree
+cris_md_asm_clobbers (tree outputs, tree inputs, tree in_clobbers)
+{
+  HARD_REG_SET mof_set;
+  tree clobbers;
+  tree t;
+
+  CLEAR_HARD_REG_SET (mof_set);
+  SET_HARD_REG_BIT (mof_set, CRIS_MOF_REGNUM);
+
+  /* For the time being, all asms clobber condition codes.  Revisit when
+     there's a reasonable use for inputs/outputs that mention condition
+     codes.  */
+  clobbers
+    = tree_cons (NULL_TREE,
+		 build_string (strlen (reg_names[CRIS_CC0_REGNUM]),
+			       reg_names[CRIS_CC0_REGNUM]),
+		 in_clobbers);
+
+  for (t = outputs; t != NULL; t = TREE_CHAIN (t))
+    {
+      tree val = TREE_VALUE (t);
+
+      /* The constraint letter for the singleton register class of MOF
+	 is 'h'.  If it's mentioned in the constraints, the asm is
+	 MOF-aware and adding it to the clobbers would cause it to have
+	 impossible constraints.  Return early without the MOF clobber.  */
+      if (strchr (TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))),
+		  'h') != NULL
+	  || tree_overlaps_hard_reg_set (val, &mof_set) != NULL_TREE)
+	return clobbers;
+    }
+
+  /* Same check for the input operands.  */
+  for (t = inputs; t != NULL; t = TREE_CHAIN (t))
+    {
+      tree val = TREE_VALUE (t);
+
+      if (strchr (TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))),
+		  'h') != NULL
+	  || tree_overlaps_hard_reg_set (val, &mof_set) != NULL_TREE)
+	return clobbers;
+    }
+
+  /* Neither constraints nor operands touch MOF: clobber it too.  */
+  return tree_cons (NULL_TREE,
+		    build_string (strlen (reg_names[CRIS_MOF_REGNUM]),
+				  reg_names[CRIS_MOF_REGNUM]),
+		    clobbers);
+}
+
+/* Implement TARGET_FRAME_POINTER_REQUIRED.
+
+   Really only needed if the stack frame has variable length (alloca
+   or variable sized local arguments (GNU C extension).  See PR39499 and
+   PR38609 for the reason this isn't just 0.  */
+
+bool
+cris_frame_pointer_required (void)
+{
+  /* When the stack pointer never moves after the prologue, locals can
+     be addressed off SP and no frame pointer is needed.  */
+  if (crtl->sp_is_unchanging)
+    return false;
+
+  return true;
+}
+
+/* Implement TARGET_ASM_TRAMPOLINE_TEMPLATE.
+
+ This looks too complicated, and it is. I assigned r7 to be the
+ static chain register, but it is call-saved, so we have to save it,
+ and come back to restore it after the call, so we have to save srp...
+ Anyway, trampolines are rare enough that we can cope with this
+ somewhat inelegant arrangement.
+ (Do not be tempted to "straighten up" whitespace in the asms; the
+ assembler #NO_APP state mandates strict spacing). */
+/* ??? See the i386 regparm=3 implementation that pushes the static
+ chain value to the stack in the trampoline, and uses a call-saved
+ register when called directly. */
+
+static void
+cris_asm_trampoline_template (FILE *f)
+{
+ if (TARGET_V32)
+ {
+ /* This normally-unused nop insn acts as an instruction to
+ the simulator to flush its instruction cache. None of
+ the other instructions in the trampoline template suits
+ as a trigger for V32. The pc-relative addressing mode
+ works nicely as a trigger for V10.
+ FIXME: Have specific V32 template (possibly avoiding the
+ use of a special instruction). */
+ fprintf (f, "\tclearf x\n");
+ /* We have to use a register as an intermediate, choosing
+ semi-randomly R1 (which has to not be the STATIC_CHAIN_REGNUM),
+ so we can use it for address indirection and jsr target. */
+ fprintf (f, "\tmove $r1,$mof\n");
+ /* +4 */
+ fprintf (f, "\tmove.d 0,$r1\n");
+ fprintf (f, "\tmove.d $%s,[$r1]\n", reg_names[STATIC_CHAIN_REGNUM]);
+ fprintf (f, "\taddq 6,$r1\n");
+ fprintf (f, "\tmove $mof,[$r1]\n");
+ fprintf (f, "\taddq 6,$r1\n");
+ fprintf (f, "\tmove $srp,[$r1]\n");
+ /* +20 */
+ fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]);
+ /* +26 */
+ fprintf (f, "\tmove.d 0,$r1\n");
+ fprintf (f, "\tjsr $r1\n");
+ fprintf (f, "\tsetf\n");
+ /* +36 */
+ fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]);
+ /* +42 */
+ fprintf (f, "\tmove.d 0,$r1\n");
+ /* +48 */
+ fprintf (f, "\tmove.d 0,$r9\n");
+ fprintf (f, "\tjump $r9\n");
+ fprintf (f, "\tsetf\n");
+ }
+ else
+ {
+ fprintf (f, "\tmove.d $%s,[$pc+20]\n", reg_names[STATIC_CHAIN_REGNUM]);
+ fprintf (f, "\tmove $srp,[$pc+22]\n");
+ fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]);
+ fprintf (f, "\tjsr 0\n");
+ fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]);
+ fprintf (f, "\tjump 0\n");
+ }
+}
+
+/* Implement TARGET_TRAMPOLINE_INIT. */
+
+static void
+cris_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+{
+ rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
+ rtx tramp = XEXP (m_tramp, 0);
+ rtx mem;
+
+ emit_block_move (m_tramp, assemble_trampoline_template (),
+ GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
+
+ if (TARGET_V32)
+ {
+ mem = adjust_address (m_tramp, SImode, 6);
+ emit_move_insn (mem, plus_constant (Pmode, tramp, 38));
+ mem = adjust_address (m_tramp, SImode, 22);
+ emit_move_insn (mem, chain_value);
+ mem = adjust_address (m_tramp, SImode, 28);
+ emit_move_insn (mem, fnaddr);
+ }
+ else
+ {
+ mem = adjust_address (m_tramp, SImode, 10);
+ emit_move_insn (mem, chain_value);
+ mem = adjust_address (m_tramp, SImode, 16);
+ emit_move_insn (mem, fnaddr);
+ }
+
+ /* Note that there is no need to do anything with the cache for
+ sake of a trampoline. */
+}
+
+
+#if 0
+/* Various small functions to replace macros.  Only called from a
+   debugger.  They might collide with gcc functions or system functions,
+   so only emit them when the '#if 0' above is changed to '#if 1'.  */
+
+enum rtx_code Get_code (rtx);
+
+enum rtx_code
+Get_code (rtx x)
+{
+ return GET_CODE (x);
+}
+
+const char *Get_mode (rtx);
+
+const char *
+Get_mode (rtx x)
+{
+ return GET_MODE_NAME (GET_MODE (x));
+}
+
+rtx Xexp (rtx, int);
+
+rtx
+Xexp (rtx x, int n)
+{
+ return XEXP (x, n);
+}
+
+rtx Xvecexp (rtx, int, int);
+
+rtx
+Xvecexp (rtx x, int n, int m)
+{
+ return XVECEXP (x, n, m);
+}
+
+int Get_rtx_len (rtx);
+
+int
+Get_rtx_len (rtx x)
+{
+ return GET_RTX_LENGTH (GET_CODE (x));
+}
+
+/* Use upper-case to distinguish from local variables that are sometimes
+ called next_insn and prev_insn. */
+
+rtx Next_insn (rtx);
+
+rtx
+Next_insn (rtx insn)
+{
+ return NEXT_INSN (insn);
+}
+
+rtx Prev_insn (rtx);
+
+rtx
+Prev_insn (rtx insn)
+{
+ return PREV_INSN (insn);
+}
+#endif
+
+#include "gt-cris.h"
+
+/*
+ * Local variables:
+ * eval: (c-set-style "gnu")
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/gcc-4.8/gcc/config/cris/cris.h b/gcc-4.8/gcc/config/cris/cris.h
new file mode 100644
index 000000000..23ba467af
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/cris.h
@@ -0,0 +1,1081 @@
+/* Definitions for GCC. Part of the machine description for CRIS.
+ Copyright (C) 1998-2013 Free Software Foundation, Inc.
+ Contributed by Axis Communications. Written by Hans-Peter Nilsson.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* After the first "Node:" comment comes all preprocessor directives and
+ attached declarations described in the info files, the "Using and
+ Porting GCC" manual (uapgcc), in the same order as found in the "Target
+ macros" section in the gcc-2.9x CVS edition of 2000-03-17. FIXME: Not
+ really, but needs an update anyway.
+
+ There is no generic copy-of-uapgcc comment, you'll have to see uapgcc
+ for that. If applicable, there is a CRIS-specific comment. The order
+ of macro definitions follow the order in the manual. Every section in
+ the manual (node in the info pages) has an introductory `Node:
+ <subchapter>' comment. If no macros are defined for a section, only
+ the section-comment is present. */
+
+/* Note that other header files (e.g. config/elfos.h, config/linux.h,
+ and config/cris/linux.h) are responsible for lots of settings not
+ repeated below. This file contains general CRIS definitions
+ and definitions for the cris-*-elf subtarget. */
+
+/* We don't want to use gcc_assert for everything, as that can be
+ compiled out. */
+#define CRIS_ASSERT(x) \
+ do { if (!(x)) internal_error ("CRIS-port assertion failed: " #x); } while (0)
+
+/* Replacement for REG_P since it does not match SUBREGs. Happens for
+ testcase Axis-20000320 with gcc-2.9x. */
+#define REG_S_P(x) \
+ (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (XEXP (x, 0))))
+
+/* Last register in main register bank r0..r15. */
+#define CRIS_LAST_GENERAL_REGISTER 15
+
+/* Descriptions of registers used for arguments. */
+#define CRIS_FIRST_ARG_REG 10
+#define CRIS_MAX_ARGS_IN_REGS 4
+
+/* See also *_REGNUM constants in cris.md. */
+
+/* Most of the time, we need the index into the register-names array.
+ When passing debug-info, we need the real hardware register number. */
+#define CRIS_CANONICAL_SRP_REGNUM (16 + 11)
+#define CRIS_CANONICAL_MOF_REGNUM (16 + 7)
+/* We have CCR in all models including v10, but that's 16 bits, so let's
+ prefer the DCCR number, which is a DMA pointer in pre-v8, so we'll
+ never clash with it for GCC purposes. */
+#define CRIS_CANONICAL_CC0_REGNUM (16 + 13)
+
+/* When generating PIC, these suffixes are added to the names of non-local
+ functions when being output. Contrary to other ports, we have offsets
+ relative to the GOT, not the PC. We might implement PC-relative PLT
+ semantics later for the general case; they are used in some cases right
+ now, such as MI thunks. */
+#define CRIS_GOTPLT_SUFFIX ":GOTPLT"
+#define CRIS_PLT_GOTOFFSET_SUFFIX ":PLTG"
+#define CRIS_PLT_PCOFFSET_SUFFIX ":PLT"
+
+#define CRIS_FUNCTION_ARG_SIZE(MODE, TYPE) \
+ ((MODE) != BLKmode ? GET_MODE_SIZE (MODE) \
+ : (unsigned) int_size_in_bytes (TYPE))
+
+/* Which CPU version this is. The parsed and adjusted cris_cpu_str. */
+extern int cris_cpu_version;
+
+/* Changing the order used to be necessary to put the fourth __make_dp
+ argument (a DImode parameter) in registers, to fit with the libfunc
+ parameter passing scheme used for intrinsic functions. FIXME: Check
+ performance. */
+#ifdef IN_LIBGCC2
+#define __make_dp(a,b,c,d) __cris_make_dp(d,a,b,c)
+#endif
+
+
+/* Node: Driver */
+
+/* Also provide canonical vN definitions when user specifies an alias. */
+
+#define CPP_SPEC \
+ "%{mtune=*:-D__tune_%* %{mtune=v*:-D__CRIS_arch_tune=%*}\
+ %{mtune=etrax4:-D__tune_v3 -D__CRIS_arch_tune=3}\
+ %{mtune=etrax100:-D__tune_v8 -D__CRIS_arch_tune=8}\
+ %{mtune=svinto:-D__tune_v8 -D__CRIS_arch_tune=8}\
+ %{mtune=etrax100lx:-D__tune_v10 -D__CRIS_arch_tune=10}\
+ %{mtune=ng:-D__tune_v10 -D__CRIS_arch_tune=10}}\
+ %{mcpu=*:-D__arch_%* %{mcpu=v*:-D__CRIS_arch_version=%*}\
+ %{mcpu=etrax4:-D__arch_v3 -D__CRIS_arch_version=3}\
+ %{mcpu=etrax100:-D__arch_v8 -D__CRIS_arch_version=8}\
+ %{mcpu=svinto:-D__arch_v8 -D__CRIS_arch_version=8}\
+ %{mcpu=etrax100lx:-D__arch_v10 -D__CRIS_arch_version=10}\
+ %{mcpu=ng:-D__arch_v10 -D__CRIS_arch_version=10}}\
+ %{march=*:-D__arch_%* %{march=v*:-D__CRIS_arch_version=%*}\
+ %{march=etrax4:-D__arch_v3 -D__CRIS_arch_version=3}\
+ %{march=etrax100:-D__arch_v8 -D__CRIS_arch_version=8}\
+ %{march=svinto:-D__arch_v8 -D__CRIS_arch_version=8}\
+ %{march=etrax100lx:-D__arch_v10 -D__CRIS_arch_version=10}\
+ %{march=ng:-D__arch_v10 -D__CRIS_arch_version=10}}\
+ %{metrax100:-D__arch__v8 -D__CRIS_arch_version=8}\
+ %{metrax4:-D__arch__v3 -D__CRIS_arch_version=3}\
+ %(cpp_subtarget)"
+
+/* For the cris-*-elf subtarget. */
+
+#define CRIS_DEFAULT_TUNE "10"
+#define CRIS_ARCH_CPP_DEFAULT
+#define CRIS_DEFAULT_ASM_ARCH_OPTION ""
+
+#ifdef TARGET_CPU_DEFAULT
+#if TARGET_CPU_DEFAULT != 32 && TARGET_CPU_DEFAULT != 10
+ #error "Due to '()'; e.g. '#define TARGET_CPU_DEFAULT (10)', stringize TARGET_CPU_DEFAULT isn't useful: update manually."
+#endif
+
+#if TARGET_CPU_DEFAULT == 32
+#undef CRIS_DEFAULT_TUNE
+#define CRIS_DEFAULT_TUNE "32"
+/* To enable use of "generic" cris-axis-elf binutils, always pass the
+ architecture option to GAS. (We don't do this for non-v32.) */
+#undef CRIS_DEFAULT_ASM_ARCH_OPTION
+#define CRIS_DEFAULT_ASM_ARCH_OPTION "--march=v32"
+#endif
+
+#undef CRIS_ARCH_CPP_DEFAULT
+#define CRIS_ARCH_CPP_DEFAULT \
+ "%{!march=*:\
+ %{!metrax*:\
+ %{!mcpu=*:\
+ %{!mtune=*:-D__tune_v" CRIS_DEFAULT_TUNE "}\
+ -D__arch_v"CRIS_DEFAULT_TUNE\
+ " -D__CRIS_arch_version=" CRIS_DEFAULT_TUNE "}}}"
+#endif
+
+#define CRIS_CPP_SUBTARGET_SPEC \
+ "%{mbest-lib-options:\
+ %{!moverride-best-lib-options:\
+ %{!march=*:%{!metrax*:%{!mcpu=*:\
+ -D__tune_v" CRIS_DEFAULT_TUNE \
+ " -D__CRIS_arch_tune=" CRIS_DEFAULT_TUNE "}}}}}"\
+ CRIS_ARCH_CPP_DEFAULT
+
+/* Override previous definitions (../linux.h). */
+#undef CC1_SPEC
+#define CC1_SPEC \
+ "%{metrax4:-march=v3}\
+ %{metrax100:-march=v8}\
+ %{march=*:-march=%*}\
+ %{mcpu=*:-mcpu=%*}\
+ %(cc1_subtarget)"
+
+/* For the cris-*-elf subtarget. */
+#define CRIS_CC1_SUBTARGET_SPEC \
+ "-melf\
+ %{mbest-lib-options:\
+ %{!moverride-best-lib-options:\
+ %{!march=*:%{!mcpu=*:-mtune=v" CRIS_DEFAULT_TUNE\
+ " -D__CRIS_arch_tune=" CRIS_DEFAULT_TUNE "}}\
+ %{!finhibit-size-directive:\
+ %{!fno-function-sections: -ffunction-sections}\
+ %{!fno-data-sections: -fdata-sections}}}}"
+
+/* This adds to CC1_SPEC. */
+#define CC1PLUS_SPEC ""
+
+#ifdef HAVE_AS_NO_MUL_BUG_ABORT_OPTION
+#define MAYBE_AS_NO_MUL_BUG_ABORT \
+ "%{mno-mul-bug-workaround:-no-mul-bug-abort} "
+#else
+#define MAYBE_AS_NO_MUL_BUG_ABORT
+#endif
+
+/* Override previous definitions (../linux.h). */
+#undef ASM_SPEC
+#define ASM_SPEC \
+ MAYBE_AS_NO_MUL_BUG_ABORT \
+ "%(asm_subtarget)\
+ %{march=*:%{mcpu=*:%edo not specify both -march=... and -mcpu=...}}\
+ %{march=v0|mcpu=v0|march=v3|mcpu=v3|march=v8|mcpu=v8:--march=v0_v10}\
+ %{march=v10|mcpu=v10:--march=v10}\
+ %{march=v32|mcpu=v32:--march=v32}"
+
+/* For the cris-*-elf subtarget. */
+#define CRIS_ASM_SUBTARGET_SPEC \
+ "--em=criself %{!march=*:%{!mcpu=*:" CRIS_DEFAULT_ASM_ARCH_OPTION "}}"
+
+/* FIXME: We should propagate the -melf option to make the criself
+ "emulation" unless a linker script is provided (-T*), but I don't know
+ how to do that if either of -Ttext, -Tdata or -Tbss is given but no
+ linker script, as is usually the case. Leave it to the user for the
+ time being. */
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{v:--verbose}\
+ %(link_subtarget)"
+
+/* For the cris-*-elf subtarget. */
+#define CRIS_LINK_SUBTARGET_SPEC \
+ "-mcriself\
+ %{sim2:%{!T*:-Tdata 0x4000000 -Tbss 0x8000000}}\
+ %{!r:%{O2|O3: --gc-sections}}"
+
+/* Which library to get. The simulator uses a different library for
+ the low-level syscalls (implementing the Linux syscall ABI instead
+ of direct-iron accesses). Default everything with the stub "nosys"
+ library. */
+/* Override previous definitions (linux.h). */
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{sim*:--start-group -lc -lsyslinux --end-group}\
+ %{!sim*:%{g*:-lg}\
+ %{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p} -lbsp}\
+ -lnosys"
+
+/* Linker startfile options; crt0 flavors.
+ We need to remove any previous definition (elfos.h). */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{sim*:crt1.o%s}%{!sim*:crt0.o%s}\
+ crti.o%s crtbegin.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+#define EXTRA_SPECS \
+ {"cpp_subtarget", CRIS_CPP_SUBTARGET_SPEC}, \
+ {"cc1_subtarget", CRIS_CC1_SUBTARGET_SPEC}, \
+ {"asm_subtarget", CRIS_ASM_SUBTARGET_SPEC}, \
+ {"link_subtarget", CRIS_LINK_SUBTARGET_SPEC}, \
+ CRIS_SUBTARGET_EXTRA_SPECS
+
+#define CRIS_SUBTARGET_EXTRA_SPECS
+
+
+/* Node: Run-time Target */
+
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_std ("cris"); \
+ builtin_define_std ("CRIS"); \
+ builtin_define_std ("GNU_CRIS"); \
+ builtin_define ("__CRIS_ABI_version=2"); \
+ builtin_assert ("cpu=cris"); \
+ builtin_assert ("machine=cris"); \
+ } \
+ while (0)
+
+/* Previously controlled by target_flags. Note that this is *not* set
+ for -melinux. */
+#define TARGET_LINUX 0
+
+/* For the cris-*-elf subtarget. */
+#define CRIS_SUBTARGET_DEFAULT 0
+
+#define CRIS_CPU_BASE 0
+#define CRIS_CPU_ETRAX4 3 /* Just lz added. */
+#define CRIS_CPU_SVINTO 8 /* Added swap, jsrc & Co., 32-bit accesses. */
+#define CRIS_CPU_NG 10 /* Added mul[su]. */
+#define CRIS_CPU_V32 32 /* Major changes. */
+
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT CRIS_CPU_BASE
+#endif
+
+/* Default target_flags if no switches specified.
+ The alignment-by-32 is to make builtin atomic support for v10 and v32
+ work for *-elf for types without specified alignment (like plain
+ "int"). See top comment in sync.md. */
+#ifndef TARGET_DEFAULT
+# if TARGET_CPU_DEFAULT == 32
+# define TARGET_DEFAULT \
+ (MASK_STACK_ALIGN \
+ + MASK_CONST_ALIGN + MASK_DATA_ALIGN \
+ + MASK_ALIGN_BY_32 \
+ + MASK_PROLOGUE_EPILOGUE)
+# elif TARGET_CPU_DEFAULT == 10
+# define TARGET_DEFAULT \
+ (MASK_SIDE_EFFECT_PREFIXES + MASK_STACK_ALIGN \
+ + MASK_CONST_ALIGN + MASK_DATA_ALIGN \
+ + MASK_ALIGN_BY_32 \
+ + MASK_PROLOGUE_EPILOGUE + MASK_MUL_BUG)
+# else /* 0 */
+# define TARGET_DEFAULT \
+ (MASK_SIDE_EFFECT_PREFIXES + MASK_STACK_ALIGN \
+ + MASK_CONST_ALIGN + MASK_DATA_ALIGN \
+ + MASK_PROLOGUE_EPILOGUE + MASK_MUL_BUG)
+# endif
+#endif
+
+/* Local, providing a default for cris_cpu_version. */
+#define CRIS_DEFAULT_CPU_VERSION TARGET_CPU_DEFAULT
+
+#define TARGET_HAS_MUL_INSNS (cris_cpu_version >= CRIS_CPU_NG)
+#define TARGET_HAS_LZ (cris_cpu_version >= CRIS_CPU_ETRAX4)
+#define TARGET_HAS_BREAK (cris_cpu_version >= CRIS_CPU_ETRAX4)
+#define TARGET_HAS_SWAP (cris_cpu_version >= CRIS_CPU_SVINTO)
+#define TARGET_V32 (cris_cpu_version >= CRIS_CPU_V32)
+
+/* The "break" instruction was introduced with ETRAX 4. */
+#define TARGET_TRAP_USING_BREAK8 \
+ (cris_trap_using_break8 == 2 ? TARGET_HAS_BREAK : cris_trap_using_break8)
+
+/* Call library functions by default for GNU/Linux. */
+#define TARGET_ATOMICS_MAY_CALL_LIBFUNCS \
+ (cris_atomics_calling_libfunc == 2 \
+ ? TARGET_LINUX : cris_atomics_calling_libfunc)
+
+/* The < v10 atomics turn off interrupts, so they don't need alignment.
+ Incidentally, by default alignment is off there causing variables to
+ be default unaligned all over, so we'd have to make support
+ libraries use a proper atomic type (instead of "int"), one we'd
+ specify as aligned. */
+#define TARGET_TRAP_UNALIGNED_ATOMIC \
+ (cris_trap_unaligned_atomic == 2 \
+ ? (TARGET_V32 || cris_cpu_version == 10) \
+ : cris_trap_unaligned_atomic)
+
+/* Node: Storage Layout */
+
+#define BITS_BIG_ENDIAN 0
+
+#define BYTES_BIG_ENDIAN 0
+
+/* WORDS_BIG_ENDIAN is not defined in the hardware, but for consistency,
+ we use little-endianness, and we may also be able to use
+ post-increment on DImode indirect. */
+#define WORDS_BIG_ENDIAN 0
+
+#define UNITS_PER_WORD 4
+
+#define CRIS_PROMOTED_MODE(MODE, UNSIGNEDP, TYPE) \
+ (GET_MODE_CLASS (MODE) == MODE_INT && GET_MODE_SIZE (MODE) < 4) \
+ ? SImode : MODE
+
+/* We will be using prototype promotion, so they will be 32 bit. */
+#define PARM_BOUNDARY 32
+
+/* Stack boundary is guided by -mstack-align, -mno-stack-align,
+ -malign.
+ Old comment: (2.1: still valid in 2.7.2?)
+ Note that to make this macro affect the alignment of stack
+ locals, a fix was required, and special precautions when handling
+ the stack pointer in various other macros (TARGET_ASM_FUNCTION_PROLOGUE
+ et al) were required. See file "function.c". If you would just define
+ this macro, it would only affect the builtin alloca and variable
+ local data (non-ANSI, non-K&R, Gnu C extension). */
+#define STACK_BOUNDARY \
+ (TARGET_STACK_ALIGN ? (TARGET_ALIGN_BY_32 ? 32 : 16) : 8)
+
+#define FUNCTION_BOUNDARY 16
+
+/* Do not change BIGGEST_ALIGNMENT (when optimizing), as it will affect
+ strange places, at least in 2.1. */
+#define BIGGEST_ALIGNMENT 8
+
+/* If -m16bit, -m16-bit, -malign or -mdata-align,
+ align everything to 16 bit. */
+#define DATA_ALIGNMENT(TYPE, BASIC_ALIGN) \
+ (TARGET_DATA_ALIGN \
+ ? (TARGET_ALIGN_BY_32 \
+ ? (BASIC_ALIGN < 32 ? 32 : BASIC_ALIGN) \
+ : (BASIC_ALIGN < 16 ? 16 : BASIC_ALIGN)) : BASIC_ALIGN)
+
+/* Note that CONSTANT_ALIGNMENT has the effect of making gcc believe that
+   ALL references to constant stuff (in code segment, like strings) have
+ this alignment. That is a rather rushed assumption. Luckily we do not
+ care about the "alignment" operand to builtin memcpy (only place where
+ it counts), so it doesn't affect any bad spots. */
+#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \
+ (TARGET_CONST_ALIGN \
+ ? (TARGET_ALIGN_BY_32 \
+ ? (BASIC_ALIGN < 32 ? 32 : BASIC_ALIGN) \
+ : (BASIC_ALIGN < 16 ? 16 : BASIC_ALIGN)) : BASIC_ALIGN)
+
+/* FIXME: Define LOCAL_ALIGNMENT for word and dword or arrays and
+ structures (if -mstack-align=), and check that it is good. */
+
+#define EMPTY_FIELD_BOUNDARY 8
+
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+#define STRICT_ALIGNMENT 0
+
+/* Remove any previous definition (elfos.h).
+ ??? If it wasn't for all the other stuff that affects layout of
+ structures and bit-fields, this could presumably cause incompatibility
+ with other GNU/Linux ports (i.e. elfos.h users). */
+#undef PCC_BITFIELD_TYPE_MATTERS
+
+/* This is only used for non-scalars. Strange stuff happens to structs
+ (FIXME: What?) if we use anything larger than largest actually used
+   datum size, so let's make it 32.  The type "long long" will still work
+ as usual. We can still have DImode insns, but they will only be used
+ for scalar data (i.e. long long). */
+#define MAX_FIXED_MODE_SIZE 32
+
+
+/* Node: Type Layout */
+
+/* Note that DOUBLE_TYPE_SIZE is not defined anymore, since the default
+ value gives a 64-bit double, which is what we now use. */
+
+/* For compatibility and historical reasons, a char should be signed. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* Note that WCHAR_TYPE_SIZE is used in cexp.y,
+ where TARGET_SHORT is not available. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+
+/* Node: Register Basics */
+
+/* We count all 16 non-special registers, SRP, a faked argument
+ pointer register, MOF and CCR/DCCR. */
+#define FIRST_PSEUDO_REGISTER (16 + 1 + 1 + 1 + 1)
+
+/* For CRIS, these are r15 (pc) and r14 (sp). Register r8 is used as a
+ frame-pointer, but is not fixed. SRP is not included in general
+ registers and will not be used automatically. All other special
+ registers are fixed at the moment. The faked argument pointer register
+ is fixed too. */
+#define FIXED_REGISTERS \
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1}
+
+/* Register r9 is used for structure-address, r10-r13 for parameters,
+ r10- for return values. */
+#define CALL_USED_REGISTERS \
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1}
+
+/* Node: Allocation Order */
+
+/* We need this on CRIS, because call-used regs should be used first,
+ (so we don't need to push). Else start using registers from r0 and up.
+ This preference is mainly because if we put call-used-regs from r0
+ and up, then we can't use movem to push the rest, (which have to be
+ saved if we use them, and movem has to start with r0).
+ Change here if you change which registers to use as call registers.
+
+ The actual need to explicitly prefer call-used registers improved the
+ situation a lot for 2.1, but might not actually be needed anymore.
+ Still, this order reflects what GCC should find out by itself, so it
+ probably does not hurt.
+
+ Order of preference: Call-used-regs first, then r0 and up, last fp &
+ sp & pc as fillers.
+ Call-used regs in opposite order, so they will cause less conflict if
+ a function has few args (<= 3) and it wants a scratch reg.
+ Use struct-return address first, since very few functions use
+ structure return values so it is likely to be available. */
+#define REG_ALLOC_ORDER \
+ {9, 13, 12, 11, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 14, 15, 17, 16, 18, 19}
+
+/* Use MOF and ACR. Prefer ACR before any other register. Prefer MOF
+ then SRP after saved registers. The *after* is because they're only
+ useful for storage, not for things being computed, which is
+ apparently more common. */
+#define REG_ALLOC_ORDER_V32 \
+ {15, 9, 13, 12, 11, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 17, 16, 14, 18, 19}
+
+
+/* Node: Values in Registers */
+
+/* The VOIDmode test is so we can omit mode on anonymous insns. FIXME:
+ Still needed in 2.9x, at least for Axis-20000319. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (MODE == VOIDmode \
+ ? 1 : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* CRIS permits all registers to hold all modes. Well, except for the
+ condition-code register. And we can't hold larger-than-register size
+ modes in the last special register that can hold a full 32 bits. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ (((MODE) == CCmode \
+ || (REGNO) != CRIS_CC0_REGNUM) \
+ && (GET_MODE_SIZE (MODE) <= UNITS_PER_WORD \
+ || ((REGNO) != CRIS_MOF_REGNUM && (REGNO) != CRIS_ACR_REGNUM)))
+
+/* Because CCmode isn't covered by the "narrower mode" statement in
+ tm.texi, we can still say all modes are tieable despite not having an
+ always 1 HARD_REGNO_MODE_OK. */
+#define MODES_TIEABLE_P(MODE1, MODE2) 1
+
+
+/* Node: Leaf Functions */
+/* (no definitions) */
+
+/* Node: Stack Registers */
+/* (no definitions) */
+
+
+/* Node: Register Classes */
+
+/* We need a separate register class to handle register allocation for
+ ACR, since it can't be used for post-increment.
+
+ It's not obvious, but having subunions of all movable-between
+ register classes does really help register allocation (pre-IRA
+ comment). */
+enum reg_class
+ {
+ NO_REGS,
+ ACR_REGS, MOF_REGS, SRP_REGS, CC0_REGS,
+ MOF_SRP_REGS, SPECIAL_REGS,
+ SPEC_ACR_REGS, GENNONACR_REGS,
+ SPEC_GENNONACR_REGS, GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+ };
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+ {"NO_REGS", \
+ "ACR_REGS", "MOF_REGS", "SRP_REGS", "CC0_REGS", \
+ "MOF_SRP_REGS", "SPECIAL_REGS", \
+ "SPEC_ACR_REGS", "GENNONACR_REGS", "SPEC_GENNONACR_REGS", \
+ "GENERAL_REGS", "ALL_REGS"}
+
+#define CRIS_SPECIAL_REGS_CONTENTS \
+ ((1 << CRIS_SRP_REGNUM) | (1 << CRIS_MOF_REGNUM) | (1 << CRIS_CC0_REGNUM))
+
+/* Count in the faked argument register in GENERAL_REGS. Keep out SRP. */
+#define REG_CLASS_CONTENTS \
+ { \
+ {0}, \
+ {1 << CRIS_ACR_REGNUM}, \
+ {1 << CRIS_MOF_REGNUM}, \
+ {1 << CRIS_SRP_REGNUM}, \
+ {1 << CRIS_CC0_REGNUM}, \
+ {(1 << CRIS_MOF_REGNUM) \
+ | (1 << CRIS_SRP_REGNUM)}, \
+ {CRIS_SPECIAL_REGS_CONTENTS}, \
+ {CRIS_SPECIAL_REGS_CONTENTS \
+ | (1 << CRIS_ACR_REGNUM)}, \
+ {(0xffff | (1 << CRIS_AP_REGNUM)) \
+ & ~(1 << CRIS_ACR_REGNUM)}, \
+ {(0xffff | (1 << CRIS_AP_REGNUM) \
+ | CRIS_SPECIAL_REGS_CONTENTS) \
+ & ~(1 << CRIS_ACR_REGNUM)}, \
+ {0xffff | (1 << CRIS_AP_REGNUM)}, \
+ {0xffff | (1 << CRIS_AP_REGNUM) \
+ | CRIS_SPECIAL_REGS_CONTENTS} \
+ }
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == CRIS_ACR_REGNUM ? ACR_REGS : \
+ (REGNO) == CRIS_MOF_REGNUM ? MOF_REGS : \
+ (REGNO) == CRIS_SRP_REGNUM ? SRP_REGS : \
+ (REGNO) == CRIS_CC0_REGNUM ? CC0_REGS : \
+ GENERAL_REGS)
+
+#define BASE_REG_CLASS GENERAL_REGS
+
+#define MODE_CODE_BASE_REG_CLASS(MODE, AS, OCODE, ICODE) \
+ ((OCODE) != POST_INC ? BASE_REG_CLASS : GENNONACR_REGS)
+
+#define INDEX_REG_CLASS GENERAL_REGS
+
+/* Since it uses reg_renumber, it is safe only once reg_renumber
+ has been allocated, which happens in reginfo.c during register
+ allocation. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) <= CRIS_LAST_GENERAL_REGISTER \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] <= CRIS_LAST_GENERAL_REGISTER \
+ || (unsigned) reg_renumber[REGNO] == ARG_POINTER_REGNUM)
+
+/* REGNO_OK_FOR_BASE_P seems to be obsolete wrt. this one, but not yet
+ documented as such. */
+#define REGNO_MODE_CODE_OK_FOR_BASE_P(REGNO, MODE, AS, OCODE, ICODE) \
+ (REGNO_OK_FOR_BASE_P (REGNO) \
+ && ((OCODE) != POST_INC \
+ || !((REGNO) == CRIS_ACR_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == CRIS_ACR_REGNUM)))
+
+/* See REGNO_OK_FOR_BASE_P. */
+#define REGNO_OK_FOR_INDEX_P(REGNO) REGNO_OK_FOR_BASE_P(REGNO)
+
+/* We can't move special registers to and from memory in smaller than
+ word_mode. We also can't move between special registers. Luckily,
+ -1, as returned by true_regnum for non-sub/registers, is valid as a
+ parameter to our REGNO_REG_CLASS, returning GENERAL_REGS, so we get
+ the effect that any X that isn't a special-register is treated as
+ a non-empty intersection with GENERAL_REGS. */
+#define SECONDARY_RELOAD_CLASS(CLASS, MODE, X) \
+ ((reg_class_subset_p (CLASS, SPECIAL_REGS) \
+ && ((GET_MODE_SIZE (MODE) < 4 && MEM_P (X)) \
+ || !reg_classes_intersect_p (REGNO_REG_CLASS (true_regnum (X)), \
+ GENERAL_REGS))) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* FIXME: Fix regrename.c; it should check validity of replacements,
+ not just with a silly pass-specific macro. We may miss some
+ opportunities, but we must stop regrename from creating acr++. */
+#define HARD_REGNO_RENAME_OK(FROM, TO) ((TO) != CRIS_ACR_REGNUM)
+
+/* For CRIS, this is always the size of MODE in words,
+ since all registers are the same size. To use omitted modes in
+ patterns with reload constraints, you must say the widest size
+ which is allowed for VOIDmode.
+ FIXME: Does that still apply for gcc-2.9x? Keep poisoned until such
+ patterns are added back. News: 2001-03-16: Happens as early as the
+ underscore-test. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((MODE) == VOIDmode \
+ ? 1 /* + cris_fatal ("CLASS_MAX_NREGS with VOIDmode") */ \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+
+/* Node: Frame Layout */
+
+#define STACK_GROWS_DOWNWARD
+#define FRAME_GROWS_DOWNWARD 1
+
+/* It seems to be indicated in the code (at least 2.1) that this is
+ better a constant, and best 0. */
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) \
+ cris_return_addr_rtx (COUNT, FRAMEADDR)
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, CRIS_SRP_REGNUM)
+
+/* FIXME: Any __builtin_eh_return callers must not return anything and
+ there must not be collisions with incoming parameters. Luckily the
+ number of __builtin_eh_return callers is limited. For now return
+ parameter registers in reverse order and hope for the best. */
+#define EH_RETURN_DATA_REGNO(N) \
+ (IN_RANGE ((N), 0, 3) ? (CRIS_FIRST_ARG_REG + 3 - (N)) : INVALID_REGNUM)
+
+/* Store the stack adjustment in the structure-return-address register. */
+#define CRIS_STACKADJ_REG CRIS_STRUCT_VALUE_REGNUM
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (SImode, CRIS_STACKADJ_REG)
+
+#define EH_RETURN_HANDLER_RTX \
+ cris_return_addr_rtx (0, NULL)
+
+#define INIT_EXPANDERS cris_init_expanders ()
+
+/* FIXME: Move this to right node (it's not documented properly yet). */
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (CRIS_SRP_REGNUM)
+
+/* FIXME: Move this to right node (it's not documented properly yet).
+ FIXME: Check what alignment we can assume regarding
+ TARGET_STACK_ALIGN and TARGET_ALIGN_BY_32. */
+#define DWARF_CIE_DATA_ALIGNMENT -1
+
+/* If we would ever need an exact mapping between canonical register
+ number and dwarf frame register, we would either need to include all
+ registers in the gcc description (with some marked fixed of course), or
+ an inverse mapping from dwarf register to gcc register. There is one
+ need in dwarf2out.c:expand_builtin_init_dwarf_reg_sizes. Right now, I
+ don't see that we need exact correspondence between DWARF *frame*
+ registers and DBX_REGISTER_NUMBER, so map them onto GCC registers. */
+#define DWARF_FRAME_REGNUM(REG) (REG)
+
+/* Node: Stack Checking */
+/* (no definitions) FIXME: Check. */
+
+/* Node: Frame Registers */
+
+#define STACK_POINTER_REGNUM CRIS_SP_REGNUM
+
+/* Register used for frame pointer. This is also the last of the saved
+ registers, when a frame pointer is not used. */
+#define FRAME_POINTER_REGNUM CRIS_FP_REGNUM
+
+/* Faked register, is always eliminated. We need it to eliminate
+ allocating stack slots for the return address and the frame pointer. */
+#define ARG_POINTER_REGNUM CRIS_AP_REGNUM
+
+#define STATIC_CHAIN_REGNUM CRIS_STATIC_CHAIN_REGNUM
+
+
+/* Node: Elimination */
+
+#define ELIMINABLE_REGS \
+ {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ (OFFSET) = cris_initial_elimination_offset (FROM, TO)
+
+
+/* Node: Stack Arguments */
+
+/* Since many parameters take up one register each in any case,
+ defining TARGET_PROMOTE_PROTOTYPES that always returns true would
+ seem like a good idea, but measurements indicate that a combination
+ using PROMOTE_MODE is better. */
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+
+/* Node: Register Arguments */
+
+/* Contrary to what you'd believe, defining FUNCTION_ARG_CALLEE_COPIES
+ seems like a (small total) loss, at least for gcc-2.7.2 compiling and
+ running gcc-2.1 (small win in size, small loss running -- 100.1%),
+ and similarly for size for products (.1 .. .3% bloat, sometimes win).
+ Due to the empirical likeliness of making slower code, it is not
+ defined. */
+
+/* This no longer *needs* to be a structure; but keeping it as such should
+ not hurt (and hacking the ABI is simpler). */
+#define CUMULATIVE_ARGS struct cum_args
+struct cum_args {int regs;};
+
+/* The regs member is an integer, the number of arguments got into
+ registers so far. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
+ ((CUM).regs = 0)
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= CRIS_FIRST_ARG_REG \
+ && (REGNO) < CRIS_FIRST_ARG_REG + (CRIS_MAX_ARGS_IN_REGS))
+
+
+/* Node: Aggregate Return */
+
+#define CRIS_STRUCT_VALUE_REGNUM ((CRIS_FIRST_ARG_REG) - 1)
+
+
+/* Node: Caller Saves */
+/* (no definitions) */
+
+/* Node: Function entry */
+
+/* See cris.c for TARGET_ASM_FUNCTION_PROLOGUE and
+ TARGET_ASM_FUNCTION_EPILOGUE. */
+
+/* Node: Profiling */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ error ("no FUNCTION_PROFILER for CRIS")
+
+/* FIXME: Some of the undefined macros might be mandatory. If so, fix
+ documentation. */
+
+
+/* Node: Trampolines */
+
+#define TRAMPOLINE_SIZE (TARGET_V32 ? 58 : 32)
+
+/* CRIS wants instructions on word-boundary. */
+#define TRAMPOLINE_ALIGNMENT 16
+
+/* Node: Library Calls */
+
+/* If you change this, you have to check whatever libraries and systems
+ that use it. */
+#define TARGET_EDOM 33
+
+
+/* Node: Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (CONSTANT_P (X) && cris_legitimate_address_p (QImode, X, false))
+
+/* Must be a compile-time constant, so we go with the highest value
+ among all CRIS variants. */
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Fix reloads known to cause suboptimal spilling. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, INDL, WIN) \
+ do \
+ { \
+ if (cris_reload_address_legitimized (X, MODE, OPNUM, TYPE, INDL)) \
+ goto WIN; \
+ } \
+ while (0)
+
+
+/* Node: Condition Code */
+
+#define NOTICE_UPDATE_CC(EXP, INSN) cris_notice_update_cc (EXP, INSN)
+
+/* FIXME: Maybe define CANONICALIZE_COMPARISON later, when playing with
+ optimizations. It is needed; currently we do this with instruction
+ patterns and NOTICE_UPDATE_CC. */
+
+
+/* Node: Costs */
+
+/* Regardless of the presence of delay slots, the default value of 1 for
+ BRANCH_COST is the best in the range (1, 2, 3), tested with gcc-2.7.2
+ with testcases ipps and gcc, giving smallest and fastest code. */
+
+#define SLOW_BYTE_ACCESS 0
+
+/* This is the threshold *below* which inline move sequences of
+ word-length sizes will be emitted. The "9" will translate to
+ (9 - 1) * 4 = 32 bytes maximum moved, but using 16 instructions
+ (8 instruction sequences) or less. */
+#define MOVE_RATIO(speed) 9
+
+
+/* Node: Sections */
+
+#define TEXT_SECTION_ASM_OP "\t.text"
+
+#define DATA_SECTION_ASM_OP "\t.data"
+
+/* The jump table is immediately connected to the preceding insn. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+
+/* Node: PIC */
+
+/* Helper type. */
+
+enum cris_pic_symbol_type
+  {
+    cris_no_symbol = 0,			/* Not a symbol needing PIC treatment.  */
+    cris_got_symbol = 1,		/* Symbol accessed through the GOT.  */
+    cris_rel_symbol = 2,		/* Symbol accessed GOT- or PC-relative
+					   (presumably local; confirm in cris.c).  */
+    cris_got_symbol_needing_fixup = 3,	/* GOT symbol that also needs a fixup.  */
+    cris_invalid_pic_symbol = 4		/* Operand not representable under PIC.  */
+  };
+
+#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? CRIS_GOT_REGNUM : INVALID_REGNUM)
+
+#define LEGITIMATE_PIC_OPERAND_P(X) cris_legitimate_pic_operand (X)
+
+
+/* Node: File Framework */
+
+/* We don't want an .ident for gcc. To avoid that but still support
+ #ident, we override TARGET_ASM_OUTPUT_IDENT and, since the gcc .ident
+ is its only use besides front-end .ident directives, we return if
+   the state of the cgraph is not CGRAPH_STATE_PARSING. */
+#undef TARGET_ASM_OUTPUT_IDENT
+#define TARGET_ASM_OUTPUT_IDENT cris_asm_output_ident
+
+#define ASM_APP_ON "#APP\n"
+
+#define ASM_APP_OFF "#NO_APP\n"
+
+
+/* Node: Data Output */
+
+#define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) (C) == '@'
+
+/* Node: Uninitialized Data */
+
+/* Remember to round off odd values if we want data alignment,
+ since we cannot do that with an .align directive.
+
+ Using .comm causes the space not to be reserved in .bss, but by
+ tricks with the symbol type. Not good if other tools than binutils
+ are used on the object files. Since ".global ... .lcomm ..." works, we
+ use that. Use .._ALIGNED_COMMON, since gcc whines when we only have
+ ..._COMMON, and we prefer to whine ourselves; BIGGEST_ALIGNMENT is not
+ the one to check. */
+/* FIXME: I suspect a bug in gcc with alignment. Do not warn until
+ investigated; it mucks up the testsuite results. */
+#define CRIS_ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN, LOCAL) \
+ do \
+ { \
+ int align_ = (ALIGN) / BITS_PER_UNIT; \
+ if (TARGET_DATA_ALIGN && TARGET_ALIGN_BY_32 && align_ < 4) \
+ align_ = 4; \
+ else if (TARGET_DATA_ALIGN && align_ < 2) \
+ align_ = 2; \
+ /* FIXME: Do we need this? */ \
+ else if (align_ < 1) \
+ align_ = 1; \
+ \
+ if (LOCAL) \
+ { \
+ fprintf ((FILE), "%s", LOCAL_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "\n"); \
+ } \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ",%u,%u\n", (int)(SIZE), align_); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN) \
+ CRIS_ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN, 0)
+
+#undef ASM_OUTPUT_ALIGNED_DECL_LOCAL
+#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(FILE, DECL, NAME, SIZE, ALIGN) \
+ CRIS_ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN, 1)
+
+/* Node: Label Output */
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.global "
+
+#define SUPPORTS_WEAK 1
+
+#define ASM_OUTPUT_SYMBOL_REF(STREAM, SYM) \
+ cris_asm_output_symbol_ref (STREAM, SYM)
+
+#define ASM_OUTPUT_LABEL_REF(STREAM, BUF) \
+ cris_asm_output_label_ref (STREAM, BUF)
+
+/* Remove any previous definition (elfos.h). */
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \
+ sprintf (LABEL, "*%s%s%ld", LOCAL_LABEL_PREFIX, PREFIX, (long) NUM)
+
+/* Node: Initialization */
+/* (no definitions) */
+
+/* Node: Macros for Initialization */
+/* (no definitions) */
+
+/* Node: Instruction Output */
+
+#define REGISTER_NAMES \
+ {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", \
+ "r9", "r10", "r11", "r12", "r13", "sp", "acr", "srp", "mof", "faked_ap", "dccr"}
+
+#define ADDITIONAL_REGISTER_NAMES \
+ {{"r14", 14}, {"r15", 15}, {"pc", 15}}
+
+/* Output an empty line to illustrate the presence of the delay slot. */
+#define DBR_OUTPUT_SEQEND(FILE) \
+ fprintf (FILE, "\n")
+
+#define LOCAL_LABEL_PREFIX "."
+
+/* cppinit.c initializes a const array from this, so it must be constant,
+ can't have it different based on options. Luckily, the prefix is
+ always allowed, so let's have it on all GCC-generated code. Note that
+ we have this verbatim everywhere in the back-end, not using %R or %s or
+ such. */
+#define REGISTER_PREFIX "$"
+
+/* Remove any previous definition (elfos.h). */
+/* We use -fno-leading-underscore to remove it, when necessary. */
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+#define ASM_OUTPUT_REG_PUSH(FILE, REGNO) \
+ fprintf (FILE, \
+ TARGET_V32 \
+ ? "\tsubq 4,$sp\n\tmove $%s,[$sp]\n" : "\tpush $%s\n", \
+ reg_names[REGNO])
+
+#define ASM_OUTPUT_REG_POP(FILE, REGNO) \
+ fprintf (FILE, "\tmove [$sp+],$%s\n", reg_names[REGNO])
+
+
+/* Node: Dispatch Tables */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ do \
+ { \
+ if (TARGET_V32) \
+ asm_fprintf (FILE, "\t.word %LL%d-.\n", VALUE); \
+ else \
+ asm_fprintf (FILE, "\t.word %LL%d-%LL%d\n", VALUE, REL); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ asm_fprintf (FILE, "\t.dword %LL%d\n", VALUE)
+
+/* Defined to also emit an .align in elfos.h. We don't want that. */
+#undef ASM_OUTPUT_CASE_LABEL
+
+;; Since the "bound" insn loads the comparison value if the compared
+ value (register) is out of bounds (0..comparison value-1), we need
+ to output another case to catch it.
+ The way to find it is to look for the label_ref at the else-arm inside
+ the expanded casesi core-insn.
+ FIXME: Check this construct when changing to new version of gcc. */
+#define ASM_OUTPUT_CASE_END(STREAM, NUM, TABLE) \
+ cris_asm_output_case_end (STREAM, NUM, TABLE)
+
+
+/* Node: Exception Region Output */
+/* (no definitions) */
+/* FIXME: Fill in with our own optimized layout. */
+
+/* Node: Alignment Output */
+
+#define ASM_OUTPUT_ALIGN(FILE, LOG) \
+ fprintf (FILE, "\t.align %d\n", (LOG))
+
+
+/* Node: All Debuggers */
+
+#define DBX_REGISTER_NUMBER(REGNO) \
+ ((REGNO) == CRIS_SRP_REGNUM ? CRIS_CANONICAL_SRP_REGNUM : \
+ (REGNO) == CRIS_MOF_REGNUM ? CRIS_CANONICAL_MOF_REGNUM : \
+ (REGNO) == CRIS_CC0_REGNUM ? CRIS_CANONICAL_CC0_REGNUM : \
+ (REGNO))
+
+/* FIXME: Investigate DEBUGGER_AUTO_OFFSET, DEBUGGER_ARG_OFFSET. */
+
+
+/* Node: DBX Options */
+
+/* Is this correct? Check later. */
+#define DBX_NO_XREFS
+
+#define DBX_CONTIN_LENGTH 0
+
+/* FIXME: Is this needed when we have 0 DBX_CONTIN_LENGTH? */
+#define DBX_CONTIN_CHAR '?'
+
+
+/* Node: DBX Hooks */
+/* (no definitions) */
+
+/* Node: File names and DBX */
+/* (no definitions) */
+
+
+/* Node: SDB and DWARF */
+/* (no definitions) */
+
+/* Node: Misc */
+
+/* A combination of the bound (umin) insn together with a
+ sign-extended add via the table to PC seems optimal.
+ If the table overflows, the assembler will take care of it.
+ Theoretically, in extreme cases (uncertain if they occur), an error
+ will be emitted, so FIXME: Check how large case-tables are emitted,
+ possible add an option to emit SImode case-tables. */
+#define CASE_VECTOR_MODE HImode
+
+#define CASE_VECTOR_PC_RELATIVE 1
+
+/* FIXME: Investigate CASE_VECTOR_SHORTEN_MODE to make sure HImode is not
+ used when broken-.word could possibly fail (plus testcase). */
+
+/* This is the number of bytes that can be moved in one
+ reasonably fast instruction sequence. For CRIS, this is two
+ instructions: mem => reg, reg => mem. */
+#define MOVE_MAX 4
+
+/* Maybe SHIFT_COUNT_TRUNCATED is safe to define? FIXME: Check later. */
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 32, 1)
+#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 32, 1)
+
+#define Pmode SImode
+
+#define FUNCTION_MODE QImode
+
+#define NO_IMPLICIT_EXTERN_C
+
+/*
+ * Local variables:
+ * eval: (c-set-style "gnu")
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/gcc-4.8/gcc/config/cris/cris.md b/gcc-4.8/gcc/config/cris/cris.md
new file mode 100644
index 000000000..8a7f0bfdd
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/cris.md
@@ -0,0 +1,5156 @@
+;; GCC machine description for CRIS cpu cores.
+;; Copyright (C) 1998-2013 Free Software Foundation, Inc.
+;; Contributed by Axis Communications.
+
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; The original PO technology requires these to be ordered by speed,
+;; so that assigner will pick the fastest.
+
+;; See files "md.texi" and "rtl.def" for documentation on define_insn,
+;; match_*, et. al.
+;;
+;; The function cris_notice_update_cc in cris.c handles condition code
+;; updates for most instructions, helped by the "cc" attribute.
+
+;; There are several instructions that are orthogonal in size, and seems
+;; they could be matched by a single pattern without a specified size
+;; for the operand that is orthogonal. However, this did not work on
+;; gcc-2.7.2 (and probably not on gcc-2.8.1), relating to that when a
+;; constant is substituted into an operand, the actual mode must be
+;; deduced from the pattern. There is reasonable hope that that has been
+;; fixed, so FIXME: try again.
+
+;; You will notice that three-operand alternatives ("=r", "r", "!To")
+;; are marked with a "!" constraint modifier to avoid being reloaded
+;; into. This is because gcc would otherwise prefer to use the constant
+;; pool and its offsettable address instead of reloading to an
+;; ("=r", "0", "i") alternative. Also, the constant-pool support was not
+;; only suboptimal but also buggy in 2.7.2, ??? maybe only in 2.6.3.
+
+;; All insns that look like (set (...) (plus (...) (reg:SI 8)))
+;; get problems when reloading r8 (frame pointer) to r14 + offs (stack
+;; pointer). Thus the instructions that get into trouble have specific
+;; checks against matching frame_pointer_rtx.
+;; ??? But it should be re-checked for gcc > 2.7.2
+;; FIXME: This changed some time ago (from 2000-03-16) for gcc-2.9x.
+
+;; FIXME: When PIC, all [rX=rY+S] could be enabled to match
+;; [rX=gotless_symbol].
+;; The movsi for a gotless symbol could be split (post reload).
+
+
+(define_c_enum ""
+ [
+ ;; PLT reference from call expansion: operand 0 is the address,
+ ;; the mode is VOIDmode. Always wrapped in CONST.
+ ;; The value is relative to the GOT.
+ CRIS_UNSPEC_PLT_GOTREL
+
+ ;; PLT reference from call expansion: operand 0 is the address,
+ ;; the mode is VOIDmode. Always wrapped in CONST.
+ ;; The value is relative to the PC. It's arch-dependent whether
+ ;; the offset counts from the start or the end of the current item.
+ CRIS_UNSPEC_PLT_PCREL
+
+ ;; The address of the global offset table as a source operand.
+ CRIS_UNSPEC_GOT
+
+ ;; The offset from the global offset table to the operand.
+ CRIS_UNSPEC_GOTREL
+
+ ;; The PC-relative offset to the operand. It's arch-dependent whether
+ ;; the offset counts from the start or the end of the current item.
+ CRIS_UNSPEC_PCREL
+
+ ;; The index into the global offset table of a symbol, while
+ ;; also generating a GOT entry for the symbol.
+ CRIS_UNSPEC_GOTREAD
+
+ ;; Similar to CRIS_UNSPEC_GOTREAD, but also generating a PLT entry.
+ CRIS_UNSPEC_PLTGOTREAD
+
+ ;; Condition for v32 casesi jump, since it needs to have if_then_else
+ ;; form with register as one branch and default label as other.
+ ;; Operand 0 is const_int 0.
+ CRIS_UNSPEC_CASESI
+
+ ;; Stack frame deallocation barrier.
+ CRIS_UNSPEC_FRAME_DEALLOC
+
+ ;; Swap all 32 bits of the operand; 31 <=> 0, 30 <=> 1...
+ CRIS_UNSPEC_SWAP_BITS
+ ])
+
+;; Register numbers.  These correspond to REGISTER_NAMES in cris.h:
+;; 14 = "sp", 15 = "acr" (aliased to "pc" in ADDITIONAL_REGISTER_NAMES),
+;; 16 = "srp", 17 = "mof"; 18 ("faked_ap") and 19 ("dccr") are faked
+;; registers that are always eliminated or internal-only.
+(define_constants
+  [(CRIS_GOT_REGNUM 0)			;; PIC offset-table (GOT) register.
+   (CRIS_STATIC_CHAIN_REGNUM 7)	;; STATIC_CHAIN_REGNUM in cris.h.
+   (CRIS_FP_REGNUM 8)			;; Frame pointer.
+   (CRIS_SP_REGNUM 14)			;; Stack pointer.
+   (CRIS_ACR_REGNUM 15)		;; ACR; same number as "pc".
+   (CRIS_SRP_REGNUM 16)		;; Subroutine return pointer.
+   (CRIS_MOF_REGNUM 17)		;; Multiply-overflow register.
+   (CRIS_AP_REGNUM 18)			;; Faked arg pointer, always eliminated.
+   (CRIS_CC0_REGNUM 19)]		;; Condition-code register ("dccr").
+)
+
+;; We need an attribute to define whether an instruction can be put in
+;; a branch-delay slot or not, and whether it has a delay slot.
+;;
+;; Branches and return instructions have a delay slot, and cannot
+;; themselves be put in a delay slot. This has changed *for short
+;; branches only* between architecture variants, but the possible win
+;; is presumed negligible compared to the added complexity of the machine
+;; description: one would have to add always-correct infrastructure to
+;; distinguish short branches.
+;;
+;; Whether an instruction can be put in a delay slot depends on the
+;; instruction (all short instructions except jumps and branches)
+;; and the addressing mode (must not be prefixed or referring to pc).
+;; In short, any "slottable" instruction must be 16 bit and not refer
+;; to pc, or alter it.
+;;
+;; The possible values are "yes", "no", "has_slot", "has_return_slot"
+;; and "has_call_slot".
+;; Yes/no tells whether the insn is slottable or not. Has_call_slot means
+;; that the insn is a call insn, which for CRIS v32 has a delay-slot.
+;; Of special concern is that no RTX_FRAME_RELATED insn must go in that
+;; call delay slot, as it's located in the address *after* the call insn,
+;; and the unwind machinery doesn't know about delay slots.
+;; Has_slot means that the insn is a branch insn (which are
+;; not considered slottable since that is generally true). Having the
+;; seemingly illogical value "has_slot" means we do not have to add
+;; another attribute just to say that an insn has a delay-slot, since it
+;; also infers that it is not slottable. Better names for the attribute
+;; were found to be longer and not add readability to the machine
+;; description.
+;; Has_return_slot is similar, for the return insn.
+;;
+;; The default that is defined here for this attribute is "no", not
+;; slottable, not having a delay-slot, so there's no need to worry about
+;; it being wrong for non-branch and return instructions.
+;; The default could depend on the kind of insn and the addressing
+;; mode, but that would need more attributes and hairier, more error
+;; prone code.
+;;
+;; There is an extra memory constraint, 'Q', which recognizes an indirect
+;; register. The constraints 'Q' and '>' together match all possible
+;; memory operands that are slottable.
+;; For other operands, you need to check if it has a valid "slottable"
+;; quick-immediate operand, where the particular signedness-variation
+;; may match the constraints 'I' or 'J'.), and include it in the
+;; constraint pattern for the slottable pattern. An alternative using
+;; only "r" constraints is most often slottable.
+
+(define_attr "slottable" "no,yes,has_slot,has_return_slot,has_call_slot"
+ (const_string "no"))
+
+;; We also need attributes to sanely determine the condition code
+;; state. See cris_notice_update_cc for how this is used.
+
+(define_attr "cc" "none,clobber,normal,noov32,rev" (const_string "normal"))
+
+;; At the moment, this attribute is just used to help bb-reorder do its
+;; work; the default 0 doesn't help it. Many insns have other lengths,
+;; though none are shorter.
+(define_attr "length" "" (const_int 2))
+
+;; A branch has one delay-slot. The instruction in the
+;; delay-slot is always executed, independent of whether the branch is
+;; taken or not. Note that besides setting "slottable" to "has_slot",
+;; there also has to be a "%#" at the end of a "delayed" instruction
+;; output pattern (for "jump" this means "ba %l0%#"), so print_operand can
+;; catch it and print a "nop" if necessary. This method was stolen from
+;; sparc.md.
+
+(define_delay (eq_attr "slottable" "has_slot")
+ [(eq_attr "slottable" "yes") (nil) (nil)])
+
+;; We can't put prologue insns in call-insn delay-slots when
+;; DWARF2 unwind info is emitted, because the unwinder matches the
+;; address after the insn. It must see the return address of a call at
+;; a position at least *one byte after* the insn, or it'll think that
+;; the insn hasn't been executed. If the insn is in a delay-slot of a
+;; call, it's just *exactly* after the insn.
+
+(define_delay (eq_attr "slottable" "has_call_slot")
+ [(and (eq_attr "slottable" "yes")
+ (ior (not (match_test "RTX_FRAME_RELATED_P (insn)"))
+ (not (match_test "flag_exceptions"))))
+ (nil) (nil)])
+
+;; The insn in the return insn slot must not be the
+;; return-address-register restore. FIXME: Use has_slot and express
+;; as a parallel with a use of the return-address-register (currently
+;; only SRP). However, this requires an amount of fixing tests for
+;; naked RETURN in middle-end.
+(define_delay (eq_attr "slottable" "has_return_slot")
+ [(and (eq_attr "slottable" "yes")
+ (not (match_test "dead_or_set_regno_p (insn, CRIS_SRP_REGNUM)")))
+ (nil) (nil)])
+
+
+;; Iterator definitions.
+
+;; For the "usual" pattern size alternatives.
+(define_mode_iterator BWD [SI HI QI])
+(define_mode_iterator WD [SI HI])
+(define_mode_iterator BW [HI QI])
+(define_mode_attr S [(SI "HI") (HI "QI")])
+(define_mode_attr s [(SI "hi") (HI "qi")])
+(define_mode_attr m [(SI ".d") (HI ".w") (QI ".b")])
+(define_mode_attr mm [(SI ".w") (HI ".b")])
+(define_mode_attr nbitsm1 [(SI "31") (HI "15") (QI "7")])
+
+;; For the sign_extend+zero_extend variants.
+(define_code_iterator szext [sign_extend zero_extend])
+(define_code_attr u [(sign_extend "") (zero_extend "u")])
+(define_code_attr su [(sign_extend "s") (zero_extend "u")])
+
+;; For the shift variants.
+(define_code_iterator shift [ashiftrt lshiftrt ashift])
+(define_code_iterator shiftrt [ashiftrt lshiftrt])
+(define_code_attr shlr [(ashiftrt "ashr") (lshiftrt "lshr") (ashift "ashl")])
+(define_code_attr slr [(ashiftrt "asr") (lshiftrt "lsr") (ashift "lsl")])
+
+(define_code_iterator ncond [eq ne gtu ltu geu leu])
+(define_code_iterator ocond [gt le])
+(define_code_iterator rcond [lt ge])
+(define_code_attr CC [(eq "eq") (ne "ne") (gt "gt") (gtu "hi") (lt "lt")
+ (ltu "lo") (ge "ge") (geu "hs") (le "le") (leu "ls")])
+(define_code_attr rCC [(eq "ne") (ne "eq") (gt "le") (gtu "ls") (lt "ge")
+ (ltu "hs") (ge "lt") (geu "lo") (le "gt") (leu "hi")])
+(define_code_attr oCC [(lt "mi") (ge "pl")])
+(define_code_attr roCC [(lt "pl") (ge "mi")])
+
+;; Operand and operator predicates.
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; Test insns.
+
+;; No test insns with side-effect on the mem addressing.
+;;
+;; See note on cmp-insns with side-effects (or lack of them)
+
+;; Normal named test patterns from SI on.
+
+;; Compare a full word against zero.  The alternatives are: register,
+;; indirect register ('Q'/'>', slottable -- see the slottable comment
+;; above), and general memory (not slottable).
+(define_insn "*tstsi"
+  [(set (cc0)
+	(compare (match_operand:SI 0 "nonimmediate_operand" "r,Q>,m")
+		 (const_int 0)))]
+  ""
+{
+  /* For the register alternative on v32, emit "cmpq 0" instead of
+     "test.d"; presumably test.d on a register sets cc differently on
+     v32 -- TODO confirm against cris_notice_update_cc in cris.c.  */
+  if (which_alternative == 0 && TARGET_V32)
+    return "cmpq 0,%0";
+  return "test.d %0";
+}
+ [(set_attr "slottable" "yes,yes,no")])
+
+(define_insn "*tst<mode>_cmp"
+ [(set (cc0)
+ (compare (match_operand:BW 0 "nonimmediate_operand" "r,Q>,m")
+ (const_int 0)))]
+ "cris_cc0_user_requires_cmp (insn)"
+ "@
+ cmp<m> 0,%0
+ test<m> %0
+ test<m> %0"
+ [(set_attr "slottable" "no,yes,no")])
+
+(define_insn "*tst<mode>_non_cmp"
+ [(set (cc0)
+ (compare (match_operand:BW 0 "nonimmediate_operand" "r,Q>,m")
+ (const_int 0)))]
+ "!cris_cc0_user_requires_cmp (insn)"
+ "@
+ move<m> %0,%0
+ test<m> %0
+ test<m> %0"
+ [(set_attr "slottable" "yes,yes,no")
+ (set_attr "cc" "noov32,*,*")])
+
+;; It seems that the position of the sign-bit and the fact that 0.0 is
+;; all 0-bits would make "tstsf" a straight-forward implementation;
+;; either "test.d" it for positive/negative or "btstq 30,r" it for
+;; zeroness.
+;;
+;; FIXME: Do that some time; check next_cc0_user to determine if
+;; zero or negative is tested for.
+
+;; Compare insns.
+
+;; We could optimize the sizes of the immediate operands for various
+;; cases, but that is not worth it because of the very little usage of
+;; DImode for anything else but a structure/block-mode. Just do the
+;; obvious stuff for the straight-forward constraint letters.
+
+(define_insn "*cmpdi_non_v32"
+ [(set (cc0)
+ (compare (match_operand:DI 0 "nonimmediate_operand" "rm,r,r,r,r,r,r,o")
+ (match_operand:DI 1 "general_operand" "M,Kc,I,P,n,r,o,r")))]
+ "!TARGET_V32"
+ "@
+ test.d %M0\;ax\;test.d %H0
+ cmpq %1,%M0\;ax\;cmpq 0,%H0
+ cmpq %1,%M0\;ax\;cmpq -1,%H0
+ cmp%e1.%z1 %1,%M0\;ax\;cmpq %H1,%H0
+ cmp.d %M1,%M0\;ax\;cmp.d %H1,%H0
+ cmp.d %M1,%M0\;ax\;cmp.d %H1,%H0
+ cmp.d %M1,%M0\;ax\;cmp.d %H1,%H0
+ cmp.d %M0,%M1\;ax\;cmp.d %H0,%H1")
+
+(define_insn "*cmpdi_v32"
+ [(set (cc0)
+ (compare (match_operand:DI 0 "register_operand" "r,r,r,r,r")
+ (match_operand:DI 1 "nonmemory_operand" "Kc,I,P,n,r")))]
+ "TARGET_V32"
+ "@
+ cmpq %1,%M0\;ax\;cmpq 0,%H0
+ cmpq %1,%M0\;ax\;cmpq -1,%H0
+ cmp%e1.%z1 %1,%M0\;ax\;cmpq %H1,%H0
+ cmp.d %M1,%M0\;ax\;cmp.d %H1,%H0
+ cmp.d %M1,%M0\;ax\;cmp.d %H1,%H0")
+
+;; Note that compare insns with side effect addressing mode (e.g.):
+;;
+;; cmp.S [rx=ry+i],rz;
+;; cmp.S [%3=%1+%2],%0
+;;
+;; are *not* usable for gcc since the reloader *does not accept*
+;; cc0-changing insns with side-effects other than setting the condition
+;; codes. The reason is that the reload stage *may* cause another insn to
+;; be output after the main instruction, in turn invalidating cc0 for the
+;; insn using the test. (This does not apply to the CRIS case, since a
+;; reload for output -- move to memory -- does not change the condition
+;; code. Unfortunately we have no way to describe that at the moment. I
+;; think code would improve, being in the order of one percent faster.)
+
+;; We have cmps and cmpu (compare reg w. sign/zero extended mem).
+;; These are mostly useful for compares in SImode, using 8 or 16-bit
+;; constants, but sometimes gcc will find its way to use it for other
+;; (memory) operands. Avoid side-effect patterns, though (see above).
+
+(define_insn "*cmp_ext<mode>"
+ [(set (cc0)
+ (compare
+ (match_operand:SI 0 "register_operand" "r,r")
+ (match_operator:SI 2 "cris_extend_operator"
+ [(match_operand:BW 1 "memory_operand" "Q>,m")])))]
+ ""
+ "cmp%e2<m> %1,%0"
+ [(set_attr "slottable" "yes,no")])
+
+;; Swap operands; it seems the canonical look (if any) is not enforced.
+;;
+;; FIXME: Investigate that.
+
+(define_insn "*cmp_swapext<mode>"
+ [(set (cc0)
+ (compare
+ (match_operator:SI 2 "cris_extend_operator"
+ [(match_operand:BW 0 "memory_operand" "Q>,m")])
+ (match_operand:SI 1 "register_operand" "r,r")))]
+ ""
+ "cmp%e2<m> %0,%1"
+ [(set_attr "slottable" "yes,no")
+ (set_attr "cc" "rev")])
+
+;; The "normal" compare patterns, from SI on. Special-cases with zero
+;; are covered above.
+
+(define_insn "*cmpsi"
+ [(set (cc0)
+ (compare
+ (match_operand:SI 0 "nonimmediate_operand" "r,r,r, Q>,r,r,m")
+ (match_operand:SI 1 "general_operand" "I,r,Q>,r, P,g,r")))]
+ ""
+ "@
+ cmpq %1,%0
+ cmp.d %1,%0
+ cmp.d %1,%0
+ cmp.d %0,%1
+ cmp%e1.%z1 %1,%0
+ cmp.d %1,%0
+ cmp.d %0,%1"
+ [(set_attr "slottable" "yes,yes,yes,yes,no,no,no")
+ (set_attr "cc" "normal,normal,normal,rev,normal,normal,rev")])
+
+(define_insn "*cmp<mode>"
+ [(set (cc0)
+ (compare (match_operand:BW 0 "nonimmediate_operand" "r,r, Q>,r,m")
+ (match_operand:BW 1 "general_operand" "r,Q>,r, g,r")))]
+ ""
+ "@
+ cmp<m> %1,%0
+ cmp<m> %1,%0
+ cmp<m> %0,%1
+ cmp<m> %1,%0
+ cmp<m> %0,%1"
+ [(set_attr "slottable" "yes,yes,yes,no,no")
+ (set_attr "cc" "normal,normal,rev,normal,rev")])
+
+;; Pattern matching the BTST insn.
+;; It is useful for "if (i & val)" constructs, where val is an exact
+;; power of 2, or if val + 1 is a power of two, where we check for a bunch
+;; of zeros starting at bit 0).
+
+;; SImode. This mode is the only one needed, since gcc automatically
+;; extends subregs for lower-size modes. FIXME: Add testcase.
+(define_insn "*btst"
+ [(set (cc0)
+ (compare
+ (zero_extract:SI
+ (match_operand:SI 0 "nonmemory_operand" "r, r,r, r,r, r,Kp")
+ (match_operand:SI 1 "const_int_operand" "Kc,n,Kc,n,Kc,n,n")
+ (match_operand:SI 2 "nonmemory_operand" "M, M,Kc,n,r, r,r"))
+ (const_int 0)))]
+ ;; Either it is a single bit, or consecutive ones starting at 0.
+ ;; The btst ones depend on stuff in NOTICE_UPDATE_CC.
+ "CONST_INT_P (operands[1])
+ && (operands[1] == const1_rtx || operands[2] == const0_rtx)
+ && (REG_S_P (operands[0])
+ || (operands[1] == const1_rtx
+ && REG_S_P (operands[2])
+ && CONST_INT_P (operands[0])
+ && exact_log2 (INTVAL (operands[0])) >= 0))
+ && !TARGET_CCINIT"
+
+;; The next-to-last "&&" condition above should be caught by some kind of
+;; canonicalization in gcc, but we can easily help with it here.
+;; It results from expressions of the type
+;; "power_of_2_value & (1 << y)".
+;;
+;; Since there may be codes with tests in on bits (in constant position)
+;; beyond the size of a word, handle that by assuming those bits are 0.
+;; GCC should handle that, but it's a matter of easily-added belts while
+;; having suspenders.
+
+ "@
+ btstq (%1-1),%0
+ cmpq 0,%0
+ btstq %2,%0
+ clearf nz
+ btst %2,%0
+ clearf nz
+ cmpq %p0,%2"
+ [(set_attr "slottable" "yes")
+ (set_attr "cc" "noov32")])
+
+;; Move insns.
+
+;; The whole mandatory movdi family is here; expander, "anonymous"
+;; recognizer and splitter. We're forced to have a movdi pattern,
+;; although GCC should be able to split it up itself. Normally it can,
+;; but if other insns have DI operands (as is the case here), reload
+;; must be able to generate or match a movdi. many testcases fail at
+;; -O3 or -fssa if we don't have this. FIXME: Fix GCC... See
+;; <URL:http://gcc.gnu.org/ml/gcc-patches/2000-04/msg00104.html>.
+;; However, a patch from Richard Kenner (similar to the cause of
+;; discussion at the URL above), indicates otherwise. See
+;; <URL:http://gcc.gnu.org/ml/gcc-patches/2000-04/msg00554.html>.
+;; The truth, IMO, has not been decided yet, so check from time to
+;; time by disabling the movdi patterns.
+
+;; To appease testcase gcc.c-torture/execute/920501-2.c (and others) at
+;; -O0, we need a movdi as a temporary measure. Here's how things fail:
+;; A cmpdi RTX needs reloading (global):
+;; (insn 185 326 186 (set (cc0)
+;; (compare (mem/f:DI (reg/v:SI 22) 0)
+;; (const_int 1 [0x1]))) 4 {cmpdi} (nil)
+;; (nil))
+;; Now, reg 22 is reloaded for input address, and the mem is also moved
+;; out of the instruction (into a register), since one of the operands
+;; must be a register. Reg 22 is reloaded (into reg 10), and the mem is
+;; moved out and synthesized in SImode parts (reg 9, reg 10 - should be ok
+;; wrt. overlap). The bad things happen with the synthesis in
+;; emit_move_insn_1; the location where to substitute reg 10 is lost into
+;; two new RTX:es, both still having reg 22. Later on, the left-over reg
+;; 22 is recognized to have an equivalent in memory which is substituted
+;; straight in, and we end up with an unrecognizable insn:
+;; (insn 325 324 326 (set (reg:SI 9 r9)
+;; (mem/f:SI (mem:SI (plus:SI (reg:SI 8 r8)
+;; (const_int -84 [0xffffffac])) 0) 0)) -1 (nil)
+;; (nil))
+;; which is the first part of the reloaded synthesized "movdi".
+;; The right thing would be to add equivalent replacement locations for
+;; insn with pseudos that need more reloading. The question is where.
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+{
+ if (MEM_P (operands[0])
+ && operands[1] != const0_rtx
+ && (!TARGET_V32 || (!REG_P (operands[1]) && can_create_pseudo_p ())))
+ operands[1] = copy_to_mode_reg (DImode, operands[1]);
+
+ /* Some other ports (as of 2001-09-10 for example mcore and romp) also
+ prefer to split up constants early, like this. The testcase in
+ gcc.c-torture/execute/961213-1.c shows that CSE2 gets confused by the
+ resulting subreg sets when using the construct from mcore (as of FSF
+ CVS, version -r 1.5), and it believes that the high part (the last one
+ emitted) is the final value. */
+ if ((CONST_INT_P (operands[1]) || GET_CODE (operands[1]) == CONST_DOUBLE)
+ && ! reload_completed
+ && ! reload_in_progress)
+ {
+ rtx insns;
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+
+ start_sequence ();
+ emit_move_insn (operand_subword (op0, 0, 1, DImode),
+ operand_subword (op1, 0, 1, DImode));
+ emit_move_insn (operand_subword (op0, 1, 1, DImode),
+ operand_subword (op1, 1, 1, DImode));
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_insn (insns);
+ DONE;
+ }
+})
+
+(define_insn_and_split "*movdi_insn_non_v32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,rx,m")
+ (match_operand:DI 1 "general_operand" "rx,g,rxM"))]
+ "(register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)
+ || operands[1] == const0_rtx)
+ && !TARGET_V32"
+ "#"
+ "&& reload_completed"
+ [(match_dup 2)]
+ "operands[2] = cris_split_movdx (operands);")
+
+;; Overlapping (but non-identical) source memory address and destination
+;; register would be a compiler bug, so we don't have to specify that.
+;; V32 DImode move.  Register/memory alternatives are emitted inline as
+;; two word moves (with addq/subq address juggling); the rest return "#"
+;; and are split by the following define_split.
+(define_insn "*movdi_v32"
+  [(set
+    (match_operand:DI 0 "nonimmediate_operand" "=r,rx,&r,>, m,r,x,m")
+    (match_operand:DI 1 "general_operand" "rxi,r>,m, rx,r,m,m,x"))]
+  "TARGET_V32"
+{
+  switch (which_alternative)
+    {
+      /* FIXME: 1) Use autoincrement where possible. 2) Have peephole2,
+	 particularly for cases where the address register is dead. */
+    case 5:
+      /* Destination register pair overlaps the address register; choose
+	 an ordering that reads both words before the overlapping word of
+	 the destination is written.  */
+      if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+	return "addq 4,%L1\;move.d %1,%H0\;subq 4,%L1\;move.d %1,%M0";
+      gcc_assert (REGNO (operands[0]) + 1 == REGNO (XEXP (operands[1], 0)));
+      return "move.d [%L1+],%M0\;move.d [%L1],%H0";
+    case 2:
+      /* We could do away with the addq if we knew the address-register
+	 isn't ACR. If we knew the address-register is dead, we could do
+	 away with the subq too. */
+      return "move.d [%L1],%M0\;addq 4,%L1\;move.d [%L1],%H0\;subq 4,%L1";
+    case 4:
+      return "move.d %M1,[%L0]\;addq 4,%L0\;move.d %H1,[%L0]\;subq 4,%L0";
+    case 6:
+      return "move [%L1],%M0\;addq 4,%L1\;move [%L1],%H0\;subq 4,%L1";
+    case 7:
+      return "move %M1,[%L0]\;addq 4,%L0\;move %H1,[%L0]\;subq 4,%L0";
+
+    default:
+      return "#";
+    }
+}
+ ;; The non-split cases clobber cc0 because of their adds and subs.
+ ;; Beware that NOTICE_UPDATE_CC is called before the forced split happens.
+ [(set_attr "cc" "*,*,clobber,*,clobber,clobber,*,*")])
+
+;; Much like "*movdi_insn_non_v32". Overlapping registers and constants
+;; is handled so much better in cris_split_movdx.
+;; Only splits the V32 alternatives whose addresses are not plain
+;; registers (those were emitted inline above).
+(define_split
+  [(set (match_operand:DI 0 "nonimmediate_operand" "")
+	(match_operand:DI 1 "general_operand" ""))]
+  "TARGET_V32
+   && reload_completed
+   && (!MEM_P (operands[0]) || !REG_P (XEXP (operands[0], 0)))
+   && (!MEM_P (operands[1]) || !REG_P (XEXP (operands[1], 0)))"
+  [(match_dup 2)]
+  "operands[2] = cris_split_movdx (operands);")
+
+;; Side-effect patterns for move.S1 [rx=ry+rx.S2],rw
+;; and move.S1 [rx=ry+i],rz
+;; Then movs.S1 and movu.S1 for both modes.
+;;
+;; move.S1 [rx=ry+rz.S],rw avoiding when rx is ry, or rw is rx
+;; FIXME: These could have anonymous mode for operand 0.
+;; FIXME: Special registers' alternatives too.
+
+;; QI/HI load with base-plus-scaled-index side effect: the address
+;; register (operand 4) is also updated with the computed address.
+(define_insn "*mov_side<mode>_biap"
+  [(set (match_operand:BW 0 "register_operand" "=r,r")
+	(mem:BW (plus:SI
+		 (mult:SI (match_operand:SI 1 "register_operand" "r,r")
+			  (match_operand:SI 2 "const_int_operand" "n,n"))
+		 (match_operand:SI 3 "register_operand" "r,r"))))
+   (set (match_operand:SI 4 "register_operand" "=*3,r")
+	(plus:SI (mult:SI (match_dup 1)
+			  (match_dup 2))
+		 (match_dup 3)))]
+  "cris_side_effect_mode_ok (MULT, operands, 4, 3, 1, 2, 0)"
+  "@
+   #
+   move<m> [%4=%3+%1%T2],%0")
+
+;; Same as above, for word-sized (SI/SF) loads; anonymous mode on
+;; operand 0, restricted to word size by the GET_MODE_SIZE test.
+;; The x alternatives move into special registers.
+(define_insn "*mov_sidesisf_biap"
+  [(set (match_operand 0 "register_operand" "=r,r,x,x")
+	(mem (plus:SI
+	      (mult:SI (match_operand:SI 1 "register_operand" "r,r,r,r")
+		       (match_operand:SI 2 "const_int_operand" "n,n,n,n"))
+	      (match_operand:SI 3 "register_operand" "r,r,r,r"))))
+   (set (match_operand:SI 4 "register_operand" "=*3,r,*3,r")
+	(plus:SI (mult:SI (match_dup 1)
+			  (match_dup 2))
+		 (match_dup 3)))]
+  "GET_MODE_SIZE (GET_MODE (operands[0])) == UNITS_PER_WORD
+   && cris_side_effect_mode_ok (MULT, operands, 4, 3, 1, 2, 0)"
+  "@
+   #
+   move.%s0 [%4=%3+%1%T2],%0
+   #
+   move [%4=%3+%1%T2],%0")
+
+;; move.S1 [rx=ry+i],rz
+;; avoiding move.S1 [ry=ry+i],rz
+;; and move.S1 [rz=ry+i],rz
+;; Note that "i" is allowed to be a register.
+
+;; QI/HI load with base-plus-displacement (bdap) side effect.  The C
+;; body forces a split ("#") for self-addressed alternatives whose
+;; offset fits in a plain addq/subq/8-bit bdap range.
+(define_insn "*mov_side<mode>"
+  [(set (match_operand:BW 0 "register_operand" "=r,r,r,r,r")
+	(mem:BW
+	 (plus:SI (match_operand:SI 1 "cris_bdap_operand" "%r,r,r,R,R")
+		  (match_operand:SI 2 "cris_bdap_operand" "r>Rn,r,>Rn,r,r"))))
+   (set (match_operand:SI 3 "register_operand" "=*1,r,r,*2,r")
+	(plus:SI (match_dup 1)
+		 (match_dup 2)))]
+  "cris_side_effect_mode_ok (PLUS, operands, 3, 1, 2, -1, 0)"
+{
+  if ((which_alternative == 0 || which_alternative == 3)
+      && (!CONST_INT_P (operands[2])
+	  || INTVAL (operands[2]) > 127
+	  || INTVAL (operands[2]) < -128
+	  || satisfies_constraint_N (operands[2])
+	  || satisfies_constraint_J (operands[2])))
+    return "#";
+  if (which_alternative == 4)
+    return "move<m> [%3=%2%S1],%0";
+  return "move<m> [%3=%1%S2],%0";
+})
+
+;; Word-sized (SI/SF) variant of the bdap side-effect load; alternatives
+;; 3-5 and 8-9 target special registers (x), hence "move" without size.
+(define_insn "*mov_sidesisf"
+  [(set (match_operand 0 "register_operand" "=r,r,r,x,x,x,r,r,x,x")
+	(mem
+	 (plus:SI
+	  (match_operand:SI 1 "cris_bdap_operand" "%r,r,r,r,r,r,R,R,R,R")
+	  (match_operand:SI 2 "cris_bdap_operand" "r>Rn,r,>Rn,r>Rn,r,>Rn,r,r,r,r"))))
+   (set (match_operand:SI 3 "register_operand" "=*1,r,r,*1,r,r,*2,r,*2,r")
+	(plus:SI (match_dup 1)
+		 (match_dup 2)))]
+  "GET_MODE_SIZE (GET_MODE (operands[0])) == UNITS_PER_WORD
+   && cris_side_effect_mode_ok (PLUS, operands, 3, 1, 2, -1, 0)"
+{
+  /* Split when the side-effect address register equals a source operand
+     and the offset is representable without the side-effect form.  */
+  if ((which_alternative == 0
+       || which_alternative == 3
+       || which_alternative == 6
+       || which_alternative == 8)
+      && (!CONST_INT_P (operands[2])
+	  || INTVAL (operands[2]) > 127
+	  || INTVAL (operands[2]) < -128
+	  || satisfies_constraint_N (operands[2])
+	  || satisfies_constraint_J (operands[2])))
+    return "#";
+  if (which_alternative < 3)
+    return "move.%s0 [%3=%1%S2],%0";
+  if (which_alternative == 7)
+    return "move.%s0 [%3=%2%S1],%0";
+  if (which_alternative == 9)
+    return "move [%3=%2%S1],%0";
+  return "move [%3=%1%S2],%0";
+})
+
+;; Other way around; move to memory.
+
+;; Note that the condition (which for side-effect patterns is usually a
+;; call to cris_side_effect_mode_ok), isn't consulted for register
+;; allocation preferences -- constraints is the method for that. The
+;; drawback is that we can't exclude register allocation to cause
+;; "move.s rw,[rx=ry+rz.S]" when rw==rx without also excluding rx==ry or
+;; rx==rz if we use an earlyclobber modifier for the constraint for rx.
+;; Instead of that, we recognize and split the cases where dangerous
+;; register combinations are spotted: where a register is set in the
+;; side-effect, and used in the main insn. We don't handle the case where
+;; the set in the main insn overlaps the set in the side-effect; that case
+;; must be handled in gcc. We handle just the case where the set in the
+;; side-effect overlaps the input operand of the main insn (i.e. just
+;; moves to memory).
+
+;;
+;; move.s rz,[ry=rx+rw.S]
+
+;; QI/HI store with base-plus-scaled-index side effect.  Alternatives 0
+;; and 1 (address register same as base, or same as source) are split.
+(define_insn "*mov_side<mode>_biap_mem"
+  [(set (mem:BW (plus:SI
+		 (mult:SI (match_operand:SI 0 "register_operand" "r,r,r")
+			  (match_operand:SI 1 "const_int_operand" "n,n,n"))
+		 (match_operand:SI 2 "register_operand" "r,r,r")))
+	(match_operand:BW 3 "register_operand" "r,r,r"))
+   (set (match_operand:SI 4 "register_operand" "=*2,!3,r")
+	(plus:SI (mult:SI (match_dup 0)
+			  (match_dup 1))
+		 (match_dup 2)))]
+  "cris_side_effect_mode_ok (MULT, operands, 4, 2, 0, 1, 3)"
+  "@
+   #
+   #
+   move<m> %3,[%4=%2+%0%T1]")
+
+;; Word-sized (SI/SF) variant of the biap side-effect store; the x
+;; alternatives store from special registers.
+(define_insn "*mov_sidesisf_biap_mem"
+  [(set (mem (plus:SI
+	      (mult:SI (match_operand:SI 0 "register_operand" "r,r,r,r,r,r")
+		       (match_operand:SI 1 "const_int_operand" "n,n,n,n,n,n"))
+	      (match_operand:SI 2 "register_operand" "r,r,r,r,r,r")))
+	(match_operand 3 "register_operand" "r,r,r,x,x,x"))
+   (set (match_operand:SI 4 "register_operand" "=*2,!3,r,*2,!3,r")
+	(plus:SI (mult:SI (match_dup 0)
+			  (match_dup 1))
+		 (match_dup 2)))]
+  "GET_MODE_SIZE (GET_MODE (operands[3])) == UNITS_PER_WORD
+   && cris_side_effect_mode_ok (MULT, operands, 4, 2, 0, 1, 3)"
+  "@
+   #
+   #
+   move.%s3 %3,[%4=%2+%0%T1]
+   #
+   #
+   move %3,[%4=%2+%0%T1]")
+
+;; Split for the case above where we're out of luck with register
+;; allocation (again, the condition isn't checked for that), and we end up
+;; with the set in the side-effect getting the same register as the input
+;; register.
+;; Recovery sequence: plain store without side-effect, then compute the
+;; address into operand 4 in two steps.
+(define_split
+  [(parallel
+    [(set (match_operator
+	   6 "cris_mem_op"
+	   [(plus:SI
+	     (mult:SI (match_operand:SI 0 "register_operand" "")
+		      (match_operand:SI 1 "const_int_operand" ""))
+	     (match_operand:SI 2 "register_operand" ""))])
+	  (match_operand 3 "register_operand" ""))
+     (set (match_operand:SI 4 "register_operand" "")
+	  (plus:SI (mult:SI (match_dup 0)
+			    (match_dup 1))
+		   (match_dup 2)))])]
+  "reload_completed && reg_overlap_mentioned_p (operands[4], operands[3])"
+  [(set (match_dup 5) (match_dup 3))
+   (set (match_dup 4) (match_dup 2))
+   (set (match_dup 4)
+	(plus:SI (mult:SI (match_dup 0)
+			  (match_dup 1))
+		 (match_dup 4)))]
+  "operands[5]
+     = replace_equiv_address (operands[6],
+			      gen_rtx_PLUS (SImode,
+					    gen_rtx_MULT (SImode,
+							  operands[0],
+							  operands[1]),
+					    operands[2]));")
+
+;; move.s rx,[ry=rz+i]
+;; FIXME: These could have anonymous mode for operand 2.
+
+;; QImode
+
+;; QI/HI store with bdap side effect; alternatives overlapping the
+;; source register (1, 5) always split, small-offset self-addressing
+;; alternatives (0, 4) split too.
+(define_insn "*mov_side<mode>_mem"
+  [(set (mem:BW
+	 (plus:SI (match_operand:SI 0 "cris_bdap_operand" "%r,r,r,r,R,R,R")
+		  (match_operand:SI 1 "cris_bdap_operand" "r>Rn,r>Rn,r,>Rn,r,r,r")))
+	(match_operand:BW 2 "register_operand" "r,r,r,r,r,r,r"))
+   (set (match_operand:SI 3 "register_operand" "=*0,!*2,r,r,*1,!*2,r")
+	(plus:SI (match_dup 0)
+		 (match_dup 1)))]
+  "cris_side_effect_mode_ok (PLUS, operands, 3, 0, 1, -1, 2)"
+{
+  if ((which_alternative == 0 || which_alternative == 4)
+      && (!CONST_INT_P (operands[1])
+	  || INTVAL (operands[1]) > 127
+	  || INTVAL (operands[1]) < -128
+	  || satisfies_constraint_N (operands[1])
+	  || satisfies_constraint_J (operands[1])))
+    return "#";
+  if (which_alternative == 1 || which_alternative == 5)
+    return "#";
+  if (which_alternative == 6)
+    return "move.%s2 %2,[%3=%1%S0]";
+  return "move<m> %2,[%3=%0%S1]";
+})
+
+;; SImode
+
+;; Word-sized (SI/SF) bdap side-effect store; x alternatives store from
+;; special registers.  Same splitting policy as the QI/HI pattern above.
+(define_insn "*mov_sidesisf_mem"
+  [(set (mem
+	 (plus:SI
+	  (match_operand:SI
+	   0 "cris_bdap_operand"
+	   "%r,  r,   r,r,  r,  r,r,  R,R,  R,R, R")
+	  (match_operand:SI
+	   1 "cris_bdap_operand"
+	   "r>Rn,r>Rn,r,>Rn,r>Rn,r,>Rn,r,r,  r,r, r")))
+	(match_operand 2 "register_operand"
+	 "r,   r,   r,r,  x,  x,x,  r,r,  r,x, x"))
+   (set (match_operand:SI 3 "register_operand"
+	 "=*0,!2,  r,r,  *0, r,r,  *1,!*2,r,*1,r")
+	(plus:SI (match_dup 0)
+		 (match_dup 1)))]
+  "GET_MODE_SIZE (GET_MODE (operands[2])) == UNITS_PER_WORD
+   && cris_side_effect_mode_ok (PLUS, operands, 3, 0, 1, -1, 2)"
+{
+  if ((which_alternative == 0 || which_alternative == 4)
+      && (!CONST_INT_P (operands[1])
+	  || INTVAL (operands[1]) > 127
+	  || INTVAL (operands[1]) < -128
+	  || satisfies_constraint_N (operands[1])
+	  || satisfies_constraint_J (operands[1])))
+    return "#";
+  if (which_alternative == 1
+      || which_alternative == 7
+      || which_alternative == 8
+      || which_alternative == 10)
+    return "#";
+  if (which_alternative < 4)
+    return "move.%s2 %2,[%3=%0%S1]";
+  if (which_alternative == 9)
+    return "move.%s2 %2,[%3=%1%S0]";
+  if (which_alternative == 11)
+    return "move %2,[%3=%1%S0]";
+  return "move %2,[%3=%0%S1]";
+})
+
+;; Like the biap case, a split where the set in the side-effect gets the
+;; same register as the input register to the main insn, since the
+;; condition isn't checked at register allocation.
+;; Recovery: plain store, then build the address in operand 3.
+(define_split
+  [(parallel
+    [(set (match_operator
+	   4 "cris_mem_op"
+	   [(plus:SI
+	     (match_operand:SI 0 "cris_bdap_operand" "")
+	     (match_operand:SI 1 "cris_bdap_operand" ""))])
+	  (match_operand 2 "register_operand" ""))
+     (set (match_operand:SI 3 "register_operand" "")
+	  (plus:SI (match_dup 0) (match_dup 1)))])]
+  "reload_completed && reg_overlap_mentioned_p (operands[3], operands[2])"
+  [(set (match_dup 4) (match_dup 2))
+   (set (match_dup 3) (match_dup 0))
+   (set (match_dup 3) (plus:SI (match_dup 3) (match_dup 1)))]
+  "")
+
+;; Clear memory side-effect patterns. It is hard to get to the mode if
+;; the MEM was anonymous, so there will be one for each mode.
+
+;; clear.[bwd] [ry=rx+rw.s2]
+
+(define_insn "*clear_side<mode>_biap"
+  [(set (mem:BWD (plus:SI
+		  (mult:SI (match_operand:SI 0 "register_operand" "r,r")
+			   (match_operand:SI 1 "const_int_operand" "n,n"))
+		  (match_operand:SI 2 "register_operand" "r,r")))
+	(const_int 0))
+   (set (match_operand:SI 3 "register_operand" "=*2,r")
+	(plus:SI (mult:SI (match_dup 0)
+			  (match_dup 1))
+		 (match_dup 2)))]
+  "cris_side_effect_mode_ok (MULT, operands, 3, 2, 0, 1, -1)"
+  "@
+   #
+   clear<m> [%3=%2+%0%T1]")
+
+;; clear.[bwd] [ry=rz+i]
+
+;; Clear memory with bdap side effect; splits the self-addressed
+;; small-offset alternatives like the move patterns above.
+(define_insn "*clear_side<mode>"
+  [(set (mem:BWD
+	 (plus:SI (match_operand:SI 0 "cris_bdap_operand" "%r,r,r,R,R")
+		  (match_operand:SI 1 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))
+	(const_int 0))
+   (set (match_operand:SI 2 "register_operand" "=*0,r,r,*1,r")
+	(plus:SI (match_dup 0)
+		 (match_dup 1)))]
+  "cris_side_effect_mode_ok (PLUS, operands, 2, 0, 1, -1, -1)"
+{
+  if ((which_alternative == 0 || which_alternative == 3)
+      && (!CONST_INT_P (operands[1])
+	  || INTVAL (operands[1]) > 127
+	  || INTVAL (operands[1]) < -128
+	  || satisfies_constraint_N (operands[1])
+	  || satisfies_constraint_J (operands[1])))
+    return "#";
+  if (which_alternative == 4)
+    return "clear<m> [%2=%1%S0]";
+  return "clear<m> [%2=%0%S1]";
+})
+
+;; Normal move patterns from SI on.
+
+;; SImode move expander.  Legitimizes memory destinations and, when
+;; generating PIC, rewrites symbolic constants into the unspec-based
+;; GOT/PC-relative forms the insn patterns recognize.
+(define_expand "movsi"
+  [(set
+    (match_operand:SI 0 "nonimmediate_operand" "")
+    (match_operand:SI 1 "cris_general_operand_or_symbol" ""))]
+  ""
+{
+  /* If the output goes to a MEM, make sure we have zero or a register as
+     input. */
+  if (MEM_P (operands[0])
+      && ! REG_S_P (operands[1])
+      && operands[1] != const0_rtx
+      && can_create_pseudo_p ())
+    operands[1] = force_reg (SImode, operands[1]);
+
+  /* If we're generating PIC and have an incoming symbol, validize it to a
+     general operand or something that will match a special pattern.
+
+     FIXME: Do we *have* to recognize anything that would normally be a
+     valid symbol? Can we exclude global PIC addresses with an added
+     offset? */
+  if (flag_pic
+      && CONSTANT_ADDRESS_P (operands[1])
+      && !cris_valid_pic_const (operands[1], false))
+    {
+      enum cris_pic_symbol_type t = cris_pic_symbol_type_of (operands[1]);
+
+      gcc_assert (t != cris_no_symbol);
+
+      if (! REG_S_P (operands[0]))
+	{
+	  /* We must have a register as destination for what we're about to
+	     do, and for the patterns we generate. */
+	  CRIS_ASSERT (can_create_pseudo_p ());
+	  operands[1] = force_reg (SImode, operands[1]);
+	}
+      else
+	{
+	  /* FIXME: add a REG_EQUAL (or is it REG_EQUIV) note to the
+	     destination register for the symbol. It might not be
+	     worth it. Measure. */
+	  crtl->uses_pic_offset_table = 1;
+	  if (t == cris_rel_symbol)
+	    {
+	      /* Change a "move.d sym(+offs),rN" into (allocate register rM)
+		 for pre-v32:
+		 "move.d (const (plus (unspec [sym]
+		 CRIS_UNSPEC_GOTREL) offs)),rM" "add.d rPIC,rM,rN"
+		 and for v32:
+		 "move.d (const (plus (unspec [sym]
+		 CRIS_UNSPEC_PCREL) offs)),rN". */
+	      rtx tem, rm, rn = operands[0];
+	      rtx sym = GET_CODE (operands[1]) != CONST
+		? operands[1] : get_related_value (operands[1]);
+	      HOST_WIDE_INT offs = get_integer_term (operands[1]);
+
+	      gcc_assert (can_create_pseudo_p ());
+
+	      if (TARGET_V32)
+		{
+		  tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, sym),
+					CRIS_UNSPEC_PCREL);
+		  if (offs != 0)
+		    tem = plus_constant (Pmode, tem, offs);
+		  rm = rn;
+		  emit_move_insn (rm, gen_rtx_CONST (Pmode, tem));
+		}
+	      else
+		{
+		  /* We still use GOT-relative addressing for
+		     pre-v32. */
+		  crtl->uses_pic_offset_table = 1;
+		  tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, sym),
+					CRIS_UNSPEC_GOTREL);
+		  if (offs != 0)
+		    tem = plus_constant (Pmode, tem, offs);
+		  rm = gen_reg_rtx (Pmode);
+		  emit_move_insn (rm, gen_rtx_CONST (Pmode, tem));
+		  if (expand_binop (Pmode, add_optab, rm, pic_offset_table_rtx,
+				    rn, 0, OPTAB_LIB_WIDEN) != rn)
+		    internal_error ("expand_binop failed in movsi gotrel");
+		}
+	      DONE;
+	    }
+	  else if (t == cris_got_symbol)
+	    {
+	      /* Change a "move.d sym,rN" into (allocate register rM, rO)
+		 "move.d (const (unspec [sym] CRIS_UNSPEC_GOTREAD)),rM"
+		 "add.d rPIC,rM,rO", "move.d [rO],rN" with
+		 the memory access marked as read-only. */
+	      rtx tem, mem, rm, ro, rn = operands[0];
+	      gcc_assert (can_create_pseudo_p ());
+	      tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, operands[1]),
+				    CRIS_UNSPEC_GOTREAD);
+	      rm = gen_reg_rtx (Pmode);
+	      emit_move_insn (rm, gen_rtx_CONST (Pmode, tem));
+	      ro = gen_reg_rtx (Pmode);
+	      if (expand_binop (Pmode, add_optab, rm, pic_offset_table_rtx,
+				ro, 0, OPTAB_LIB_WIDEN) != ro)
+		internal_error ("expand_binop failed in movsi got");
+	      mem = gen_rtx_MEM (Pmode, ro);
+
+	      /* This MEM doesn't alias anything. Whether it
+		 aliases other same symbols is unimportant. */
+	      set_mem_alias_set (mem, new_alias_set ());
+	      MEM_NOTRAP_P (mem) = 1;
+
+	      /* We can set the GOT memory read of a non-called symbol
+		 to readonly, but not that of a call symbol, as those
+		 are subject to lazy evaluation and usually have the value
+		 changed from the first call to the second (but
+		 constant thereafter). */
+	      MEM_READONLY_P (mem) = 1;
+	      emit_move_insn (rn, mem);
+	      DONE;
+	    }
+	  else
+	    {
+	      /* We get here when we have to change something that would
+		 be recognizable if it wasn't PIC. A ``sym'' is ok for
+		 PIC symbols both with and without a GOT entry. And ``sym
+		 + offset'' is ok for local symbols, so the only thing it
+		 could be, is a global symbol with an offset. Check and
+		 abort if not. */
+	      rtx reg = gen_reg_rtx (Pmode);
+	      rtx sym = get_related_value (operands[1]);
+	      HOST_WIDE_INT offs = get_integer_term (operands[1]);
+
+	      gcc_assert (can_create_pseudo_p ()
+			  && t == cris_got_symbol_needing_fixup
+			  && sym != NULL_RTX && offs != 0);
+
+	      emit_move_insn (reg, sym);
+	      if (expand_binop (SImode, add_optab, reg,
+				GEN_INT (offs), operands[0], 0,
+				OPTAB_LIB_WIDEN) != operands[0])
+		internal_error ("expand_binop failed in movsi got+offs");
+	      DONE;
+	    }
+	}
+    }
+})
+
+;; Load the GOT address into the PIC register; lapc on v32, a
+;; pc-relative computation on pre-v32.
+(define_insn "*movsi_got_load"
+  [(set (reg:SI CRIS_GOT_REGNUM) (unspec:SI [(const_int 0)] CRIS_UNSPEC_GOT))]
+  "flag_pic"
+{
+  return TARGET_V32
+    ? "lapc _GLOBAL_OFFSET_TABLE_,%:"
+    : "move.d $pc,%:\;sub.d .:GOTOFF,%:";
+}
+  [(set_attr "cc" "clobber")])
+
+;; The main SImode move insn: 15 alternatives covering general
+;; registers, special registers (x), memory, constants, and the PIC
+;; unspec forms produced by the movsi expander.
+(define_insn "*movsi_internal"
+  [(set
+    (match_operand:SI 0 "nonimmediate_operand"
+     "=r,r, r,Q>,r,Q>,g,r,r, r,g,rQ>,x, m,x")
+    (match_operand:SI 1 "cris_general_operand_or_pic_source"
+     "r,Q>,M,M, I,r, M,n,!S,g,r,x, rQ>,x,gi"))]
+  ;; Note that we prefer not to use the S alternative (if for some reason
+  ;; it competes with others) above, but g matches S.
+  ""
+{
+  /* Better to have c-switch here; it is worth it to optimize the size of
+     move insns. The alternative would be to try to find more constraint
+     letters. FIXME: Check again. It seems this could shrink a bit. */
+  switch (which_alternative)
+    {
+    case 9:
+      if (TARGET_V32)
+	{
+	  if (!flag_pic
+	      && (GET_CODE (operands[1]) == SYMBOL_REF
+		  || GET_CODE (operands[1]) == LABEL_REF
+		  || GET_CODE (operands[1]) == CONST))
+	    {
+	      /* FIXME: Express this through (set_attr cc none) instead,
+		 since we can't express the ``none'' at this point. FIXME:
+		 Use lapc for everything except const_int and when next cc0
+		 user would want the flag setting. */
+	      CC_STATUS_INIT;
+	      return "lapc %1,%0";
+	    }
+	  if (flag_pic == 1
+	      && GET_CODE (operands[1]) == CONST
+	      && GET_CODE (XEXP (operands[1], 0)) == UNSPEC
+	      && XINT (XEXP (operands[1], 0), 1) == CRIS_UNSPEC_GOTREAD)
+	    return "movu.w %1,%0";
+	}
+      /* FALLTHROUGH */
+    case 0:
+    case 1:
+    case 5:
+    case 10:
+      return "move.d %1,%0";
+
+    case 11:
+    case 12:
+    case 13:
+    case 14:
+      /* Special-register alternatives; %d1 prints the operand with a
+	 size suffix where one is needed.  */
+      return "move %d1,%0";
+
+    case 2:
+    case 3:
+    case 6:
+      return "clear.d %0";
+
+      /* Constants -32..31 except 0. */
+    case 4:
+      return "moveq %1,%0";
+
+      /* We can win a little on constants -32768..-33, 32..65535. */
+    case 7:
+      if (INTVAL (operands[1]) > 0 && INTVAL (operands[1]) < 65536)
+	{
+	  if (INTVAL (operands[1]) < 256)
+	    return "movu.b %1,%0";
+	  return "movu.w %1,%0";
+	}
+      else if (INTVAL (operands[1]) >= -32768 && INTVAL (operands[1]) < 32768)
+	{
+	  if (INTVAL (operands[1]) >= -128 && INTVAL (operands[1]) < 128)
+	    return "movs.b %1,%0";
+	  return "movs.w %1,%0";
+	}
+      return "move.d %1,%0";
+
+    case 8:
+      {
+	/* PIC unspec source: pick the access insn matching the unspec.  */
+	rtx tem = operands[1];
+	gcc_assert (GET_CODE (tem) == CONST);
+	tem = XEXP (tem, 0);
+	if (GET_CODE (tem) == PLUS
+	    && GET_CODE (XEXP (tem, 0)) == UNSPEC
+	    && (XINT (XEXP (tem, 0), 1) == CRIS_UNSPEC_GOTREL
+		|| XINT (XEXP (tem, 0), 1) == CRIS_UNSPEC_PCREL)
+	    && CONST_INT_P (XEXP (tem, 1)))
+	  tem = XEXP (tem, 0);
+	gcc_assert (GET_CODE (tem) == UNSPEC);
+	switch (XINT (tem, 1))
+	  {
+	  case CRIS_UNSPEC_GOTREAD:
+	  case CRIS_UNSPEC_PLTGOTREAD:
+	    /* Using sign-extend mostly to be consistent with the
+	       indexed addressing mode. */
+	    if (flag_pic == 1)
+	      return "movs.w %1,%0";
+	    return "move.d %1,%0";
+
+	  case CRIS_UNSPEC_GOTREL:
+	  case CRIS_UNSPEC_PLT_GOTREL:
+	    gcc_assert (!TARGET_V32);
+	    return "move.d %1,%0";
+
+	  case CRIS_UNSPEC_PCREL:
+	  case CRIS_UNSPEC_PLT_PCREL:
+	    gcc_assert (TARGET_V32);
+	    return "lapc %1,%0";
+
+	  default:
+	    gcc_unreachable ();
+	  }
+      }
+    default:
+      return "BOGUS: %1 to %0";
+    }
+}
+ [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,no,no,no,no,no,yes,yes,no,no")
+  (set_attr "cc" "*,*,*,*,*,*,*,*,*,*,*,none,none,none,none")])
+
+;; Extend operations with side-effect from mem to register, using
+;; MOVS/MOVU. These are from mem to register only.
+;;
+;; [rx=ry+rz.S]
+;;
+;; QImode to HImode
+;;
+;; FIXME: Can we omit extend to HImode, since GCC should truncate for
+;; HImode by itself? Perhaps use only anonymous modes?
+
+(define_insn "*ext_sideqihi_biap"
+  [(set (match_operand:HI 0 "register_operand" "=r,r")
+	(match_operator:HI
+	 5 "cris_extend_operator"
+	 [(mem:QI (plus:SI
+		   (mult:SI (match_operand:SI 1 "register_operand" "r,r")
+			    (match_operand:SI 2 "const_int_operand" "n,n"))
+		   (match_operand:SI 3 "register_operand" "r,r")))]))
+   (set (match_operand:SI 4 "register_operand" "=*3,r")
+	(plus:SI (mult:SI (match_dup 1)
+			  (match_dup 2))
+		 (match_dup 3)))]
+  "cris_side_effect_mode_ok (MULT, operands, 4, 3, 1, 2, 0)"
+  "@
+   #
+   mov%e5.%m5 [%4=%3+%1%T2],%0")
+
+;; Sign/zero extend QI/HI memory to SImode with biap side effect.
+(define_insn "*ext_side<mode>si_biap"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+	(match_operator:SI
+	 5 "cris_extend_operator"
+	 [(mem:BW (plus:SI
+		   (mult:SI (match_operand:SI 1 "register_operand" "r,r")
+			    (match_operand:SI 2 "const_int_operand" "n,n"))
+		   (match_operand:SI 3 "register_operand" "r,r")))]))
+   (set (match_operand:SI 4 "register_operand" "=*3,r")
+	(plus:SI (mult:SI (match_dup 1)
+			  (match_dup 2))
+		 (match_dup 3)))]
+  "cris_side_effect_mode_ok (MULT, operands, 4, 3, 1, 2, 0)"
+  "@
+   #
+   mov%e5<m> [%4=%3+%1%T2],%0")
+
+;; Same but [rx=ry+i]
+
+;; QImode to HImode
+
+(define_insn "*ext_sideqihi"
+  [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
+	(match_operator:HI
+	 4 "cris_extend_operator"
+	 [(mem:QI (plus:SI
+		   (match_operand:SI 1 "cris_bdap_operand" "%r,r,r,R,R")
+		   (match_operand:SI 2 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))]))
+   (set (match_operand:SI 3 "register_operand" "=*1,r,r,*2,r")
+	(plus:SI (match_dup 1)
+		 (match_dup 2)))]
+  "cris_side_effect_mode_ok (PLUS, operands, 3, 1, 2, -1, 0)"
+{
+  /* Same splitting policy as the plain side-effect moves.  */
+  if ((which_alternative == 0 || which_alternative == 3)
+      && (!CONST_INT_P (operands[2])
+	  || INTVAL (operands[2]) > 127
+	  || INTVAL (operands[2]) < -128
+	  || satisfies_constraint_N (operands[2])
+	  || satisfies_constraint_J (operands[2])))
+    return "#";
+  if (which_alternative == 4)
+    return "mov%e4.%m4 [%3=%2%S1],%0";
+  return "mov%e4.%m4 [%3=%1%S2],%0";
+})
+
+;; Sign/zero extend QI/HI memory to SImode with bdap side effect.
+(define_insn "*ext_side<mode>si"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r")
+	(match_operator:SI
+	 4 "cris_extend_operator"
+	 [(mem:BW (plus:SI
+		   (match_operand:SI 1 "cris_bdap_operand" "%r,r,r,R,R")
+		   (match_operand:SI 2 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))]))
+   (set (match_operand:SI 3 "register_operand" "=*1,r,r,*2,r")
+	(plus:SI (match_dup 1)
+		 (match_dup 2)))]
+  "cris_side_effect_mode_ok (PLUS, operands, 3, 1, 2, -1, 0)"
+{
+  if ((which_alternative == 0 || which_alternative == 3)
+      && (!CONST_INT_P (operands[2])
+	  || INTVAL (operands[2]) > 127
+	  || INTVAL (operands[2]) < -128
+	  || satisfies_constraint_N (operands[2])
+	  || satisfies_constraint_J (operands[2])))
+    return "#";
+  if (which_alternative == 4)
+    return "mov%e4<m> [%3=%2%S1],%0";
+  return "mov%e4<m> [%3=%1%S2],%0";
+})
+
+;; FIXME: See movsi.
+
+;; HImode move; named pattern used directly as the insn.  Constant
+;; sources pick the shortest encoding (clear.w/moveq/movu.b/movs.b).
+(define_insn "movhi"
+  [(set
+    (match_operand:HI 0 "nonimmediate_operand" "=r,r, r,Q>,r,Q>,r,r,r,g,g,r,r,x")
+    (match_operand:HI 1 "general_operand" "r,Q>,M,M, I,r, L,O,n,M,r,g,x,r"))]
+  ""
+{
+  switch (which_alternative)
+    {
+    case 0:
+    case 1:
+    case 5:
+    case 10:
+    case 11:
+      return "move.w %1,%0";
+    case 12:
+    case 13:
+      /* Special-register alternatives.  */
+      return "move %1,%0";
+    case 2:
+    case 3:
+    case 9:
+      return "clear.w %0";
+    case 4:
+      return "moveq %1,%0";
+    case 6:
+    case 8:
+      /* Constants -128..255 fit a byte move with extension.  */
+      if (INTVAL (operands[1]) < 256 && INTVAL (operands[1]) >= -128)
+	{
+	  if (INTVAL (operands[1]) > 0)
+	    return "movu.b %1,%0";
+	  return "movs.b %1,%0";
+	}
+      return "move.w %1,%0";
+    case 7:
+      return "movEq %b1,%0";
+    default:
+      return "BOGUS: %1 to %0";
+    }
+}
+  [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,no,yes,no,no,no,no,yes,yes")
+   (set_attr "cc" "*,*,none,none,*,none,*,clobber,*,none,none,*,none,none")])
+
+;; Store to the low 16 bits of a register only (strict_low_part).
+(define_insn "movstricthi"
+  [(set
+    (strict_low_part
+     (match_operand:HI 0 "nonimmediate_operand" "+r,r, r,Q>,Q>,g,r,g"))
+    (match_operand:HI 1 "general_operand" "r,Q>,M,M, r, M,g,r"))]
+  ""
+  "@
+   move.w %1,%0
+   move.w %1,%0
+   clear.w %0
+   clear.w %0
+   move.w %1,%0
+   clear.w %0
+   move.w %1,%0
+   move.w %1,%0"
+  [(set_attr "slottable" "yes,yes,yes,yes,yes,no,no,no")])
+
+;; Secondary reload: QI/HI memory cannot go straight into a special
+;; register (x); bounce through a general-register scratch.
+(define_expand "reload_in<mode>"
+  [(set (match_operand:BW 2 "register_operand" "=r")
+	(match_operand:BW 1 "memory_operand" "m"))
+   (set (match_operand:BW 0 "register_operand" "=x")
+	(match_dup 2))]
+  ""
+  "")
+
+;; Secondary reload, store direction: special register to QI/HI memory
+;; via a general-register scratch.
+(define_expand "reload_out<mode>"
+  [(set (match_operand:BW 2 "register_operand" "=&r")
+	(match_operand:BW 1 "register_operand" "x"))
+   (set (match_operand:BW 0 "memory_operand" "=m")
+	(match_dup 2))]
+  ""
+  "")
+
+;; QImode move; named pattern used directly as the insn.
+(define_insn "movqi"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=r,Q>,r, r,Q>,r,g,g,r,r,r,x")
+	(match_operand:QI 1 "general_operand" "r,r, Q>,M,M, I,M,r,O,g,x,r"))]
+  ""
+  "@
+   move.b %1,%0
+   move.b %1,%0
+   move.b %1,%0
+   clear.b %0
+   clear.b %0
+   moveq %1,%0
+   clear.b %0
+   move.b %1,%0
+   moveq %b1,%0
+   move.b %1,%0
+   move %1,%0
+   move %1,%0"
+  [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,no,no,yes,no,yes,yes")
+   (set_attr "cc" "*,*,*,*,*,*,*,*,clobber,*,none,none")])
+
+;; Store to the low 8 bits of a register only (strict_low_part).
+(define_insn "movstrictqi"
+  [(set (strict_low_part
+	 (match_operand:QI 0 "nonimmediate_operand" "+r,Q>,r, r,Q>,g,g,r"))
+	(match_operand:QI 1 "general_operand" "r,r, Q>,M,M, M,r,g"))]
+  ""
+  "@
+   move.b %1,%0
+   move.b %1,%0
+   move.b %1,%0
+   clear.b %0
+   clear.b %0
+   clear.b %0
+   move.b %1,%0
+   move.b %1,%0"
+  [(set_attr "slottable" "yes,yes,yes,yes,yes,no,no,no")])
+
+;; The valid "quick" bit-patterns are, except for 0.0, denormalized
+;; values REALLY close to 0, and some NaN:s (I think; their exponent is
+;; all ones); the worthwhile one is "0.0".
+;; It will use clear, so we know ALL types of immediate 0 never change cc.
+
+;; SFmode move: bitwise 32-bit moves, no FP hardware involved.
+(define_insn "movsf"
+  [(set (match_operand:SF 0 "nonimmediate_operand" "=r,Q>,r, r,Q>,g,g,r,r,x,Q>,m,x, x")
+	(match_operand:SF 1 "general_operand" "r,r, Q>,G,G, G,r,g,x,r,x, x,Q>,g"))]
+  ""
+  "@
+   move.d %1,%0
+   move.d %1,%0
+   move.d %1,%0
+   clear.d %0
+   clear.d %0
+   clear.d %0
+   move.d %1,%0
+   move.d %1,%0
+   move %1,%0
+   move %1,%0
+   move %1,%0
+   move %1,%0
+   move %1,%0
+   move %1,%0"
+  [(set_attr "slottable" "yes,yes,yes,yes,yes,no,no,no,yes,yes,yes,no,yes,no")])
+
+;; Movem patterns. Primarily for use in function prologue and epilogue.
+;; The V32 variants have an ordering matching the expectations of the
+;; standard names "load_multiple" and "store_multiple"; pre-v32 movem
+;; store R0 in the highest memory location.
+
+;; Standard-name expander: load registers R0..R<n-1> from consecutive
+;; memory via movem.  FAILs (letting gcc fall back to discrete moves)
+;; unless the operands fit movem's fixed register/count shape.
+;; NOTE(review): a stray "gcc_unreachable ()" stood between the checks
+;; and the emit, turning every successful match into an ICE and making
+;; the emit dead code; removed so the expander can do its job.
+(define_expand "load_multiple"
+  [(match_operand:SI 0 "register_operand" "")
+   (match_operand:SI 1 "memory_operand" "")
+   (match_operand:SI 2 "const_int_operand" "")]
+  "TARGET_V32"
+{
+  rtx indreg;
+
+  /* Apparently the predicate isn't checked, so we need to do so
+     manually. Once happened for libstdc++-v3 locale_facets.tcc. */
+  if (!MEM_P (operands[1]))
+    FAIL;
+
+  indreg = XEXP (operands[1], 0);
+
+  if (GET_CODE (indreg) == POST_INC)
+    indreg = XEXP (indreg, 0);
+  if (!REG_P (indreg)
+      || GET_CODE (operands[2]) != CONST_INT
+      || !REG_P (operands[0])
+      || REGNO (operands[0]) != 0
+      || INTVAL (operands[2]) > CRIS_SP_REGNUM
+      || (int) REGNO (indreg) < INTVAL (operands[2]))
+    FAIL;
+
+  emit_insn (cris_gen_movem_load (operands[1], operands[2], 0));
+  DONE;
+})
+
+;; Standard-name expander: store registers R0..R<n-1> to consecutive
+;; memory via movem; mirror of load_multiple above.
+;; NOTE(review): same defect as load_multiple — a stray
+;; "gcc_unreachable ()" before the emit made any valid match ICE;
+;; removed.
+(define_expand "store_multiple"
+  [(match_operand:SI 0 "memory_operand" "")
+   (match_operand:SI 1 "register_operand" "")
+   (match_operand:SI 2 "const_int_operand" "")]
+  "TARGET_V32"
+{
+  rtx indreg;
+
+  /* See load_multiple. */
+  if (!MEM_P (operands[0]))
+    FAIL;
+
+  indreg = XEXP (operands[0], 0);
+
+  if (GET_CODE (indreg) == POST_INC)
+    indreg = XEXP (indreg, 0);
+  if (!REG_P (indreg)
+      || GET_CODE (operands[2]) != CONST_INT
+      || !REG_P (operands[1])
+      || REGNO (operands[1]) != 0
+      || INTVAL (operands[2]) > CRIS_SP_REGNUM
+      || (int) REGNO (indreg) < INTVAL (operands[2]))
+    FAIL;
+
+  cris_emit_movem_store (operands[0], operands[2], 0, false);
+  DONE;
+})
+
+;; The movem load insn itself, matched as a parallel of register loads.
+(define_insn "*cris_load_multiple"
+  [(match_parallel 0 "cris_load_multiple_op"
+		   [(set (match_operand:SI 1 "register_operand" "=r,r")
+			 (match_operand:SI 2 "memory_operand" "Q,m"))])]
+  ""
+  "movem %O0,%o0"
+  [(set_attr "cc" "none")
+   (set_attr "slottable" "yes,no")
+   ;; Not true, but setting the length to 0 causes return sequences (ret
+   ;; movem) to have the cost they had when (return) included the movem
+   ;; and reduces the performance penalty taken for needing to emit an
+   ;; epilogue (in turn copied by bb-reorder) instead of return patterns.
+   ;; FIXME: temporary change until all insn lengths are correctly
+   ;; described. FIXME: have better target control over bb-reorder.
+   (set_attr "length" "0")])
+
+;; The movem store insn, matched as a parallel of register stores.
+(define_insn "*cris_store_multiple"
+  [(match_parallel 0 "cris_store_multiple_op"
+		   [(set (match_operand:SI 2 "memory_operand" "=Q,m")
+			 (match_operand:SI 1 "register_operand" "r,r"))])]
+  ""
+  "movem %o0,%O0"
+  [(set_attr "cc" "none")
+   (set_attr "slottable" "yes,no")])
+
+
+;; Sign- and zero-extend insns with standard names.
+;; Those for integer source operand are ordered with the widest source
+;; type first.
+
+;; Sign-extend.
+
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "general_operand" "g")))]
+ ""
+ "move.d %1,%M0\;smi %H0\;neg.d %H0,%H0")
+
+;; QI/HI -> DI: sign-extending load into the low word, then sign-fill
+;; the high word as in extendsidi2.
+(define_insn "extend<mode>di2"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(sign_extend:DI (match_operand:BW 1 "general_operand" "g")))]
+  ""
+  "movs<m> %1,%M0\;smi %H0\;neg.d %H0,%H0")
+
+;; QI/HI -> SI sign extension; a single movs.
+(define_insn "extend<mode>si2"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+	(sign_extend:SI (match_operand:BW 1 "general_operand" "r,Q>,g")))]
+  ""
+  "movs<m> %1,%0"
+  [(set_attr "slottable" "yes,yes,no")])
+
+;; To do a byte->word extension, extend to dword, except that the top half
+;; of the register will be clobbered. FIXME: Perhaps this is not needed.
+
+(define_insn "extendqihi2"
+  [(set (match_operand:HI 0 "register_operand" "=r,r,r")
+	(sign_extend:HI (match_operand:QI 1 "general_operand" "r,Q>,g")))]
+  ""
+  "movs.b %1,%0"
+  [(set_attr "slottable" "yes,yes,no")])
+
+
+;; Zero-extend. The DImode ones are synthesized by gcc, so we don't
+;; specify them here.
+
+(define_insn "zero_extend<mode>si2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (zero_extend:SI
+ (match_operand:BW 1 "nonimmediate_operand" "r,Q>,m")))]
+ ""
+ "movu<m> %1,%0"
+ [(set_attr "slottable" "yes,yes,no")])
+
+;; Same comment as sign-extend QImode to HImode above applies.
+
+(define_insn "zero_extendqihi2"
+  [(set (match_operand:HI 0 "register_operand" "=r,r,r")
+	(zero_extend:HI
+	 (match_operand:QI 1 "nonimmediate_operand" "r,Q>,m")))]
+  ""
+  "movu.b %1,%0"
+  [(set_attr "slottable" "yes,yes,no")])
+
+;; All kinds of arithmetic and logical instructions.
+;;
+;; First, anonymous patterns to match addressing modes with
+;; side-effects.
+;;
+;; op.S [rx=ry+I],rz; (add, sub, or, and, bound).
+;;
+;; [rx=ry+rz.S]
+
+;; Two-operand op with a memory source in biap form, register operand
+;; tied to the destination ("0"), plus the address side effect.
+(define_insn "*op_side<mode>_biap"
+  [(set (match_operand:BWD 0 "register_operand" "=r,r")
+	(match_operator:BWD
+	 6 "cris_orthogonal_operator"
+	 [(match_operand:BWD 1 "register_operand" "0,0")
+	  (mem:BWD (plus:SI
+		    (mult:SI (match_operand:SI 2 "register_operand" "r,r")
+			     (match_operand:SI 3 "const_int_operand" "n,n"))
+		    (match_operand:SI 4 "register_operand" "r,r")))]))
+   (set (match_operand:SI 5 "register_operand" "=*4,r")
+	(plus:SI (mult:SI (match_dup 2)
+			  (match_dup 3))
+		 (match_dup 4)))]
+  "cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)"
+  "@
+   #
+   %x6<m> [%5=%4+%2%T3],%0")
+
+;; [rx=ry+i] ([%4=%2+%3])
+
+;; Two-operand op with a bdap-form memory source plus address side
+;; effect; same small-offset splitting policy as the move patterns.
+(define_insn "*op_side<mode>"
+  [(set (match_operand:BWD 0 "register_operand" "=r,r,r,r,r")
+	(match_operator:BWD
+	 5 "cris_orthogonal_operator"
+	 [(match_operand:BWD 1 "register_operand" "0,0,0,0,0")
+	  (mem:BWD (plus:SI
+		    (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R")
+		    (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))]))
+   (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r")
+	(plus:SI (match_dup 2)
+		 (match_dup 3)))]
+  "cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)"
+{
+  if ((which_alternative == 0 || which_alternative == 3)
+      && (!CONST_INT_P (operands[3])
+	  || INTVAL (operands[3]) > 127
+	  || INTVAL (operands[3]) < -128
+	  || satisfies_constraint_N (operands[3])
+	  || satisfies_constraint_J (operands[3])))
+    return "#";
+  if (which_alternative == 4)
+    return "%x5.%s0 [%4=%3%S2],%0";
+  return "%x5<m> [%4=%2%S3],%0";
+})
+
+;; To match all cases for commutative operations we may have to have the
+;; following pattern for add, or & and. I do not know really, but it does
+;; not break anything.
+;;
+;; FIXME: This really ought to be checked.
+;;
+;; op.S [rx=ry+I],rz;
+;;
+;; [rx=ry+rz.S]
+
+;; As *op_side<mode>_biap, but with the commutative operator's operands
+;; in the other order (memory first); only cris_commutative_orth_op
+;; operators may match here.
+(define_insn "*op_swap_side<mode>_biap"
+ [(set (match_operand:BWD 0 "register_operand" "=r,r")
+ (match_operator:BWD
+ 6 "cris_commutative_orth_op"
+ [(mem:BWD (plus:SI
+ (mult:SI (match_operand:SI 2 "register_operand" "r,r")
+ (match_operand:SI 3 "const_int_operand" "n,n"))
+ (match_operand:SI 4 "register_operand" "r,r")))
+ (match_operand:BWD 1 "register_operand" "0,0")]))
+ (set (match_operand:SI 5 "register_operand" "=*4,r")
+ (plus:SI (mult:SI (match_dup 2)
+ (match_dup 3))
+ (match_dup 4)))]
+ "cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)"
+ "@
+ #
+ %x6<m> [%5=%4+%2%T3],%0")
+
+;; [rx=ry+i] ([%4=%2+%3])
+;; FIXME: These could have anonymous mode for operand 0.
+
+;; QImode
+
+;; As *op_side<mode>, but with the commutative operator's operands
+;; swapped (memory first).  Split conditions are identical.
+(define_insn "*op_swap_side<mode>"
+ [(set (match_operand:BWD 0 "register_operand" "=r,r,r,r,r")
+ (match_operator:BWD
+ 5 "cris_commutative_orth_op"
+ [(mem:BWD
+ (plus:SI (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R")
+ (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))
+ (match_operand:BWD 1 "register_operand" "0,0,0,0,0")]))
+ (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r")
+ (plus:SI (match_dup 2)
+ (match_dup 3)))]
+ "cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)"
+{
+ if ((which_alternative == 0 || which_alternative == 3)
+ && (!CONST_INT_P (operands[3])
+ || INTVAL (operands[3]) > 127
+ || INTVAL (operands[3]) < -128
+ || satisfies_constraint_N (operands[3])
+ || satisfies_constraint_J (operands[3])))
+ return "#";
+ if (which_alternative == 4)
+ return "%x5<m> [%4=%3%S2],%0";
+ return "%x5<m> [%4=%2%S3],%0";
+})
+
+;; Add operations, standard names.
+
+;; Note that for the 'P' constraint, the high part can be -1 or 0. We
+;; output the insn through the 'A' output modifier as "adds.w" and "addq",
+;; respectively.
+;; DImode add.  The real work is in *adddi3_non_v32/*adddi3_v32; here
+;; we only force a memory addend into a register for v32, whose insn
+;; below has no memory alternative.
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "register_operand")
+ (plus:DI (match_operand:DI 1 "register_operand")
+ (match_operand:DI 2 "general_operand")))]
+ ""
+{
+ if (MEM_P (operands[2]) && TARGET_V32)
+ operands[2] = force_reg (DImode, operands[2]);
+})
+
+;; DImode add, pre-v32: operate on the low word (%M0) first, then use
+;; "ax" so the following insn consumes the carry into the high word
+;; (%H0).  The 'P' alternative uses the %A2 modifier to emit adds.w or
+;; addq depending on the high part (see the comment above adddi3).
+;; NOTE(review): J/N/P are presumably quick-immediate constraint
+;; classes -- see constraints.md for their exact ranges.
+(define_insn "*adddi3_non_v32"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,&r,&r")
+ (plus:DI (match_operand:DI 1 "register_operand" "%0,0,0,0,r")
+ (match_operand:DI 2 "general_operand" "J,N,P,g,!To")))]
+ "!TARGET_V32"
+ "@
+ addq %2,%M0\;ax\;addq 0,%H0
+ subq %n2,%M0\;ax\;subq 0,%H0
+ add%e2.%z2 %2,%M0\;ax\;%A2 %H2,%H0
+ add.d %M2,%M0\;ax\;add.d %H2,%H0
+ add.d %M2,%M1,%M0\;ax\;add.d %H2,%H1,%H0")
+
+; It seems no use allowing a memory operand for this one, because we'd
+; need a scratch register for incrementing the address.
+;; DImode add for v32, using "addc" for the carry into the high word;
+;; the negative-quick alternative (N) still needs the "ax" form.  The
+;; last two alternatives ('r' and 'n') emit identical code --
+;; NOTE(review): presumably kept separate only as reload choices.
+(define_insn "*adddi3_v32"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r")
+ (plus:DI (match_operand:DI 1 "register_operand" "%0,0,0,0,0")
+ (match_operand:DI 2 "nonmemory_operand" "J,N,P,r,n")))]
+ "TARGET_V32"
+ "@
+ addq %2,%M0\;addc 0,%H0
+ subq %n2,%M0\;ax\;subq 0,%H0
+ add%e2.%z2 %2,%M0\;addc %H2,%H0
+ add.d %M2,%M0\;addc %H2,%H0
+ add.d %M2,%M0\;addc %H2,%H0")
+
+;; QI/HI/SImode add.  Nothing to prepare; the *add<mode>3_* insns
+;; below match directly.
+(define_expand "add<mode>3"
+ [(set (match_operand:BWD 0 "register_operand")
+ (plus:BWD
+ (match_operand:BWD 1 "register_operand")
+ (match_operand:BWD 2 "general_operand")))]
+ ""
+ "")
+
+;; SImode add, pre-v32.  Per alternative: plain add.d (0,1); quick
+;; immediates addq/subq (2,3); 16-bit addu/subu when a known constant
+;; fits (4); PIC-related CONST/UNSPEC addends via '!S' (5); general (6);
+;; three-operand form for '!To' (7); and the reload-only tied case (8).
+(define_insn "*addsi3_non_v32"
+ [(set (match_operand:SI 0 "register_operand" "=r,r, r,r,r,r, r,r, r")
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "%0,0, 0,0,0,0, 0,r, r")
+ (match_operand:SI 2 "general_operand" "r,Q>,J,N,n,!S,g,!To,0")))]
+
+;; The last constraint is due to that after reload, the '%' is not
+;; honored, and canonicalization doesn't care about keeping the same
+;; register as in destination. This will happen after insn splitting.
+;; gcc <= 2.7.2. FIXME: Check for gcc-2.9x
+
+ "!TARGET_V32"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ return "add.d %2,%0";
+ case 2:
+ return "addq %2,%0";
+ case 3:
+ return "subq %n2,%0";
+ case 4:
+ /* 'Known value', but not in -63..63.
+ Check if addu/subu may be used. */
+ if (INTVAL (operands[2]) > 0)
+ {
+ if (INTVAL (operands[2]) < 256)
+ return "addu.b %2,%0";
+ if (INTVAL (operands[2]) < 65536)
+ return "addu.w %2,%0";
+ }
+ else
+ {
+ if (INTVAL (operands[2]) >= -255)
+ return "subu.b %n2,%0";
+ if (INTVAL (operands[2]) >= -65535)
+ return "subu.w %n2,%0";
+ }
+ return "add.d %2,%0";
+ case 5:
+ {
+ /* A symbolic operand: strip a CONST (PLUS (UNSPEC ...) (CONST_INT))
+ wrapper to find the UNSPEC kind, then pick the add width.  */
+ rtx tem = operands[2];
+ gcc_assert (GET_CODE (tem) == CONST);
+ tem = XEXP (tem, 0);
+ if (GET_CODE (tem) == PLUS
+ && GET_CODE (XEXP (tem, 0)) == UNSPEC
+ /* We don't allow CRIS_UNSPEC_PCREL here; we can't have a
+ pc-relative operand in an add insn. */
+ && XINT (XEXP (tem, 0), 1) == CRIS_UNSPEC_GOTREL
+ && CONST_INT_P (XEXP (tem, 1)))
+ tem = XEXP (tem, 0);
+ gcc_assert (GET_CODE (tem) == UNSPEC);
+ switch (XINT (tem, 1))
+ {
+ case CRIS_UNSPEC_GOTREAD:
+ case CRIS_UNSPEC_PLTGOTREAD:
+ /* Using sign-extend mostly to be consistent with the
+ indexed addressing mode. */
+ if (flag_pic == 1)
+ return "adds.w %2,%0";
+ return "add.d %2,%0";
+
+ case CRIS_UNSPEC_PLT_GOTREL:
+ case CRIS_UNSPEC_GOTREL:
+ return "add.d %2,%0";
+ default:
+ gcc_unreachable ();
+ }
+ }
+ case 6:
+ return "add%u2 %2,%0";
+ case 7:
+ return "add.d %2,%1,%0";
+ case 8:
+ return "add.d %1,%0";
+ default:
+ return "BOGUS addsi %2+%1 to %0";
+ }
+}
+ [(set_attr "slottable" "yes,yes,yes,yes,no,no,no,no,yes")])
+
+; FIXME: Check what's best: having the three-operand ACR alternative
+; before or after the corresponding-operand2 alternative. Check for
+; *all* insns. FIXME: constant constraint letter for -128..127.
+;; SImode add for v32, including three-operand addi/addo/addoq forms
+;; that write an address register ('!a' alternatives -- the '!'
+;; disparages them so they are not preferred by register allocation).
+(define_insn "*addsi3_v32"
+ [(set (match_operand:SI 0 "register_operand" "=r,!a,r,!a, r,r,!a,r,!a,r,r,r,!a")
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "%0,r, 0, r, 0,0,r, 0,r, 0,0,0,r")
+ (match_operand:SI 2 "general_operand" "r, r, Q>,Q>,J,N,NJ,L,L, P,n,g,g")))]
+ "TARGET_V32"
+ "@
+ add.d %2,%0
+ addi %2.b,%1,%0
+ add.d %2,%0
+ addo.d %2,%1,%0
+ addq %2,%0
+ subq %n2,%0
+ addoq %2,%1,%0
+ adds.w %2,%0
+ addo %2,%1,%0
+ addu.w %2,%0
+ add.d %2,%0
+ add%u2 %2,%0
+ addo.%Z2 %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,yes,no,no,no,no,no,no")
+ (set_attr "cc" "*,none,*,none,*,*,none,*,none,*,*,*,none")])
+
+;; HImode add, pre-v32.  The quick-immediate alternatives (addq/subq)
+;; operate on the full register, so condition codes are marked
+;; clobbered for them.
+(define_insn "*addhi3_non_v32"
+ [(set (match_operand:HI 0 "register_operand" "=r,r, r,r,r,r")
+ (plus:HI (match_operand:HI 1 "register_operand" "%0,0, 0,0,0,r")
+ (match_operand:HI 2 "general_operand" "r,Q>,J,N,g,!To")))]
+ "!TARGET_V32"
+ "@
+ add.w %2,%0
+ add.w %2,%0
+ addq %2,%0
+ subq %n2,%0
+ add.w %2,%0
+ add.w %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,yes,yes,no,no")
+ (set_attr "cc" "normal,normal,clobber,clobber,normal,normal")])
+
+;; HImode add for v32; mirrors *addsi3_v32 with .w-sized operations
+;; and the same disparaged '!a' address-register alternatives.
+(define_insn "*addhi3_v32"
+ [(set (match_operand:HI 0 "register_operand" "=r, !a,r,!a, r,r,!a,r,!a")
+ (plus:HI
+ (match_operand:HI 1 "register_operand" "%0,r, 0, r, 0,0,r, 0,r")
+ (match_operand:HI 2 "general_operand" "r, r, Q>,Q>,J,N,NJ,g,g")))]
+ "TARGET_V32"
+ "@
+ add.w %2,%0
+ addi %2.b,%1,%0
+ add.w %2,%0
+ addo.w %2,%1,%0
+ addq %2,%0
+ subq %n2,%0
+ addoq %2,%1,%0
+ add.w %2,%0
+ addo.w %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,yes,no,no")
+ (set_attr "cc" "*,none,*,none,clobber,clobber,none,*,none")])
+
+;; QImode add, pre-v32.  The 'O' alternative rewrites the add as a
+;; subQ of the negated constant (via the %b2 modifier).
+(define_insn "*addqi3_non_v32"
+ [(set (match_operand:QI 0 "register_operand" "=r,r, r,r,r,r,r")
+ (plus:QI (match_operand:QI 1 "register_operand" "%0,0, 0,0,0,0,r")
+ (match_operand:QI 2 "general_operand" "r,Q>,J,N,O,g,!To")))]
+ "!TARGET_V32"
+ "@
+ add.b %2,%0
+ add.b %2,%0
+ addq %2,%0
+ subq %n2,%0
+ subQ -%b2,%0
+ add.b %2,%0
+ add.b %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,yes,yes,yes,no,no")
+ (set_attr "cc" "normal,normal,clobber,clobber,clobber,normal,normal")])
+
+;; QImode add for v32; same structure as *addqi3_non_v32 plus the
+;; addi/addo/addoq address-register alternatives.
+(define_insn "*addqi3_v32"
+ [(set (match_operand:QI 0 "register_operand" "=r,!a,r,!a, r,r,!a,r,r,!a")
+ (plus:QI
+ (match_operand:QI 1 "register_operand" "%0,r, 0, r, 0,0,r, 0,0,r")
+ (match_operand:QI 2 "general_operand" "r,r, Q>,Q>,J,N,NJ,O,g,g")))]
+ "TARGET_V32"
+ "@
+ add.b %2,%0
+ addi %2.b,%1,%0
+ add.b %2,%0
+ addo.b %2,%1,%0
+ addq %2,%0
+ subq %n2,%0
+ addoq %2,%1,%0
+ subQ -%b2,%0
+ add.b %2,%0
+ addo.b %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,yes,yes,no,no")
+ (set_attr "cc" "*,none,*,none,clobber,clobber,none,clobber,*,none")])
+
+;; Subtract.
+;;
+;; Note that because of insn canonicalization these will seldom, if
+;; ever, be used with a known constant as an operand.
+
+;; Note that for the 'P' constraint, the high part can be -1 or 0. We
+;; output the insn through the 'D' output modifier as "subs.w" and "subq",
+;; respectively.
+;; DImode subtract.  Like adddi3, force a memory subtrahend into a
+;; register for v32, whose insn has no memory alternative.
+(define_expand "subdi3"
+ [(set (match_operand:DI 0 "register_operand")
+ (minus:DI (match_operand:DI 1 "register_operand")
+ (match_operand:DI 2 "general_operand")))]
+ ""
+{
+ if (TARGET_V32 && MEM_P (operands[2]))
+ operands[2] = force_reg (DImode, operands[2]);
+})
+
+;; DImode subtract, pre-v32: low word first, then "ax" to carry the
+;; borrow into the high word.  %D2 picks subs.w/subq for the 'P' case
+;; (see the comment above subdi3).
+(define_insn "*subdi3_non_v32"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,&r,&r")
+ (minus:DI (match_operand:DI 1 "register_operand" "0,0,0,0,r")
+ (match_operand:DI 2 "general_operand" "J,N,P,g,!To")))]
+ "!TARGET_V32"
+ "@
+ subq %2,%M0\;ax\;subq 0,%H0
+ addq %n2,%M0\;ax\;addq 0,%H0
+ sub%e2.%z2 %2,%M0\;ax\;%D2 %H2,%H0
+ sub.d %M2,%M0\;ax\;sub.d %H2,%H0
+ sub.d %M2,%M1,%M0\;ax\;sub.d %H2,%H1,%H0")
+
+;; DImode subtract for v32; all alternatives still use the "ax" carry
+;; prefix (no addc-style short form for subtraction here), and memory
+;; operands are excluded (see subdi3 above).
+(define_insn "*subdi3_v32"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,&r")
+ (minus:DI (match_operand:DI 1 "register_operand" "0,0,0,0")
+ (match_operand:DI 2 "nonmemory_operand" "J,N,P,r")))]
+ "TARGET_V32"
+ "@
+ subq %2,%M0\;ax\;subq 0,%H0
+ addq %n2,%M0\;ax\;addq 0,%H0
+ sub%e2.%z2 %2,%M0\;ax\;%D2 %H2,%H0
+ sub.d %M2,%M0\;ax\;sub.d %H2,%H0")
+
+;; QI/HI/SImode subtract.  Nothing to prepare; the *sub patterns below
+;; match directly.
+(define_expand "sub<mode>3"
+ [(set (match_operand:BWD 0 "register_operand")
+ (minus:BWD
+ (match_operand:BWD 1 "register_operand")
+ (match_operand:BWD 2 "general_operand")))]
+ ""
+ "")
+
+;; SImode subtract, pre-v32.  'P' uses the %e2/%z2 modifiers to pick
+;; the sign/size of the sub instruction for a known constant.
+(define_insn "*subsi3_non_v32"
+ [(set (match_operand:SI 0 "register_operand" "=r,r, r,r,r,r,r,r")
+ (minus:SI
+ (match_operand:SI 1 "register_operand" "0,0, 0,0,0,0,0,r")
+ (match_operand:SI 2 "general_operand" "r,Q>,J,N,P,n,g,!To")))]
+ "!TARGET_V32"
+
+;; This does not do the optimal: "addu.w 65535,r0" when %2 is negative.
+;; But then again, %2 should not be negative.
+
+ "@
+ sub.d %2,%0
+ sub.d %2,%0
+ subq %2,%0
+ addq %n2,%0
+ sub%e2.%z2 %2,%0
+ sub.d %2,%0
+ sub.d %2,%0
+ sub.d %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,yes,yes,no,no,no,no")])
+
+;; SImode subtract for v32; as above but without the three-operand
+;; '!To' alternative.
+(define_insn "*subsi3_v32"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
+ (minus:SI
+ (match_operand:SI 1 "register_operand" "0,0,0,0,0,0,0")
+ (match_operand:SI 2 "general_operand" "r,Q>,J,N,P,n,g")))]
+ "TARGET_V32"
+ "@
+ sub.d %2,%0
+ sub.d %2,%0
+ subq %2,%0
+ addq %n2,%0
+ sub%e2.%z2 %2,%0
+ sub.d %2,%0
+ sub.d %2,%0"
+ [(set_attr "slottable" "yes,yes,yes,yes,no,no,no")])
+
+;; QI/HImode subtract, pre-v32; quick-immediate alternatives clobber
+;; the condition codes (they act on the full register).
+(define_insn "*sub<mode>3_nonv32"
+ [(set (match_operand:BW 0 "register_operand" "=r,r, r,r,r,r")
+ (minus:BW (match_operand:BW 1 "register_operand" "0,0, 0,0,0,r")
+ (match_operand:BW 2 "general_operand" "r,Q>,J,N,g,!To")))]
+ "!TARGET_V32"
+ "@
+ sub<m> %2,%0
+ sub<m> %2,%0
+ subq %2,%0
+ addq %n2,%0
+ sub<m> %2,%0
+ sub<m> %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,yes,yes,no,no")
+ (set_attr "cc" "normal,normal,clobber,clobber,normal,normal")])
+
+;; QI/HImode subtract for v32; as the pre-v32 pattern minus the
+;; three-operand '!To' alternative.
+(define_insn "*sub<mode>3_v32"
+ [(set (match_operand:BW 0 "register_operand" "=r,r,r,r,r")
+ (minus:BW (match_operand:BW 1 "register_operand" "0,0,0,0,0")
+ (match_operand:BW 2 "general_operand" "r,Q>,J,N,g")))]
+ "TARGET_V32"
+ "@
+ sub<m> %2,%0
+ sub<m> %2,%0
+ subq %2,%0
+ addq %n2,%0
+ sub<m> %2,%0"
+ [(set_attr "slottable" "yes,yes,yes,yes,no")
+ (set_attr "cc" "normal,normal,clobber,clobber,normal")])
+
+;; CRIS has some add/sub-with-sign/zero-extend instructions.
+;; Although these perform sign/zero-extension to SImode, they are
+;; equally applicable for the HImode case.
+;; FIXME: Check; GCC should handle the widening.
+;; Note that these must be located after the normal add/sub patterns,
+;; so not to get constants into any less specific operands.
+;;
+;; Extend with add/sub and side-effect.
+;;
+;; ADDS/SUBS/ADDU/SUBU and BOUND, which needs a check for zero_extend
+;;
+;; adds/subs/addu/subu bound [rx=ry+rz.S]
+
+;; QImode to HImode
+;; FIXME: GCC should widen.
+
+;; HImode add/sub of a sign/zero-extended QImode memory operand read
+;; through biap addressing, with the address written back to operand 5.
+;; %x6 is the operator name, %e7 its sign letter (s/u), %m7 the size of
+;; the extended operand.
+(define_insn "*extopqihi_side_biap"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (match_operator:HI
+ 6 "cris_additive_operand_extend_operator"
+ [(match_operand:HI 1 "register_operand" "0,0")
+ (match_operator:HI
+ 7 "cris_extend_operator"
+ [(mem:QI (plus:SI
+ (mult:SI (match_operand:SI 2 "register_operand" "r,r")
+ (match_operand:SI 3 "const_int_operand" "n,n"))
+ (match_operand:SI 4 "register_operand" "r,r")))])]))
+ (set (match_operand:SI 5 "register_operand" "=*4,r")
+ (plus:SI (mult:SI (match_dup 2)
+ (match_dup 3))
+ (match_dup 4)))]
+ "cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)"
+ "@
+ #
+ %x6%e7.%m7 [%5=%4+%2%T3],%0")
+
+;; SImode op on a sign/zero-extended QI/HI memory operand via biap with
+;; write-back.  "bound" (UMIN) is only valid together with zero_extend,
+;; hence the extra condition.
+(define_insn "*extop<mode>si_side_biap"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (match_operator:SI
+ 6 "cris_operand_extend_operator"
+ [(match_operand:SI 1 "register_operand" "0,0")
+ (match_operator:SI
+ 7 "cris_extend_operator"
+ [(mem:BW (plus:SI
+ (mult:SI (match_operand:SI 2 "register_operand" "r,r")
+ (match_operand:SI 3 "const_int_operand" "n,n"))
+ (match_operand:SI 4 "register_operand" "r,r")))])]))
+ (set (match_operand:SI 5 "register_operand" "=*4,r")
+ (plus:SI (mult:SI (match_dup 2)
+ (match_dup 3))
+ (match_dup 4)))]
+ "(GET_CODE (operands[6]) != UMIN || GET_CODE (operands[7]) == ZERO_EXTEND)
+ && cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)"
+ "@
+ #
+ %x6%e7<m> [%5=%4+%2%T3],%0")
+
+
+;; [rx=ry+i]
+
+;; QImode to HImode
+
+;; QI-to-HI extend-and-add/sub with bdap side effect; split logic for
+;; the tied alternatives 0 and 3 is the same as in *op_side<mode>.
+(define_insn "*extopqihi_side"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
+ (match_operator:HI
+ 5 "cris_additive_operand_extend_operator"
+ [(match_operand:HI 1 "register_operand" "0,0,0,0,0")
+ (match_operator:HI
+ 6 "cris_extend_operator"
+ [(mem:QI
+ (plus:SI (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R")
+ (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")
+ ))])]))
+ (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r")
+ (plus:SI (match_dup 2)
+ (match_dup 3)))]
+ "cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)"
+{
+ if ((which_alternative == 0 || which_alternative == 3)
+ && (!CONST_INT_P (operands[3])
+ || INTVAL (operands[3]) > 127
+ || INTVAL (operands[3]) < -128
+ || satisfies_constraint_N (operands[3])
+ || satisfies_constraint_J (operands[3])))
+ return "#";
+ if (which_alternative == 4)
+ return "%x5%E6.%m6 [%4=%3%S2],%0";
+ return "%x5%E6.%m6 [%4=%2%S3],%0";
+})
+
+;; QI/HI-to-SI extend-and-op with bdap side effect; UMIN (bound) is
+;; only allowed with zero_extend, and the split logic matches
+;; *op_side<mode>.
+(define_insn "*extop<mode>si_side"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r")
+ (match_operator:SI
+ 5 "cris_operand_extend_operator"
+ [(match_operand:SI 1 "register_operand" "0,0,0,0,0")
+ (match_operator:SI
+ 6 "cris_extend_operator"
+ [(mem:BW
+ (plus:SI (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R")
+ (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")
+ ))])]))
+ (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r")
+ (plus:SI (match_dup 2)
+ (match_dup 3)))]
+ "(GET_CODE (operands[5]) != UMIN || GET_CODE (operands[6]) == ZERO_EXTEND)
+ && cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)"
+{
+ if ((which_alternative == 0 || which_alternative == 3)
+ && (!CONST_INT_P (operands[3])
+ || INTVAL (operands[3]) > 127
+ || INTVAL (operands[3]) < -128
+ || satisfies_constraint_N (operands[3])
+ || satisfies_constraint_J (operands[3])))
+ return "#";
+ if (which_alternative == 4)
+ return "%x5%E6<m> [%4=%3%S2],%0";
+ return "%x5%E6<m> [%4=%2%S3],%0";
+})
+
+
+;; As with op.S we may have to add special pattern to match commuted
+;; operands to adds/addu and bound
+;;
+;; adds/addu/bound [rx=ry+rz.S]
+
+;; QImode to HImode
+;; FIXME: GCC should widen.
+
+;; Commuted form (extend first) of the QI-to-HI add with biap side
+;; effect; the operator is literally plus here, hence the fixed
+;; "add%e6.b" template.
+(define_insn "*extopqihi_swap_side_biap"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (plus:HI
+ (match_operator:HI
+ 6 "cris_extend_operator"
+ [(mem:QI (plus:SI
+ (mult:SI (match_operand:SI 2 "register_operand" "r,r")
+ (match_operand:SI 3 "const_int_operand" "n,n"))
+ (match_operand:SI 4 "register_operand" "r,r")))])
+ (match_operand:HI 1 "register_operand" "0,0")))
+ (set (match_operand:SI 5 "register_operand" "=*4,r")
+ (plus:SI (mult:SI (match_dup 2)
+ (match_dup 3))
+ (match_dup 4)))]
+ "cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)"
+ "@
+ #
+ add%e6.b [%5=%4+%2%T3],%0")
+
+;; Commuted BW-to-SI add/bound with biap side effect; bound (UMIN)
+;; requires zero_extend.
+(define_insn "*extop<mode>si_swap_side_biap"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (match_operator:SI
+ 7 "cris_plus_or_bound_operator"
+ [(match_operator:SI
+ 6 "cris_extend_operator"
+ [(mem:BW (plus:SI
+ (mult:SI (match_operand:SI 2 "register_operand" "r,r")
+ (match_operand:SI 3 "const_int_operand" "n,n"))
+ (match_operand:SI 4 "register_operand" "r,r")))])
+ (match_operand:SI 1 "register_operand" "0,0")]))
+ (set (match_operand:SI 5 "register_operand" "=*4,r")
+ (plus:SI (mult:SI (match_dup 2)
+ (match_dup 3))
+ (match_dup 4)))]
+ "(GET_CODE (operands[7]) != UMIN || GET_CODE (operands[6]) == ZERO_EXTEND)
+ && cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)"
+ "@
+ #
+ %x7%E6<m> [%5=%4+%2%T3],%0")
+
+;; [rx=ry+i]
+;; FIXME: GCC should widen.
+
+;; QImode to HImode
+
+;; Commuted QI-to-HI extend-and-add with bdap side effect; split logic
+;; as in *op_side<mode>.
+(define_insn "*extopqihi_swap_side"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
+ (plus:HI
+ (match_operator:HI
+ 5 "cris_extend_operator"
+ [(mem:QI (plus:SI
+ (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R")
+ (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))])
+ (match_operand:HI 1 "register_operand" "0,0,0,0,0")))
+ (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r")
+ (plus:SI (match_dup 2)
+ (match_dup 3)))]
+ "cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)"
+{
+ if ((which_alternative == 0 || which_alternative == 3)
+ && (!CONST_INT_P (operands[3])
+ || INTVAL (operands[3]) > 127
+ || INTVAL (operands[3]) < -128
+ || satisfies_constraint_N (operands[3])
+ || satisfies_constraint_J (operands[3])))
+ return "#";
+ if (which_alternative == 4)
+ return "add%e5.b [%4=%3%S2],%0";
+ return "add%e5.b [%4=%2%S3],%0";
+})
+
+;; Commuted BW-to-SI add/bound with bdap side effect; bound (UMIN)
+;; requires zero_extend, and the split logic matches *op_side<mode>.
+;; Fixed: the alternative-4 return used md-string backslash-escaped
+;; quotes (\"...\") inside this braced C block, inconsistent with the
+;; plain-C quoting of the return on the next line and of every sibling
+;; pattern; braced blocks are plain C, so the escapes are spurious.
+(define_insn "*extop<mode>si_swap_side"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r")
+ (match_operator:SI
+ 6 "cris_plus_or_bound_operator"
+ [(match_operator:SI
+ 5 "cris_extend_operator"
+ [(mem:BW (plus:SI
+ (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R")
+ (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))])
+ (match_operand:SI 1 "register_operand" "0,0,0,0,0")]))
+ (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r")
+ (plus:SI (match_dup 2)
+ (match_dup 3)))]
+ "(GET_CODE (operands[6]) != UMIN || GET_CODE (operands[5]) == ZERO_EXTEND)
+ && cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)"
+{
+ if ((which_alternative == 0 || which_alternative == 3)
+ && (!CONST_INT_P (operands[3])
+ || INTVAL (operands[3]) > 127
+ || INTVAL (operands[3]) < -128
+ || satisfies_constraint_N (operands[3])
+ || satisfies_constraint_J (operands[3])))
+ return "#";
+ if (which_alternative == 4)
+ return "%x6%E5.%m5 [%4=%3%S2],%0";
+ return "%x6%E5<m> [%4=%2%S3],%0";
+})
+
+;; Extend versions (zero/sign) of normal add/sub (no side-effects).
+
+;; QImode to HImode
+;; FIXME: GCC should widen.
+
+;; Plain (no side-effect) QI-to-HI extend add/sub, pre-v32; condition
+;; codes are marked clobbered.
+(define_insn "*extopqihi_non_v32"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
+ (match_operator:HI
+ 3 "cris_additive_operand_extend_operator"
+ [(match_operand:HI 1 "register_operand" "0,0,0,r")
+ (match_operator:HI
+ 4 "cris_extend_operator"
+ [(match_operand:QI 2 "nonimmediate_operand" "r,Q>,m,!To")])]))]
+ "!TARGET_V32 && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && (operands[1] != frame_pointer_rtx || GET_CODE (operands[3]) != PLUS)"
+ "@
+ %x3%E4.%m4 %2,%0
+ %x3%E4.%m4 %2,%0
+ %x3%E4.%m4 %2,%0
+ %x3%E4.%m4 %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,no,no")
+ (set_attr "cc" "clobber")])
+
+;; QI-to-HI extend add/sub for v32; only register and memory source
+;; alternatives, one shared template.
+(define_insn "*extopqihi_v32"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (match_operator:HI
+ 3 "cris_additive_operand_extend_operator"
+ [(match_operand:HI 1 "register_operand" "0,0")
+ (match_operator:HI
+ 4 "cris_extend_operator"
+ [(match_operand:QI 2 "nonimmediate_operand" "r,m")])]))]
+ "TARGET_V32"
+ "%x3%e4.%m4 %2,%0"
+ [(set_attr "slottable" "yes")
+ (set_attr "cc" "clobber")])
+
+;; QImode to SImode
+
+;; QI/HI-to-SI extend op (add/sub/bound), pre-v32; bound (UMIN) is only
+;; valid with zero_extend, per the first condition clause.
+(define_insn "*extop<mode>si_non_v32"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (match_operator:SI
+ 3 "cris_operand_extend_operator"
+ [(match_operand:SI 1 "register_operand" "0,0,0,r")
+ (match_operator:SI
+ 4 "cris_extend_operator"
+ [(match_operand:BW 2 "nonimmediate_operand" "r,Q>,m,!To")])]))]
+ "!TARGET_V32
+ && (GET_CODE (operands[3]) != UMIN || GET_CODE (operands[4]) == ZERO_EXTEND)
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && (operands[1] != frame_pointer_rtx || GET_CODE (operands[3]) != PLUS)"
+ "@
+ %x3%E4<m> %2,%0
+ %x3%E4<m> %2,%0
+ %x3%E4<m> %2,%0
+ %x3%E4<m> %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,no,no")])
+
+;; QI/HI-to-SI extend add/sub for v32.  Additive operators only --
+;; v32 "bound" is handled by *bound<mode>_v32 below.
+(define_insn "*extop<mode>si_v32"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (match_operator:SI
+ 3 "cris_additive_operand_extend_operator"
+ [(match_operand:SI 1 "register_operand" "0,0")
+ (match_operator:SI
+ 4 "cris_extend_operator"
+ [(match_operand:BW 2 "nonimmediate_operand" "r,m")])]))]
+ "TARGET_V32"
+ "%x3%e4.%m4 %2,%0"
+ [(set_attr "slottable" "yes")])
+
+;; As with the side-effect patterns, may have to have swapped operands for add.
+;; For commutative operands, these are the canonical forms.
+
+;; QImode to HImode
+
+;; Commuted (extend-operand first) QI-to-HI add, pre-v32; this is the
+;; canonical operand order for the commutative plus.
+(define_insn "*addxqihi_swap_non_v32"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
+ (plus:HI
+ (match_operator:HI
+ 3 "cris_extend_operator"
+ [(match_operand:QI 2 "nonimmediate_operand" "r,Q>,m,!To")])
+ (match_operand:HI 1 "register_operand" "0,0,0,r")))]
+ "!TARGET_V32 && operands[1] != frame_pointer_rtx"
+ "@
+ add%e3.b %2,%0
+ add%e3.b %2,%0
+ add%e3.b %2,%0
+ add%e3.b %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,no,no")
+ (set_attr "cc" "clobber")])
+
+;; A case for v32, to catch the "addo" insn in addition to "adds". We
+;; only care to match the canonical form; there should be no other.
+
+;; v32 sign-extending QI-to-HI add, with a three-operand "addo.b"
+;; alternative targeting an address register ('!a').
+(define_insn "*addsbw_v32"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,!a")
+ (plus:HI
+ (sign_extend:HI
+ (match_operand:QI 2 "nonimmediate_operand" "r,m,m"))
+ (match_operand:HI 1 "register_operand" "0,0,r")))]
+ "TARGET_V32"
+ "@
+ adds.b %2,%0
+ adds.b %2,%0
+ addo.b %2,%1,%0"
+ [(set_attr "slottable" "yes")
+ (set_attr "cc" "clobber,clobber,none")])
+
+;; v32 zero-extending QI-to-HI add ("addu.b"); no addo form here.
+(define_insn "*addubw_v32"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (plus:HI
+ (zero_extend:HI
+ (match_operand:QI 2 "nonimmediate_operand" "r,m"))
+ (match_operand:HI 1 "register_operand" "0,0")))]
+ "TARGET_V32"
+ "addu.b %2,%0"
+ [(set_attr "slottable" "yes")
+ (set_attr "cc" "clobber")])
+
+;; Commuted BW-to-SI add/bound, pre-v32; bound (UMIN) only with
+;; zero_extend.
+(define_insn "*extop<mode>si_swap_non_v32"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (match_operator:SI
+ 4 "cris_plus_or_bound_operator"
+ [(match_operator:SI
+ 3 "cris_extend_operator"
+ [(match_operand:BW 2 "nonimmediate_operand" "r,Q>,m,!To")])
+ (match_operand:SI 1 "register_operand" "0,0,0,r")]))]
+ "!TARGET_V32
+ && (GET_CODE (operands[4]) != UMIN || GET_CODE (operands[3]) == ZERO_EXTEND)
+ && operands[1] != frame_pointer_rtx"
+ "@
+ %x4%E3<m> %2,%0
+ %x4%E3<m> %2,%0
+ %x4%E3<m> %2,%0
+ %x4%E3<m> %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,no,no")])
+
+;; v32 sign-extending BW-to-SI add, with a three-operand "addo"
+;; alternative for an address-register destination ('!a').
+(define_insn "*adds<mode>_v32"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,!a")
+ (plus:SI
+ (sign_extend:SI
+ (match_operand:BW 2 "nonimmediate_operand" "r,m,m"))
+ (match_operand:SI 1 "register_operand" "0,0,r")))]
+ "TARGET_V32"
+ "@
+ adds<m> %2,%0
+ adds<m> %2,%0
+ addo<m> %2,%1,%0"
+ [(set_attr "slottable" "yes")
+ (set_attr "cc" "*,*,none")])
+
+;; v32 zero-extending BW-to-SI add ("addu.b"/"addu.w").
+(define_insn "*addu<mode>_v32"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI
+ (zero_extend:SI
+ (match_operand:BW 2 "nonimmediate_operand" "r,m"))
+ (match_operand:SI 1 "register_operand" "0,0")))]
+ "TARGET_V32"
+ "addu<m> %2,%0"
+ [(set_attr "slottable" "yes")])
+
+;; v32 "bound" (UMIN of a zero-extended QI/HI register with an SI
+;; register).  Register source only -- no memory alternative here,
+;; unlike the pre-v32 extend-op patterns.
+(define_insn "*bound<mode>_v32"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (umin:SI
+ (zero_extend:SI
+ (match_operand:BW 2 "register_operand" "r"))
+ (match_operand:SI 1 "register_operand" "0")))]
+ "TARGET_V32 && operands[1] != frame_pointer_rtx"
+ "bound<m> %2,%0"
+ [(set_attr "slottable" "yes")])
+
+;; This is the special case when we use what corresponds to the
+;; instruction above in "casesi". Do *not* change it to use the generic
+;; pattern and "REG 15" as pc; I did that and it led to madness and
+;; maintenance problems: Instead of (as imagined) recognizing and removing
+;; or replacing this pattern with something simpler, other variant
+;; patterns were recognized or combined, including some prefix variants
+;; where the value in pc is not that of the next instruction (which means
+;; this instruction actually *is* special and *should* be marked as such).
+;; When switching from the "generic pattern match" approach to this simpler
+;; approach, there were insignificant differences in gcc, ipps and
+;; product code, somehow due to scratching reload behind the ear or
+;; something. Testcase "gcc" looked .01% slower and 4 bytes bigger;
+;; product code became .001% smaller but "looked better". The testcase
+;; "ipps" was just different at register allocation.
+;;
+;; Assumptions in the jump optimizer forces us to use IF_THEN_ELSE in this
+;; pattern with the default-label as the else, with the "if" being
+;; index-is-less-than the max number of cases plus one. The default-label
+;; is attached to the end of the case-table at time of output.
+
+;; pc-relative switch dispatch for casesi, pre-v32: when the index is
+;; below the case count, "adds.w [$pc+%0.w],$pc" adds the dispatch
+;; table entry to pc; otherwise fall through to the default label.
+;; See the long comment above for why this is a dedicated pattern.
+(define_insn "*casesi_adds_w"
+ [(set (pc)
+ (if_then_else
+ (ltu (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "const_int_operand" "n"))
+ (plus:SI (sign_extend:SI
+ (mem:HI
+ (plus:SI (mult:SI (match_dup 0) (const_int 2))
+ (pc))))
+ (pc))
+ (label_ref (match_operand 2 "" ""))))
+ (use (label_ref (match_operand 3 "" "")))]
+ "!TARGET_V32 && operands[0] != frame_pointer_rtx"
+ "adds.w [$pc+%0.w],$pc"
+ [(set_attr "cc" "clobber")])
+
+;; For V32, we just have a jump, but we need to mark the table as used,
+;; and the jump insn must have the if_then_else form expected by core
+;; GCC. Since we don't want to prolong the lifetime of the original
+;; index value, we compare against "unspec 0". It's a pity we have to
+;; jump through to get the default label in place and to keep the jump
+;; table around. FIXME: Look into it some time.
+
+;; v32 casesi dispatch: a plain indirect "jump" (with delay slot) kept
+;; in if_then_else form so core GCC is happy; the unspec stands in for
+;; the already-consumed index (see the comment above).
+(define_insn "*casesi_jump_v32"
+ [(set (pc)
+ (if_then_else
+ (ltu (unspec [(const_int 0)] CRIS_UNSPEC_CASESI)
+ (match_operand:SI 0 "const_int_operand" "n"))
+ (match_operand:SI 1 "register_operand" "r")
+ (label_ref (match_operand 2 "" ""))))
+ (use (label_ref (match_operand 3 "" "")))]
+ "TARGET_V32"
+ "jump %1%#"
+ [(set_attr "cc" "clobber")
+ (set_attr "slottable" "has_slot")])
+
+;; Multiply instructions.
+
+;; Sometimes powers of 2 (which are normally canonicalized to a
+;; left-shift) appear here, as a result of address reloading.
+;; As a special, for values 3 and 5, we can match with an addi, so add those.
+;;
+;; FIXME: This may be unnecessary now.
+;; Explicitly named for convenience of having a gen_... function.
+
+;; Multiply by 2/4 via lslq, and by 3/5 via addi (addi rx.w,rx computes
+;; rx + rx*2; addi rx.d,rx computes rx + rx*4).  Matches leftover
+;; multiplications from address reloading; see the comment above.
+(define_insn "addi_mul"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI
+ (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "operands[0] != frame_pointer_rtx
+ && operands[1] != frame_pointer_rtx
+ && CONST_INT_P (operands[2])
+ && (INTVAL (operands[2]) == 2
+ || INTVAL (operands[2]) == 4 || INTVAL (operands[2]) == 3
+ || INTVAL (operands[2]) == 5)"
+{
+ if (INTVAL (operands[2]) == 2)
+ return "lslq 1,%0";
+ else if (INTVAL (operands[2]) == 4)
+ return "lslq 2,%0";
+ else if (INTVAL (operands[2]) == 3)
+ return "addi %0.w,%0";
+ else if (INTVAL (operands[2]) == 5)
+ return "addi %0.d,%0";
+ return "BAD: adr_mulsi: %0=%1*%2";
+}
+[(set_attr "slottable" "yes")
+ ;; No flags are changed if this insn is "addi", but it does not seem
+ ;; worth the trouble to distinguish that to the lslq cases.
+ (set_attr "cc" "clobber")])
+
+;; The addi insn as it is normally used.
+
+;; Make the ACR alternative taste bad enough to not choose it as a
+;; preference to avoid spilling problems (unwind-dw2-fde.c at build).
+;; FIXME: Revisit for new register allocator.
+
+;; The normal addi: base plus register scaled by 1, 2 or 4 (%T3 prints
+;; the scale as a size suffix).  The '!a' alternative is the
+;; three-operand ACR form, disparaged per the comment above.
+(define_insn "*addi"
+ [(set (match_operand:SI 0 "register_operand" "=r,!a")
+ (plus:SI
+ (mult:SI (match_operand:SI 2 "register_operand" "r,r")
+ (match_operand:SI 3 "const_int_operand" "n,n"))
+ (match_operand:SI 1 "register_operand" "0,r")))]
+ "operands[0] != frame_pointer_rtx
+ && operands[1] != frame_pointer_rtx
+ && CONST_INT_P (operands[3])
+ && (INTVAL (operands[3]) == 1
+ || INTVAL (operands[3]) == 2 || INTVAL (operands[3]) == 4)"
+ "@
+ addi %2%T3,%0
+ addi %2%T3,%1,%0"
+ [(set_attr "slottable" "yes")
+ (set_attr "cc" "none")])
+
+;; The mstep instruction. Probably not useful by itself; it's to
+;; non-linear wrt. the other insns. We used to expand to it, so at least
+;; it's correct.
+
+;; The multiply-step insn in shift form: conditionally (on cc0 < 0)
+;; shift-left-and-add, else just shift left.  Pre-v32 only; kept mainly
+;; for correctness per the comment above.
+(define_insn "mstep_shift"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (lt:SI (cc0) (const_int 0))
+ (plus:SI (ashift:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 1))
+ (match_operand:SI 2 "register_operand" "r"))
+ (ashift:SI (match_operand:SI 3 "register_operand" "0")
+ (const_int 1))))]
+ "!TARGET_V32"
+ "mstep %2,%0"
+ [(set_attr "slottable" "yes")])
+
+;; When illegitimate addresses are legitimized, sometimes gcc forgets
+;; to canonicalize the multiplications.
+;;
+;; FIXME: Check gcc > 2.7.2, remove and possibly fix in gcc.
+
+;; As mstep_shift, but with the shift expressed as multiply-by-2, which
+;; legitimization sometimes leaves uncanonicalized (see comment above).
+(define_insn "mstep_mul"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (lt:SI (cc0) (const_int 0))
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 2))
+ (match_operand:SI 2 "register_operand" "r"))
+ (mult:SI (match_operand:SI 3 "register_operand" "0")
+ (const_int 2))))]
+ "!TARGET_V32
+ && operands[0] != frame_pointer_rtx
+ && operands[1] != frame_pointer_rtx
+ && operands[2] != frame_pointer_rtx
+ && operands[3] != frame_pointer_rtx"
+ "mstep %2,%0"
+ [(set_attr "slottable" "yes")])
+
+;; Widening signed/unsigned multiply (QI->HI, HI->SI via the WD/szext
+;; iterators), clobbering a scratch in class 'h' -- NOTE(review):
+;; presumably the MOF special register, per the $mof use below.
+(define_insn "<u>mul<s><mode>3"
+ [(set (match_operand:WD 0 "register_operand" "=r")
+ (mult:WD
+ (szext:WD (match_operand:<S> 1 "register_operand" "%0"))
+ (szext:WD (match_operand:<S> 2 "register_operand" "r"))))
+ (clobber (match_scratch:SI 3 "=h"))]
+ "TARGET_HAS_MUL_INSNS"
+ "%!mul<su><mm> %2,%0"
+ [(set (attr "slottable")
+ (if_then_else (match_test "TARGET_MUL_BUG")
+ (const_string "no")
+ (const_string "yes")))
+ ;; For umuls.[bwd] it's just N unusable here, but let's be safe.
+ ;; For muls.b, this really extends to SImode, so cc should be
+ ;; considered clobbered.
+ ;; For muls.w, it's just N unusable here, but let's be safe.
+ (set_attr "cc" "clobber")])
+
+;; Note that gcc does not make use of such a thing as umulqisi3. It gets
+;; confused and will erroneously use it instead of umulhisi3, failing (at
+;; least) gcc.c-torture/execute/arith-rand.c at all optimization levels.
+;; Inspection of optab code shows that there must be only one widening
+;; multiplication per mode widened to.
+
+;; Non-widening SImode multiply; clobbers the 'h'-class scratch like
+;; the widening patterns.  Not slottable when the mul bug workaround
+;; (TARGET_MUL_BUG) is active.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "r")))
+ (clobber (match_scratch:SI 3 "=h"))]
+ "TARGET_HAS_MUL_INSNS"
+ "%!muls.d %2,%0"
+ [(set (attr "slottable")
+ (if_then_else (match_test "TARGET_MUL_BUG")
+ (const_string "no")
+ (const_string "yes")))
+ ;; Just N unusable here, but let's be safe.
+ (set_attr "cc" "clobber")])
+
+;; A few multiply variations.
+
+;; When needed, we can get the high 32 bits from the overflow
+;; register. We don't care to split and optimize these.
+;;
+;; Note that cc0 is still valid after the move-from-overflow-register
+;; insn; no special precaution need to be taken in cris_notice_update_cc.
+
+(define_insn "<u>mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI
+ (szext:DI (match_operand:SI 1 "register_operand" "%0"))
+ (szext:DI (match_operand:SI 2 "register_operand" "r"))))
+ (clobber (match_scratch:SI 3 "=h"))]
+ "TARGET_HAS_MUL_INSNS"
+ "%!mul<su>.d %2,%M0\;move $mof,%H0")
+
+;; These two patterns may be expressible by other means, perhaps by making
+;; [u]?mulsidi3 a define_expand.
+
+;; Due to register allocation braindamage, the clobber 1,2 alternatives
+;; cause a move into the clobbered register *before* the insn, then
+;; after the insn, mof is moved too, rather than the clobber assigned
+;; the last mof target.  This became apparent when making MOF and SRP
+;; visible registers, with the necessary tweak to smulsi3_highpart.
+;; Because these patterns are used in division by constants, that damage
+;; is visible (ipps regression tests).  Therefore the last two
+;; alternatives, "helping" reload to avoid an unnecessary move, but
+;; punished by force of one "?".  Check code from "int d (int a) {return
+;; a / 1000;}" and unsigned.  FIXME: Comment above was for 3.2, revisit.
+
+;; High 32 bits of a 64-bit product.  Alternatives 0/1 leave the result
+;; in the "h"-class register directly; alternatives 2/3 copy it out of
+;; $mof into a general register.
+(define_insn "<su>mulsi3_highpart"
+  [(set (match_operand:SI 0 "register_operand" "=h,h,?r,?r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI
+ (szext:DI (match_operand:SI 1 "register_operand" "r,r,0,r"))
+ (szext:DI (match_operand:SI 2 "register_operand" "r,r,r,0")))
+ (const_int 32))))
+   (clobber (match_scratch:SI 3 "=1,2,h,h"))]
+  "TARGET_HAS_MUL_INSNS"
+  "@
+   %!mul<su>.d %2,%1
+   %!mul<su>.d %1,%2
+   %!mul<su>.d %2,%1\;move $mof,%0
+   %!mul<su>.d %1,%2\;move $mof,%0"
+  [(set_attr "slottable" "yes,yes,no,no")
+   (set_attr "cc" "clobber")])
+
+;; Divide and modulus instructions.  CRIS only has a step instruction.
+
+;; One division step expressed with ashift: if (op1 << 1) >= op2
+;; (unsigned), the result is (op3 << 1) - op4, else just op5 << 1.
+;; Operands 1, 3 and 5 are all tied to output operand 0, and operand 4
+;; is tied to operand 2 by its "2" constraint.
+(define_insn "dstep_shift"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (geu:SI (ashift:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 1))
+ (match_operand:SI 2 "register_operand" "r"))
+ (minus:SI (ashift:SI (match_operand:SI 3 "register_operand" "0")
+ (const_int 1))
+ (match_operand:SI 4 "register_operand" "2"))
+ (ashift:SI (match_operand:SI 5 "register_operand" "0")
+ (const_int 1))))]
+  ""
+  "dstep %2,%0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; Here's a variant with mult instead of ashift.
+;;
+;; FIXME: This should be investigated.  Which one matches through combination?
+
+;; Same dstep operation, but with the doubling written as (mult ... 2),
+;; which is how combine may canonicalize it.  Frame-pointer operands are
+;; rejected; operands 4 and 5 need no check as they are tied to 2 and 0.
+(define_insn "dstep_mul"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (geu:SI (mult:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 2))
+ (match_operand:SI 2 "register_operand" "r"))
+ (minus:SI (mult:SI (match_operand:SI 3 "register_operand" "0")
+ (const_int 2))
+ (match_operand:SI 4 "register_operand" "2"))
+ (mult:SI (match_operand:SI 5 "register_operand" "0")
+ (const_int 2))))]
+  "operands[0] != frame_pointer_rtx
+   && operands[1] != frame_pointer_rtx
+   && operands[2] != frame_pointer_rtx
+   && operands[3] != frame_pointer_rtx"
+  "dstep %2,%0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; Logical operators.
+
+;; Bitwise "and".
+
+;; There is no use in defining "anddi3", because gcc can expand this by
+;; itself, and make reasonable code without interference.
+
+;; If the first operand is memory or a register and is the same as the
+;; second operand, and the third operand is -256 or -65536, we can use
+;; CLEAR instead.  Or, if the first operand is a register, and the third
+;; operand is 255 or 65535, we can zero_extend.
+;; GCC isn't smart enough to recognize these cases (yet), and they seem
+;; to be common enough to be worthwhile.
+;; FIXME: This should be made obsolete.
+
+;; SImode AND expander.  Masks recognizable as clear.[bw] or movu.[bw]
+;; (see the two special-case insns below) are emitted as-is; everything
+;; else is funneled through registers so the catch-all insns match.
+(define_expand "andsi3"
+  [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (and:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+  ""
+{
+  if (! (CONST_INT_P (operands[2])
+ && (((INTVAL (operands[2]) == -256
+       || INTVAL (operands[2]) == -65536)
+      && rtx_equal_p (operands[1], operands[0]))
+     || ((INTVAL (operands[2]) == 255
+  || INTVAL (operands[2]) == 65535)
+ && REG_P (operands[0])))))
+    {
+      /* Make intermediate steps if operand0 is not a register or
+ operand1 is not a register, and hope that the reload pass will
+ make something useful out of it.  Note that the operands are
+ *not* canonicalized.  For the moment, I chicken out on this,
+ because all or most ports do not describe 'and' with
+ canonicalized operands, and I seem to remember magic in reload,
+ checking that operand1 has constraint '%0', in which case
+ operand0 and operand1 must have similar predicates.
+ FIXME: Investigate.  */
+      rtx reg0 = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (SImode);
+      rtx reg1 = operands[1];
+
+      if (! REG_P (reg1))
+ {
+   emit_move_insn (reg0, reg1);
+   reg1 = reg0;
+ }
+
+      emit_insn (gen_rtx_SET (SImode, reg0,
+      gen_rtx_AND (SImode, reg1, operands[2])));
+
+      /* Make sure we get the right *final* destination.  */
+      if (! REG_P (operands[0]))
+ emit_move_insn (operands[0], reg0);
+
+      DONE;
+    }
+})
+
+;; Some special cases of andsi3.
+
+;; AND with 255 or 65535 into a register is a zero-extending load,
+;; movu.b / movu.w (%z2 presumably picks the size from the mask value
+;; -- TODO confirm against cris.c).
+(define_insn "*andsi_movu"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (and:SI (match_operand:SI 1 "nonimmediate_operand" "%r,Q,To")
+ (match_operand:SI 2 "const_int_operand" "n,n,n")))]
+  "(INTVAL (operands[2]) == 255 || INTVAL (operands[2]) == 65535)
+   && !side_effects_p (operands[1])"
+  "movu.%z2 %1,%0"
+  [(set_attr "slottable" "yes,yes,no")])
+
+;; AND with -256 or -65536 on an operand that is also the destination
+;; is a clear of the low byte or word.
+(define_insn "*andsi_clear"
+  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,Q,Q,To,To")
+ (and:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0,0,0,0,0")
+ (match_operand:SI 2 "const_int_operand" "P,n,P,n,P,n")))]
+  "(INTVAL (operands[2]) == -65536 || INTVAL (operands[2]) == -256)
+   && !side_effects_p (operands[0])"
+  "@
+   cLear.b %0
+   cLear.w %0
+   cLear.b %0
+   cLear.w %0
+   cLear.b %0
+   cLear.w %0"
+  [(set_attr "slottable" "yes,yes,yes,yes,no,no")
+   (set_attr "cc" "none")])
+
+;; This is a catch-all pattern, taking care of everything that was not
+;; matched in the insns above.
+;;
+;; Sidenote: the tightening from "nonimmediate_operand" to
+;; "register_operand" for operand 1 actually increased the register
+;; pressure (worse code).  That will hopefully change with an
+;; improved reload pass.
+
+(define_insn "*expanded_andsi_non_v32"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r, r,r")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,0,0, 0,r")
+ (match_operand:SI 2 "general_operand" "I,r,Q>,g,!To")))]
+  "!TARGET_V32"
+  "@
+   andq %2,%0
+   and.d %2,%0
+   and.d %2,%0
+   and.d %2,%0
+   and.d %2,%1,%0"
+  [(set_attr "slottable" "yes,yes,yes,no,no")])
+
+;; V32 catch-all AND; no three-operand memory ("!To") alternative here.
+(define_insn "*expanded_andsi_v32"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,0,0,0")
+ (match_operand:SI 2 "general_operand" "I,r,Q>,g")))]
+  "TARGET_V32"
+  "@
+   andq %2,%0
+   and.d %2,%0
+   and.d %2,%0
+   and.d %2,%0"
+  [(set_attr "slottable" "yes,yes,yes,no")
+   (set_attr "cc" "noov32")])
+
+;; For both QI and HI we may use the quick patterns.  This results in
+;; useless condition codes, but that is used rarely enough for it to
+;; normally be a win (could check ahead for use of cc0, but seems to be
+;; more pain than win).
+
+;; FIXME: See note for andsi3
+
+;; HImode AND expander; same structure as andsi3.  Note 65280 here is
+;; the HImode bit-pattern equivalent of -256 (0xff00).
+(define_expand "andhi3"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (and:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+  ""
+{
+  if (! (CONST_INT_P (operands[2])
+ && (((INTVAL (operands[2]) == -256
+       || INTVAL (operands[2]) == 65280)
+      && rtx_equal_p (operands[1], operands[0]))
+     || (INTVAL (operands[2]) == 255
+ && REG_P (operands[0])))))
+    {
+      /* See comment for andsi3.  */
+      rtx reg0 = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (HImode);
+      rtx reg1 = operands[1];
+
+      if (! REG_P (reg1))
+ {
+   emit_move_insn (reg0, reg1);
+   reg1 = reg0;
+ }
+
+      emit_insn (gen_rtx_SET (HImode, reg0,
+      gen_rtx_AND (HImode, reg1, operands[2])));
+
+      /* Make sure we get the right destination.  */
+      if (! REG_P (operands[0]))
+ emit_move_insn (operands[0], reg0);
+
+      DONE;
+    }
+})
+
+;; Some fast andhi3 special cases.
+
+;; HImode AND with 255 is a zero-extending byte load.
+(define_insn "*andhi_movu"
+  [(set (match_operand:HI 0 "register_operand" "=r,r,r")
+ (and:HI (match_operand:HI 1 "nonimmediate_operand" "r,Q,To")
+ (const_int 255)))]
+  "!side_effects_p (operands[1])"
+  "mOvu.b %1,%0"
+  [(set_attr "slottable" "yes,yes,no")])
+
+;; HImode AND with -256 is a clear of the low byte.
+(define_insn "*andhi_clear"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=r,Q,To")
+ (and:HI (match_operand:HI 1 "nonimmediate_operand" "0,0,0")
+ (const_int -256)))]
+  "!side_effects_p (operands[0])"
+  "cLear.b %0"
+  [(set_attr "slottable" "yes,yes,no")
+   (set_attr "cc" "none")])
+
+;; Catch-all andhi3 pattern.
+
+(define_insn "*expanded_andhi_non_v32"
+  [(set (match_operand:HI 0 "register_operand" "=r,r,r, r,r,r,r")
+ (and:HI (match_operand:HI 1 "register_operand" "%0,0,0, 0,0,0,r")
+ (match_operand:HI 2 "general_operand" "I,r,Q>,L,O,g,!To")))]
+
+;; Sidenote: the tightening from "general_operand" to
+;; "register_operand" for operand 1 actually increased the register
+;; pressure (worse code).  That will hopefully change with an
+;; improved reload pass.
+
+  "!TARGET_V32"
+  "@
+   andq %2,%0
+   and.w %2,%0
+   and.w %2,%0
+   and.w %2,%0
+   anDq %b2,%0
+   and.w %2,%0
+   and.w %2,%1,%0"
+ [(set_attr "slottable" "yes,yes,yes,no,yes,no,no")
+  (set_attr "cc" "clobber,normal,normal,normal,clobber,normal,normal")])
+
+;; V32 catch-all HImode AND; as for SImode, no "!To" alternative.
+(define_insn "*expanded_andhi_v32"
+  [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r")
+ (and:HI (match_operand:HI 1 "register_operand" "%0,0,0,0,0,0")
+ (match_operand:HI 2 "general_operand" "I,r,Q>,L,O,g")))]
+  "TARGET_V32"
+  "@
+   andq %2,%0
+   and.w %2,%0
+   and.w %2,%0
+   and.w %2,%0
+   anDq %b2,%0
+   and.w %2,%0"
+  [(set_attr "slottable" "yes,yes,yes,no,yes,no")
+   (set_attr "cc" "clobber,noov32,noov32,noov32,clobber,noov32")])
+
+;; A strict_low_part pattern.
+
+;; Note the use of (match_dup 0) for the first operand of the operation
+;; here.  Reload can't handle an operand pair where one is read-write
+;; and must match a read, like in:
+;; (insn 80 79 81 4
+;;  (set (strict_low_part
+;;        (subreg:QI (reg/v:SI 0 r0 [orig:36 data ] [36]) 0))
+;;       (and:QI
+;;        (subreg:QI (reg:SI 15 acr [orig:27 D.7531 ] [27]) 0)
+;;        (const_int -64 [0xf..fc0]))) x.c:126 147 {*andqi_lowpart_v32}
+;;  (nil))
+;; In theory, it could reload this as a movstrictqi of the register
+;; operand at the and:QI to the destination register and change the
+;; and:QI operand to the same as the read-write output operand and the
+;; result would be recognized, but it doesn't recognize that's a valid
+;; reload for a strict_low_part-destination; it just sees a "+" at the
+;; destination constraints.  Better than adding complexity to reload is
+;; to follow the lead of m68k (see comment that begins with "These insns
+;; must use MATCH_DUP") since prehistoric times and make it just a
+;; match_dup.  FIXME: a sanity-check in gen* to refuse an insn with
+;; input-constraints matching input-output-constraints, e.g. "+r" <- "0".
+
+;; AND into only the low 16 bits of a register, leaving the rest alone.
+(define_insn "*andhi_lowpart_non_v32"
+  [(set (strict_low_part
+ (match_operand:HI 0 "register_operand" "+r,r,r"))
+ (and:HI (match_dup 0)
+ (match_operand:HI 1 "general_operand" "r,Q>,g")))]
+  "!TARGET_V32"
+  "@
+   and.w %1,%0
+   and.w %1,%0
+   and.w %1,%0"
+  [(set_attr "slottable" "yes,yes,no")])
+
+;; V32 twin of the above; differs only in the cc attribute.
+(define_insn "*andhi_lowpart_v32"
+  [(set (strict_low_part
+ (match_operand:HI 0 "register_operand" "+r,r,r"))
+ (and:HI (match_dup 0)
+ (match_operand:HI 1 "general_operand" "r,Q>,g")))]
+  "TARGET_V32"
+  "@
+   and.w %1,%0
+   and.w %1,%0
+   and.w %1,%0"
+  [(set_attr "slottable" "yes,yes,no")
+   (set_attr "cc" "noov32")])
+
+;; QImode AND: a trivial expander (no special-case masks as for SI/HI),
+;; with separate non-V32/V32 insns and strict_low_part variants.
+(define_expand "andqi3"
+  [(set (match_operand:QI 0 "register_operand")
+ (and:QI (match_operand:QI 1 "register_operand")
+ (match_operand:QI 2 "general_operand")))]
+  ""
+  "")
+
+(define_insn "*andqi3_non_v32"
+  [(set (match_operand:QI 0 "register_operand" "=r,r,r, r,r,r")
+ (and:QI (match_operand:QI 1 "register_operand" "%0,0,0, 0,0,r")
+ (match_operand:QI 2 "general_operand" "I,r,Q>,O,g,!To")))]
+  "!TARGET_V32"
+  "@
+   andq %2,%0
+   and.b %2,%0
+   and.b %2,%0
+   andQ %b2,%0
+   and.b %2,%0
+   and.b %2,%1,%0"
+  [(set_attr "slottable" "yes,yes,yes,yes,no,no")
+   (set_attr "cc" "clobber,normal,normal,clobber,normal,normal")])
+
+(define_insn "*andqi3_v32"
+  [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,r")
+ (and:QI (match_operand:QI 1 "register_operand" "%0,0,0,0,0")
+ (match_operand:QI 2 "general_operand" "I,r,Q>,O,g")))]
+  "TARGET_V32"
+  "@
+   andq %2,%0
+   and.b %2,%0
+   and.b %2,%0
+   andQ %b2,%0
+   and.b %2,%0"
+  [(set_attr "slottable" "yes,yes,yes,yes,no")
+   (set_attr "cc" "clobber,noov32,noov32,clobber,noov32")])
+
+;; AND into only the low byte of a register (see the strict_low_part
+;; match_dup discussion above *andhi_lowpart_non_v32).
+(define_insn "*andqi_lowpart_non_v32"
+  [(set (strict_low_part
+ (match_operand:QI 0 "register_operand" "+r,r,r"))
+ (and:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "r,Q>,g")))]
+  "!TARGET_V32"
+  "@
+   and.b %1,%0
+   and.b %1,%0
+   and.b %1,%0"
+  [(set_attr "slottable" "yes,yes,no")])
+
+(define_insn "*andqi_lowpart_v32"
+  [(set (strict_low_part
+ (match_operand:QI 0 "register_operand" "+r,r,r"))
+ (and:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "r,Q>,g")))]
+  "TARGET_V32"
+  "@
+   and.b %1,%0
+   and.b %1,%0
+   and.b %1,%0"
+  [(set_attr "slottable" "yes,yes,no")
+   (set_attr "cc" "noov32")])
+
+;; Bitwise or.
+
+;; Same comment as anddi3 applies here - no need for such a pattern.
+
+;; It seems there's no need to jump through hoops to get good code such as
+;; with andsi3.
+
+;; One expander for all of QI/HI/SI (BWD iterator); mode-specific insns
+;; follow, each in non-V32 and V32 flavors.
+(define_expand "ior<mode>3"
+  [(set (match_operand:BWD 0 "register_operand")
+ (ior:BWD (match_operand:BWD 1 "register_operand")
+ (match_operand:BWD 2 "general_operand")))]
+  ""
+  "")
+
+(define_insn "*iorsi3_non_v32"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r, r,r,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0,0, 0,0,r")
+ (match_operand:SI 2 "general_operand" "I, r,Q>,n,g,!To")))]
+  "!TARGET_V32"
+  "@
+   orq %2,%0
+   or.d %2,%0
+   or.d %2,%0
+   oR.%s2 %2,%0
+   or.d %2,%0
+   or.d %2,%1,%0"
+  [(set_attr "slottable" "yes,yes,yes,no,no,no")
+   (set_attr "cc" "normal,normal,normal,clobber,normal,normal")])
+
+(define_insn "*iorsi3_v32"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0")
+ (match_operand:SI 2 "general_operand" "I,r,Q>,n,g")))]
+  "TARGET_V32"
+  "@
+   orq %2,%0
+   or.d %2,%0
+   or.d %2,%0
+   oR.%s2 %2,%0
+   or.d %2,%0"
+  [(set_attr "slottable" "yes,yes,yes,no,no")
+   (set_attr "cc" "noov32,noov32,noov32,clobber,noov32")])
+
+(define_insn "*iorhi3_non_v32"
+  [(set (match_operand:HI 0 "register_operand" "=r,r,r, r,r,r,r")
+ (ior:HI (match_operand:HI 1 "register_operand" "%0,0,0, 0,0,0,r")
+ (match_operand:HI 2 "general_operand" "I,r,Q>,L,O,g,!To")))]
+  "!TARGET_V32"
+  "@
+   orq %2,%0
+   or.w %2,%0
+   or.w %2,%0
+   or.w %2,%0
+   oRq %b2,%0
+   or.w %2,%0
+   or.w %2,%1,%0"
+  [(set_attr "slottable" "yes,yes,yes,no,yes,no,no")
+   (set_attr "cc" "clobber,normal,normal,normal,clobber,normal,normal")])
+
+(define_insn "*iorhi3_v32"
+  [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r")
+ (ior:HI (match_operand:HI 1 "register_operand" "%0,0,0,0,0,0")
+ (match_operand:HI 2 "general_operand" "I,r,Q>,L,O,g")))]
+  "TARGET_V32"
+  "@
+   orq %2,%0
+   or.w %2,%0
+   or.w %2,%0
+   or.w %2,%0
+   oRq %b2,%0
+   or.w %2,%0"
+  [(set_attr "slottable" "yes,yes,yes,no,yes,no")
+   (set_attr "cc" "clobber,noov32,noov32,noov32,clobber,noov32")])
+
+(define_insn "*iorqi3_non_v32"
+  [(set (match_operand:QI 0 "register_operand" "=r,r,r, r,r,r")
+ (ior:QI (match_operand:QI 1 "register_operand" "%0,0,0, 0,0,r")
+ (match_operand:QI 2 "general_operand" "I,r,Q>,O,g,!To")))]
+  "!TARGET_V32"
+  "@
+   orq %2,%0
+   or.b %2,%0
+   or.b %2,%0
+   orQ %b2,%0
+   or.b %2,%0
+   or.b %2,%1,%0"
+  [(set_attr "slottable" "yes,yes,yes,yes,no,no")
+   (set_attr "cc" "clobber,normal,normal,clobber,normal,normal")])
+
+(define_insn "*iorqi3_v32"
+  [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,r")
+ (ior:QI (match_operand:QI 1 "register_operand" "%0,0,0,0,0")
+ (match_operand:QI 2 "general_operand" "I,r,Q>,O,g")))]
+  "TARGET_V32"
+  "@
+   orq %2,%0
+   or.b %2,%0
+   or.b %2,%0
+   orQ %b2,%0
+   or.b %2,%0"
+  [(set_attr "slottable" "yes,yes,yes,yes,no")
+   (set_attr "cc" "clobber,noov32,noov32,clobber,noov32")])
+
+;; Exclusive-or
+
+;; See comment about "anddi3" for xordi3 - no need for such a pattern.
+;; FIXME: Do we really need the shorter variants?
+
+;; Register-only XOR; SImode variant keeps cc meaningful (noov32) ...
+(define_insn "xorsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "r")))]
+  ""
+  "xor %2,%0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; ... while the QI/HI variants use the full-width xor insn, so the
+;; condition codes reflect the whole register and must be "clobber".
+(define_insn "xor<mode>3"
+  [(set (match_operand:BW 0 "register_operand" "=r")
+ (xor:BW (match_operand:BW 1 "register_operand" "%0")
+ (match_operand:BW 2 "register_operand" "r")))]
+  ""
+  "xor %2,%0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "clobber")])
+
+;; Negation insns.
+
+;; Questionable use, here mostly as a (slightly usable) define_expand
+;; example.
+
+;; SFmode negate: load 1<<31 (the IEEE sign bit) into a scratch and XOR
+;; it into the value via *expanded_negsf2 below.
+(define_expand "negsf2"
+  [(set (match_dup 2)
+ (match_dup 3))
+   (parallel [(set (match_operand:SF 0 "register_operand" "=r")
+   (neg:SF (match_operand:SF 1
+    "register_operand" "0")))
+      (use (match_dup 2))])]
+  ""
+{
+  operands[2] = gen_reg_rtx (SImode);
+  operands[3] = GEN_INT (1 << 31);
+})
+
+;; The insn matched by the expander above; operand 2 holds the sign-bit
+;; mask prepared there.
+(define_insn "*expanded_negsf2"
+  [(set (match_operand:SF 0 "register_operand" "=r")
+ (neg:SF (match_operand:SF 1 "register_operand" "0")))
+   (use (match_operand:SI 2 "register_operand" "r"))]
+  ""
+  "xor %2,%0"
+  [(set_attr "slottable" "yes")])
+
+;; No "negdi2" although we could make one up that may be faster than
+;; the one in libgcc.
+
+;; Integer negate for QI/HI/SI via the neg[bwd] instruction.
+(define_insn "neg<mode>2"
+  [(set (match_operand:BWD 0 "register_operand" "=r")
+ (neg:BWD (match_operand:BWD 1 "register_operand" "r")))]
+  ""
+  "neg<m> %1,%0"
+  [(set_attr "slottable" "yes")])
+
+;; One-complements.
+
+;; See comment on anddi3 - no need for a DImode pattern.
+;; See also xor comment.
+
+(define_insn "one_cmplsi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operand:SI 1 "register_operand" "0")))]
+  ""
+  "not %0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; As with xor, the narrow variants operate on the full register, so cc
+;; is clobbered.
+(define_insn "one_cmpl<mode>2"
+  [(set (match_operand:BW 0 "register_operand" "=r")
+ (not:BW (match_operand:BW 1 "register_operand" "0")))]
+  ""
+  "not %0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "clobber")])
+
+;; Arithmetic/Logical shift right (and SI left).
+
+;; SImode shifts: quick form (q suffix) for a constant count,
+;; register form (.d) otherwise.
+(define_insn "<shlr>si3"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (shift:SI (match_operand:SI 1 "register_operand" "0")
+    (match_operand:SI 2 "nonmemory_operand" "Kcr")))]
+  ""
+{
+  if (REG_S_P (operands[2]))
+    return "<slr>.d %2,%0";
+
+  return "<slr>q %2,%0";
+}
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; Since gcc gets lost, and forgets to zero-extend the source (or mask
+;; the destination) when it changes shifts of lower modes into SImode,
+;; it is better to make these expands anonymous patterns instead of
+;; the more correct define_insns.  This occurs when gcc thinks that it
+;; is better to widen to SImode and use immediate shift count.
+
+;; FIXME: Is this legacy or still true for gcc >= 2.7.2?
+
+;; FIXME: Can't parametrize sign_extend and zero_extend (before
+;; mentioning "shiftrt"), so we need two patterns.
+;; Narrow-mode arithmetic shift right: widen both operands to SImode
+;; (value sign-extended, count zero-extended), shift, take the low part.
+(define_expand "ashr<mode>3"
+  [(set (match_dup 3)
+ (sign_extend:SI (match_operand:BW 1 "nonimmediate_operand" "")))
+   (set (match_dup 4)
+ (zero_extend:SI (match_operand:BW 2 "nonimmediate_operand" "")))
+   (set (match_dup 5) (ashiftrt:SI (match_dup 3) (match_dup 4)))
+   (set (match_operand:BW 0 "general_operand" "")
+ (subreg:BW (match_dup 5) 0))]
+  ""
+{
+  int i;
+
+  for (i = 3; i < 6; i++)
+    operands[i] = gen_reg_rtx (SImode);
+})
+
+;; Narrow-mode logical shift right: as above but the value is
+;; zero-extended too.
+(define_expand "lshr<mode>3"
+  [(set (match_dup 3)
+ (zero_extend:SI (match_operand:BW 1 "nonimmediate_operand" "")))
+   (set (match_dup 4)
+ (zero_extend:SI (match_operand:BW 2 "nonimmediate_operand" "")))
+   (set (match_dup 5) (lshiftrt:SI (match_dup 3) (match_dup 4)))
+   (set (match_operand:BW 0 "general_operand" "")
+ (subreg:BW (match_dup 5) 0))]
+  ""
+{
+  int i;
+
+  for (i = 3; i < 6; i++)
+    operands[i] = gen_reg_rtx (SImode);
+})
+
+;; Register-count narrow right shift, matched after the expanders above.
+(define_insn "*expanded_<shlr><mode>"
+  [(set (match_operand:BW 0 "register_operand" "=r")
+ (shiftrt:BW (match_operand:BW 1 "register_operand" "0")
+      (match_operand:BW 2 "register_operand" "r")))]
+  ""
+  "<slr><m> %2,%0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; strict_low_part variant of the narrow right shifts.
+(define_insn "*<shlr><mode>_lowpart"
+  [(set (strict_low_part (match_operand:BW 0 "register_operand" "+r"))
+ (shiftrt:BW (match_dup 0)
+      (match_operand:BW 1 "register_operand" "r")))]
+  ""
+  "<slr><m> %1,%0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; Arithmetic/logical shift left.
+
+;; For narrower modes than SI, we can use lslq although it makes cc
+;; unusable.  The win is that we do not have to reload the shift-count
+;; into a register.
+
+;; Narrow shift left.  A constant count larger than the mode's top bit
+;; index (<nbitsm1>) would shift everything out, so emit moveq 0 instead
+;; (lslq would shift the full 32-bit register).
+(define_insn "ashl<mode>3"
+  [(set (match_operand:BW 0 "register_operand" "=r,r")
+ (ashift:BW (match_operand:BW 1 "register_operand" "0,0")
+     (match_operand:BW 2 "nonmemory_operand" "r,Kc")))]
+  ""
+{
+  return
+    (CONST_INT_P (operands[2]) && INTVAL (operands[2]) > <nbitsm1>)
+    ? "moveq 0,%0"
+    : (CONSTANT_P (operands[2])
+       ? "lslq %2,%0" : "lsl<m> %2,%0");
+}
+ [(set_attr "slottable" "yes")
+  (set_attr "cc" "noov32,clobber")])
+
+;; A strict_low_part matcher.
+
+(define_insn "*ashl<mode>_lowpart"
+  [(set (strict_low_part (match_operand:BW 0 "register_operand" "+r"))
+ (ashift:BW (match_dup 0)
+     (match_operand:HI 1 "register_operand" "r")))]
+  ""
+  "lsl<m> %1,%0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; Various strange insns that gcc likes.
+
+;; Fortunately, it is simple to construct an abssf (although it may not
+;; be very much used in practice).
+
+;; Clear the IEEE sign bit by shifting it out and back in.
+(define_insn "abssf2"
+  [(set (match_operand:SF 0 "register_operand" "=r")
+ (abs:SF (match_operand:SF 1 "register_operand" "0")))]
+  ""
+  "lslq 1,%0\;lsrq 1,%0")
+
+(define_insn "abssi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (abs:SI (match_operand:SI 1 "register_operand" "r")))]
+  ""
+  "abs %1,%0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; FIXME: GCC should be able to do these expansions itself.
+
+;; Narrow abs: sign-extend to SImode, take abs there, truncate back.
+(define_expand "abs<mode>2"
+  [(set (match_dup 2)
+ (sign_extend:SI (match_operand:BW 1 "general_operand" "")))
+   (set (match_dup 3) (abs:SI (match_dup 2)))
+   (set (match_operand:BW 0 "register_operand" "")
+ (subreg:BW (match_dup 3) 0))]
+  ""
+  "operands[2] = gen_reg_rtx (SImode); operands[3] = gen_reg_rtx (SImode);")
+
+;; Count leading zeros via the lz instruction.
+(define_insn "clzsi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (clz:SI (match_operand:SI 1 "register_operand" "r")))]
+  "TARGET_HAS_LZ"
+  "lz %1,%0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; Byte-swap via swapwb (swap words, then bytes within words).
+(define_insn "bswapsi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (bswap:SI (match_operand:SI 1 "register_operand" "0")))]
+  "TARGET_HAS_SWAP"
+  "swapwb %0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; This instruction swaps all bits in a register.
+;; That means that the most significant bit is put in the place
+;; of the least significant bit, and so on.
+
+(define_insn "cris_swap_bits"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "0")]
+    CRIS_UNSPEC_SWAP_BITS))]
+  "TARGET_HAS_SWAP"
+  "swapwbr %0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "noov32")])
+
+;; Implement ctz using two instructions, one for bit swap and one for clz.
+;; Defines a scratch register to avoid clobbering input.
+
+(define_expand "ctzsi2"
+  [(set (match_dup 2)
+ (match_operand:SI 1 "register_operand"))
+   (set (match_dup 2)
+ (unspec:SI [(match_dup 2)] CRIS_UNSPEC_SWAP_BITS))
+   (set (match_operand:SI 0 "register_operand")
+ (clz:SI (match_dup 2)))]
+  "TARGET_HAS_LZ && TARGET_HAS_SWAP"
+  "operands[2] = gen_reg_rtx (SImode);")
+
+;; Bound-insn.  Defined to be the same as an unsigned minimum, which is an
+;; operation supported by gcc.  Used in casesi, but used now and then in
+;; normal code too.
+
+;; On V32 the bound insn takes no memory operand, so force it into a
+;; register there.
+(define_expand "uminsi3"
+  [(set (match_operand:SI 0 "register_operand" "")
+ (umin:SI  (match_operand:SI 1 "register_operand" "")
+    (match_operand:SI 2 "general_operand" "")))]
+  ""
+{
+  if (MEM_P (operands[2]) && TARGET_V32)
+    operands[2] = force_reg (SImode, operands[2]);
+})
+
+;; Pre-V32 unsigned minimum.  Small constant bounds use the narrower
+;; bound.b/bound.w encodings; alternative 3 is the three-operand
+;; memory form.
+(define_insn "*uminsi3_non_v32"
+  [(set (match_operand:SI 0 "register_operand" "=r,r, r,r")
+ (umin:SI  (match_operand:SI 1 "register_operand" "%0,0, 0,r")
+    (match_operand:SI 2 "general_operand" "r,Q>,g,!To")))]
+  "!TARGET_V32"
+{
+  if (CONST_INT_P (operands[2]))
+    {
+      /* Constant operands are zero-extended, so only 32-bit operands
+  may be negative.  */
+      if (INTVAL (operands[2]) >= 0)
+ {
+   if (INTVAL (operands[2]) < 256)
+     return "bound.b %2,%0";
+
+   if (INTVAL (operands[2]) < 65536)
+     return "bound.w %2,%0";
+ }
+    }
+  else if (which_alternative == 3)
+    return "bound.d %2,%1,%0";
+
+  return "bound.d %2,%0";
+}
+ [(set_attr "slottable" "yes,yes,no,no")])
+
+;; V32 unsigned minimum ("bound").  No memory operand here; the uminsi3
+;; expander forces memory into a register for V32.  As in the non-V32
+;; twin, small non-negative constants use the shorter bound.b/bound.w
+;; encodings.
+(define_insn "*uminsi3_v32"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (umin:SI  (match_operand:SI 1 "register_operand" "%0,0")
+    (match_operand:SI 2 "nonmemory_operand" "r,i")))]
+  "TARGET_V32"
+{
+  /* Use CONST_INT_P for consistency with *uminsi3_non_v32 (it is the
+     standard predicate macro for GET_CODE (x) == CONST_INT).  */
+  if (CONST_INT_P (operands[2]))
+    {
+      /* Constant operands are zero-extended, so only 32-bit operands
+  may be negative.  */
+      if (INTVAL (operands[2]) >= 0)
+ {
+   if (INTVAL (operands[2]) < 256)
+     return "bound.b %2,%0";
+
+   if (INTVAL (operands[2]) < 65536)
+     return "bound.w %2,%0";
+ }
+    }
+
+  return "bound.d %2,%0";
+}
+ [(set_attr "slottable" "yes,no")])
+
+;; Jump and branch insns.
+
+;; Unconditional branch; %# emits the delay-slot filler.
+(define_insn "jump"
+  [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+  ""
+  "ba %l0%#"
+  [(set_attr "slottable" "has_slot")])
+
+;; Testcase gcc.c-torture/compile/991213-3.c fails if we allow a constant
+;; here, since the insn is not recognized as an indirect jump by
+;; jmp_uses_reg_or_mem used by computed_jump_p.  Perhaps it is a kludge to
+;; change from general_operand to nonimmediate_operand (at least the docs
+;; should be changed), but then again the pattern is called indirect_jump.
+(define_expand "indirect_jump"
+  [(set (pc) (match_operand:SI 0 "nonimmediate_operand"))]
+  ""
+{
+  if (TARGET_V32 && MEM_P (operands[0]))
+    operands[0] = force_reg (SImode, operands[0]);
+})
+
+(define_insn "*indirect_jump_non_v32"
+  [(set (pc) (match_operand:SI 0 "nonimmediate_operand" "rm"))]
+  "!TARGET_V32"
+  "jump %0")
+
+;; V32 variant: register only (the expander above forced memory to a
+;; register), and with a delay slot.
+(define_insn "*indirect_jump_v32"
+  [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
+  "TARGET_V32"
+  "jump %0%#"
+  [(set_attr "slottable" "has_slot")])
+
+;; Return insn.  Used whenever the epilogue is very simple; if it is only
+;; a single ret or jump [sp+].  No allocated stack space or saved
+;; registers are allowed.
+;; Note that for this pattern, although named, it is ok to check the
+;; context of the insn in the test, not only compiler switches.
+
+(define_expand "return"
+  [(return)]
+  "cris_simple_epilogue ()"
+  "cris_expand_return (cris_return_address_on_stack ()); DONE;")
+
+;; Emitted by the expander above: jump through the on-stack return
+;; address, or plain ret (with delay slot) when it is in a register.
+(define_insn "*return_expanded"
+  [(return)]
+  ""
+{
+  return cris_return_address_on_stack_for_return ()
+    ? "jump [$sp+]" : "ret%#";
+}
+  [(set (attr "slottable")
+ (if_then_else
+  (match_test "cris_return_address_on_stack_for_return ()")
+  (const_string "no")
+  (const_string "has_return_slot")))])
+
+(define_expand "prologue"
+  [(const_int 0)]
+  "TARGET_PROLOGUE_EPILOGUE"
+  "cris_expand_prologue (); DONE;")
+
+;; Note that the (return) from the expander itself is always the last
+;; insn in the epilogue.
+(define_expand "epilogue"
+  [(const_int 0)]
+  "TARGET_PROLOGUE_EPILOGUE"
+  "cris_expand_epilogue (); DONE;")
+
+;; Conditional branches.
+
+;; Standard compare-and-branch expander for QI/HI/SI, going through cc0.
+(define_expand "cbranch<mode>4"
+  [(set (cc0) (compare
+        (match_operand:BWD 1 "nonimmediate_operand")
+        (match_operand:BWD 2 "general_operand")))
+   (set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+        [(cc0) (const_int 0)])
+        (label_ref (match_operand 3 "" ""))
+        (pc)))]
+  ""
+  "")
+
+;; DImode compare-and-branch; V32 needs register operands.
+(define_expand "cbranchdi4"
+  [(set (cc0)
+ (compare (match_operand:DI 1 "nonimmediate_operand" "")
+   (match_operand:DI 2 "general_operand" "")))
+   (set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+        [(cc0) (const_int 0)])
+        (label_ref (match_operand 3 "" ""))
+        (pc)))]
+  ""
+{
+  if (TARGET_V32 && !REG_P (operands[1]))
+    operands[1] = force_reg (DImode, operands[1]);
+  if (TARGET_V32 && MEM_P (operands[2]))
+    operands[2] = force_reg (DImode, operands[2]);
+})
+
+
+;; We suffer from the same overflow-bit-gets-in-the-way problem as
+;; e.g. m68k, so we have to check if overflow bit is set on all "signed"
+;; conditions.
+
+;; ncond: conditions never affected by overflow; emitted directly.
+(define_insn "b<ncond:code>"
+  [(set (pc)
+ (if_then_else (ncond (cc0)
+       (const_int 0))
+       (label_ref (match_operand 0 "" ""))
+       (pc)))]
+  ""
+  "b<CC> %l0%#"
+  [(set_attr "slottable" "has_slot")])
+
+;; ocond: conditions meaningless when cc was set with overflow unusable
+;; (returning 0 makes output_asm_insn fail in that case).
+(define_insn "b<ocond:code>"
+  [(set (pc)
+ (if_then_else (ocond (cc0)
+       (const_int 0))
+       (label_ref (match_operand 0 "" ""))
+       (pc)))]
+  ""
+{
+  return
+    (cc_prev_status.flags & CC_NO_OVERFLOW)
+    ? 0 : "b<CC> %l0%#";
+}
+  [(set_attr "slottable" "has_slot")])
+
+;; rcond: signed conditions that fall back to an overflow-safe variant
+;; (<oCC>) when the previous cc-setter left overflow unusable.
+(define_insn "b<rcond:code>"
+  [(set (pc)
+ (if_then_else (rcond (cc0)
+       (const_int 0))
+       (label_ref (match_operand 0 "" ""))
+       (pc)))]
+  ""
+{
+  return
+    (cc_prev_status.flags & CC_NO_OVERFLOW)
+    ? "b<oCC> %l0%#" : "b<CC> %l0%#";
+}
+  [(set_attr "slottable" "has_slot")])
+
+;; Reversed anonymous patterns to the ones above, as mandated.
+
+(define_insn "*b<ncond:code>_reversed"
+  [(set (pc)
+ (if_then_else (ncond (cc0)
+       (const_int 0))
+       (pc)
+       (label_ref (match_operand 0 "" ""))))]
+  ""
+  "b<rCC> %l0%#"
+  [(set_attr "slottable" "has_slot")])
+
+(define_insn "*b<ocond:code>_reversed"
+  [(set (pc)
+ (if_then_else (ocond (cc0)
+       (const_int 0))
+       (pc)
+       (label_ref (match_operand 0 "" ""))))]
+  ""
+{
+  return
+    (cc_prev_status.flags & CC_NO_OVERFLOW)
+    ? 0 : "b<rCC> %l0%#";
+}
+  [(set_attr "slottable" "has_slot")])
+
+(define_insn "*b<rcond:code>_reversed"
+  [(set (pc)
+ (if_then_else (rcond (cc0)
+       (const_int 0))
+       (pc)
+       (label_ref (match_operand 0 "" ""))))]
+  ""
+{
+  return
+    (cc_prev_status.flags & CC_NO_OVERFLOW)
+    ? "b<roCC> %l0%#" : "b<rCC> %l0%#";
+}
+  [(set_attr "slottable" "has_slot")])
+
+;; Set on condition: sCC.
+
+;; DImode compare-and-store; V32 needs register operands, as in
+;; cbranchdi4.
+(define_expand "cstoredi4"
+  [(set (cc0) (compare
+        (match_operand:DI 2 "nonimmediate_operand")
+        (match_operand:DI 3 "general_operand")))
+   (set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "ordered_comparison_operator"
+  [(cc0) (const_int 0)]))]
+  ""
+{
+  if (TARGET_V32 && !REG_P (operands[2]))
+    operands[2] = force_reg (DImode, operands[2]);
+  if (TARGET_V32 && MEM_P (operands[3]))
+    operands[3] = force_reg (DImode, operands[3]);
+})
+
+(define_expand "cstore<mode>4"
+  [(set (cc0) (compare
+        (match_operand:BWD 2 "nonimmediate_operand")
+        (match_operand:BWD 3 "general_operand")))
+   (set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "ordered_comparison_operator"
+  [(cc0) (const_int 0)]))]
+  ""
+  "")
+
+;; Like bCC, we have to check the overflow bit for
+;; signed conditions.
+
+;; ncond/rcond/ocond grouping mirrors the branch insns above.
+(define_insn "s<ncond:code>"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (ncond:SI (cc0) (const_int 0)))]
+  ""
+  "s<CC> %0"
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "none")])
+
+(define_insn "s<rcond:code>"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (rcond:SI (cc0) (const_int 0)))]
+  ""
+{
+  return
+    (cc_prev_status.flags & CC_NO_OVERFLOW)
+    ? "s<oCC> %0" : "s<CC> %0";
+}
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "none")])
+
+(define_insn "s<ocond:code>"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+ (ocond:SI (cc0) (const_int 0)))]
+  ""
+{
+  return
+    (cc_prev_status.flags & CC_NO_OVERFLOW)
+    ? 0 : "s<CC> %0";
+}
+  [(set_attr "slottable" "yes")
+   (set_attr "cc" "none")])
+
+;; Call insns.
+
+;; We need to make these patterns "expand", since the real operand is
+;; hidden in a (mem:QI ) inside operand[0] (call_value: operand[1]),
+;; and cannot be checked if it were a "normal" pattern.
+;; Note that "call" and "call_value" are *always* called with a
+;; mem-operand for operand 0 and 1 respectively. What happens for combined
+;; instructions is a different issue.
+
+;; Expander for "call": operand 0 is always a MEM (asserted); for PIC,
+;; rewrite the call address via cris_expand_pic_call_address.  The call
+;; clobbers srp, the return-address register.
+(define_expand "call"
+ [(parallel [(call (match_operand:QI 0 "cris_mem_call_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (clobber (reg:SI CRIS_SRP_REGNUM))])]
+ ""
+{
+ gcc_assert (MEM_P (operands[0]));
+ if (flag_pic)
+ cris_expand_pic_call_address (&operands[0]);
+})
+
+;; Accept *anything* as operand 1. Accept operands for operand 0 in
+;; order of preference.
+
+(define_insn "*expanded_call_non_v32"
+ [(call (mem:QI (match_operand:SI 0 "general_operand" "r,Q>,g"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI CRIS_SRP_REGNUM))]
+ "!TARGET_V32"
+ "jsr %0")
+
+;; V32 variant: constant or register addresses use jsr; the U and i
+;; alternatives (PLT/symbol) use pc-relative bsr.  All forms may carry a
+;; delay-slot insn (%#, "has_call_slot").
+(define_insn "*expanded_call_v32"
+ [(call
+ (mem:QI
+ (match_operand:SI 0 "cris_nonmemory_operand_or_callable_symbol" "n,r,U,i"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI CRIS_SRP_REGNUM))]
+ "TARGET_V32"
+ "@
+ jsr %0%#
+ jsr %0%#
+ bsr %0%#
+ bsr %0%#"
+ [(set_attr "slottable" "has_call_slot")])
+
+;; Parallel when calculating and reusing address of indirect pointer
+;; with simple offset. (Makes most sense with PIC.) It looks a bit
+;; wrong not to have the clobber last, but that's the way combine
+;; generates it (except it doesn't look into the *inner* mem, so this
+;; just matches a peephole2). FIXME: investigate that.
+;; Call through [rx+i] while also writing rx+i back into operand 3
+;; (side-effect addressing mode); the %S1 output modifier emits the
+;; index part.  Not for v32, and not when GOTPLT usage is to be avoided.
+(define_insn "*expanded_call_side"
+ [(call (mem:QI
+ (mem:SI
+ (plus:SI (match_operand:SI 0 "cris_bdap_operand" "%r, r,r")
+ (match_operand:SI 1 "cris_bdap_operand" "r>Rn,r,>Rn"))))
+ (match_operand 2 "" ""))
+ (clobber (reg:SI CRIS_SRP_REGNUM))
+ (set (match_operand:SI 3 "register_operand" "=*0,r,r")
+ (plus:SI (match_dup 0)
+ (match_dup 1)))]
+ "!TARGET_AVOID_GOTPLT && !TARGET_V32"
+ "jsr [%3=%0%S1]")
+
+;; Expander for "call_value": like "call" but with a value destination
+;; in operand 0; the callee address (always a MEM, asserted) is
+;; operand 1 and gets the same PIC rewriting.
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "cris_mem_call_operand" "")
+ (match_operand 2 "" "")))
+ (clobber (reg:SI CRIS_SRP_REGNUM))])]
+ ""
+{
+ gcc_assert (MEM_P (operands[1]));
+ if (flag_pic)
+ cris_expand_pic_call_address (&operands[1]);
+})
+
+;; Accept *anything* as operand 2. The validity other than "general" of
+;; operand 0 will be checked elsewhere. Accept operands for operand 1 in
+;; order of preference (Q includes r, but r is shorter, faster).
+;; We also accept a PLT symbol. We output it as [rPIC+sym:GOTPLT] rather
+;; than requiring getting rPIC + sym:PLT into a register.
+
+;; NOTE(review): the capitalized "Jsr" (vs. "jsr" in the plain call
+;; patterns) is presumably significant to the output machinery -- confirm
+;; against cris.c.
+(define_insn "*expanded_call_value_non_v32"
+ [(set (match_operand 0 "nonimmediate_operand" "=g,g,g")
+ (call (mem:QI (match_operand:SI 1 "general_operand" "r,Q>,g"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI CRIS_SRP_REGNUM))]
+ "!TARGET_V32"
+ "Jsr %1"
+ [(set_attr "cc" "clobber")])
+
+;; See similar call special-case.
+(define_insn "*expanded_call_value_side"
+ [(set (match_operand 0 "nonimmediate_operand" "=g,g,g")
+ (call
+ (mem:QI
+ (mem:SI
+ (plus:SI (match_operand:SI 1 "cris_bdap_operand" "%r, r,r")
+ (match_operand:SI 2 "cris_bdap_operand" "r>Rn,r,>Rn"))))
+ (match_operand 3 "" "")))
+ (clobber (reg:SI CRIS_SRP_REGNUM))
+ (set (match_operand:SI 4 "register_operand" "=*1,r,r")
+ (plus:SI (match_dup 1)
+ (match_dup 2)))]
+ "!TARGET_AVOID_GOTPLT && !TARGET_V32"
+ "Jsr [%4=%1%S2]"
+ [(set_attr "cc" "clobber")])
+
+;; V32 variant of call_value; like *expanded_call_v32 it accepts a
+;; constant, register, PLT or general symbol address, using jsr for the
+;; first two alternatives and pc-relative bsr for the last two, all with
+;; a call delay slot.  Use the symbolic CRIS_SRP_REGNUM (== 16) for the
+;; srp clobber, consistent with the other call patterns, instead of the
+;; bare register number.
+(define_insn "*expanded_call_value_v32"
+ [(set
+ (match_operand 0 "nonimmediate_operand" "=g,g,g,g")
+ (call
+ (mem:QI
+ (match_operand:SI 1 "cris_nonmemory_operand_or_callable_symbol" "n,r,U,i"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI CRIS_SRP_REGNUM))]
+ "TARGET_V32"
+ "@
+ Jsr %1%#
+ Jsr %1%#
+ Bsr %1%#
+ Bsr %1%#"
+ [(set_attr "cc" "clobber")
+ (set_attr "slottable" "has_call_slot")])
+
+;; Used in debugging. No use for the direct pattern; unfilled
+;; delayed-branches are taken care of by other means.
+
+;; A plain no-op; does not affect condition codes.
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "cc" "none")])
+
+;; Same as the gdb trap breakpoint, will cause a SIGTRAP for
+;; cris-linux* and crisv32-linux*, as intended. Will work in
+;; freestanding environments with sufficient framework.
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 8))]
+ "TARGET_TRAP_USING_BREAK8"
+ "break 8")
+
+;; We need to stop accesses to the stack after the memory is
+;; deallocated. Unfortunately, reorg doesn't look at naked clobbers,
+;; e.g. (insn ... (clobber (mem:BLK (stack_pointer_rtx)))) and we don't
+;; want to use a naked (unspec_volatile) as that would stop any
+;; scheduling in the epilogue. Hence we model it as a "real" insn that
+;; sets the memory in an unspecified manner. FIXME: Unfortunately it
+;; still has the effect of an unspec_volatile.
+;; Emits no code (zero length); exists purely as a scheduling barrier.
+(define_insn "cris_frame_deallocated_barrier"
+ [(set (mem:BLK (reg:SI CRIS_SP_REGNUM))
+ (unspec:BLK [(const_int 0)] CRIS_UNSPEC_FRAME_DEALLOC))]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+;; We expand on casesi so we can use "bound" and "add offset fetched from
+;; a table to pc" (adds.w [pc+%0.w],pc).
+
+;; Note: if you change the "parallel" (or add anything after it) in
+;; this expansion, you must change the macro ASM_OUTPUT_CASE_END
+;; accordingly, to add the default case at the end of the jump-table.
+
+;; Non-v32 casesi: copy the index (5), subtract the lower bound (6),
+;; clamp with umin against the (adjusted) range (7), then do the
+;; bounds-checked "add sign-extended table entry to pc" dispatch, with
+;; operand 4 as the default label and operand 3 the table label.
+(define_expand "cris_casesi_non_v32"
+ [(set (match_dup 5) (match_operand:SI 0 "general_operand" ""))
+ (set (match_dup 6)
+ (minus:SI (match_dup 5)
+ (match_operand:SI 1 "const_int_operand" "n")))
+ (set (match_dup 7)
+ (umin:SI (match_dup 6)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (parallel
+ [(set (pc)
+ (if_then_else
+ (ltu (match_dup 7) (match_dup 2))
+ (plus:SI (sign_extend:SI
+ (mem:HI
+ (plus:SI (mult:SI (match_dup 7) (const_int 2))
+ (pc))))
+ (pc))
+ (label_ref (match_operand 4 "" ""))))
+ (use (label_ref (match_operand 3 "" "")))])]
+ ""
+{
+ ;; Range is bounds-inclusive on input; adjust for the ltu test.
+ operands[2] = plus_constant (SImode, operands[2], 1);
+ operands[5] = gen_reg_rtx (SImode);
+ operands[6] = gen_reg_rtx (SImode);
+ operands[7] = gen_reg_rtx (SImode);
+})
+
+;; FIXME: Check effect of not JUMP_TABLES_IN_TEXT_SECTION.
+;; V32 casesi: same index computation (5..7), but the table address is
+;; materialized explicitly (8 from 11), the entry address computed (9),
+;; and the target address formed (10) before the dispatch branch.
+(define_expand "cris_casesi_v32"
+ [(set (match_dup 5) (match_operand:SI 0 "general_operand"))
+ (set (match_dup 6)
+ (minus:SI (match_dup 5)
+ (match_operand:SI 1 "const_int_operand")))
+ (set (match_dup 7)
+ (umin:SI (match_dup 6)
+ (match_operand:SI 2 "const_int_operand")))
+ (set (match_dup 8) (match_dup 11))
+ (set (match_dup 9)
+ (plus:SI (mult:SI (match_dup 7) (const_int 2))
+ (match_dup 8)))
+ (set (match_dup 10)
+ (plus:SI (sign_extend:SI (mem:HI (match_dup 9)))
+ (match_dup 9)))
+ (parallel
+ [(set (pc)
+ (if_then_else
+ (ltu (unspec [(const_int 0)] CRIS_UNSPEC_CASESI) (match_dup 2))
+ (match_dup 10)
+ (label_ref (match_operand 4 "" ""))))
+ (use (label_ref (match_dup 3)))])]
+ "TARGET_V32"
+{
+ int i;
+ rtx xlabel = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
+ for (i = 5; i <= 10; i++)
+ operands[i] = gen_reg_rtx (SImode);
+ operands[2] = plus_constant (SImode, operands[2], 1);
+
+ /* Don't forget to decorate labels too, for PIC. */
+ operands[11] = flag_pic
+ ? gen_rtx_CONST (Pmode,
+ gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xlabel),
+ CRIS_UNSPEC_PCREL))
+ : xlabel;
+})
+
+;; Standard casesi entry point: dispatch to the v32 or non-v32 variant.
+(define_expand "casesi"
+ [(match_operand:SI 0 "general_operand")
+ (match_operand:SI 1 "const_int_operand")
+ (match_operand:SI 2 "const_int_operand")
+ (match_operand 3 "" "")
+ (match_operand 4 "" "")]
+ ""
+{
+ if (TARGET_V32)
+ emit_insn (gen_cris_casesi_v32 (operands[0], operands[1], operands[2],
+ operands[3], operands[4]));
+ else
+ emit_insn (gen_cris_casesi_non_v32 (operands[0], operands[1], operands[2],
+ operands[3], operands[4]));
+ DONE;
+})
+
+;; Split-patterns. Some of them have modes unspecified. This
+;; should always be ok; if for no other reason sparc.md has it as
+;; well.
+;;
+;; When register_operand is specified for an operand, we can get a
+;; subreg as well (Axis-990331), so don't just assume that REG_P is true
+;; for a register_operand and that REGNO can be used as is. It is best to
+;; guard with REG_P, unless it is worth it to adjust for the subreg case.
+
+;; op [rx + 0],ry,rz
+;; The index to rx is optimized into zero, and gone.
+
+;; First, recognize bound [rx],ry,rz; where [rx] is zero-extended,
+;; and add/sub [rx],ry,rz, with zero or sign-extend on [rx].
+;; Split this into:
+;; move ry,rz
+;; op [rx],rz
+;; Lose if rz=ry or rx=rz.
+;; Call this op-extend-split.
+;; Do not match for V32; the addo and addi shouldn't be split
+;; up.
+
+;; op-extend-split (see the comment above): op (extend [rx]),ry -> rz
+;; becomes "move ry,rz; op (extend [rx]),rz", valid only when rz
+;; overlaps neither ry nor rx.
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 4 "cris_operand_extend_operator"
+ [(match_operand 1 "register_operand" "")
+ (match_operator
+ 3 "cris_extend_operator"
+ [(match_operand 2 "memory_operand" "")])]))]
+ "!TARGET_V32
+ && REG_P (operands[0])
+ && REG_P (operands[1])
+ && REGNO (operands[1]) != REGNO (operands[0])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && REG_P (XEXP (operands[2], 0))
+ && REGNO (XEXP (operands[2], 0)) != REGNO (operands[0])"
+ [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup
+ 4 [(match_dup 0)
+ (match_op_dup 3 [(match_dup 2)])]))]
+ "")
+
+;; As op-extend-split, but recognize and split op [rz],ry,rz into
+;; ext [rz],rz
+;; op ry,rz
+;; Do this for plus or bound only, being commutative operations, since we
+;; have swapped the operands.
+;; Call this op-extend-split-rx=rz
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 4 "cris_plus_or_bound_operator"
+ [(match_operand 1 "register_operand" "")
+ (match_operator
+ 3 "cris_extend_operator"
+ [(match_operand 2 "memory_operand" "")])]))]
+ "!TARGET_V32
+ && REG_P (operands[0])
+ && REG_P (operands[1])
+ && REGNO (operands[1]) != REGNO (operands[0])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && REG_P (XEXP (operands[2], 0))
+ && REGNO (XEXP (operands[2], 0)) == REGNO (operands[0])"
+ [(set (match_dup 0)
+ (match_op_dup 3 [(match_dup 2)]))
+ (set (match_dup 0)
+ (match_op_dup
+ 4 [(match_dup 0)
+ (match_dup 1)]))]
+ "")
+
+;; As the op-extend-split, but swapped operands, and only for
+;; plus or bound, being the commutative extend-operators. FIXME: Why is
+;; this needed? Is it?
+;; Call this op-extend-split-swapped
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 4 "cris_plus_or_bound_operator"
+ [(match_operator
+ 3 "cris_extend_operator"
+ [(match_operand 2 "memory_operand" "")])
+ (match_operand 1 "register_operand" "")]))]
+ "!TARGET_V32
+ && REG_P (operands[0])
+ && REG_P (operands[1])
+ && REGNO (operands[1]) != REGNO (operands[0])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && REG_P (XEXP (operands[2], 0))
+ && REGNO (XEXP (operands[2], 0)) != REGNO (operands[0])"
+ [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup
+ 4 [(match_dup 0)
+ (match_op_dup 3 [(match_dup 2)])]))]
+ "")
+
+;; As op-extend-split-rx=rz, but swapped operands, only for plus or
+;; bound. Call this op-extend-split-swapped-rx=rz.
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 4 "cris_plus_or_bound_operator"
+ [(match_operator
+ 3 "cris_extend_operator"
+ [(match_operand 2 "memory_operand" "")])
+ (match_operand 1 "register_operand" "")]))]
+ "!TARGET_V32
+ && REG_P (operands[0])
+ && REG_P (operands[1])
+ && REGNO (operands[1]) != REGNO (operands[0])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && REG_P (XEXP (operands[2], 0))
+ && REGNO (XEXP (operands[2], 0)) == REGNO (operands[0])"
+ [(set (match_dup 0)
+ (match_op_dup 3 [(match_dup 2)]))
+ (set (match_dup 0)
+ (match_op_dup
+ 4 [(match_dup 0)
+ (match_dup 1)]))]
+ "")
+
+;; As op-extend-split, but the mem operand is not extended.
+;;
+;; op [rx],ry,rz changed into
+;; move ry,rz
+;; op [rx],rz
+;; lose if ry=rz or rx=rz
+;; Call this op-extend.
+
+;; op-split (see the comment above): non-extended op [rx],ry -> rz
+;; becomes "move ry,rz; op [rx],rz"; valid only when rz overlaps
+;; neither ry nor rx.
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 3 "cris_orthogonal_operator"
+ [(match_operand 1 "register_operand" "")
+ (match_operand 2 "memory_operand" "")]))]
+ "!TARGET_V32
+ && REG_P (operands[0])
+ && REG_P (operands[1])
+ && REGNO (operands[1]) != REGNO (operands[0])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && REG_P (XEXP (operands[2], 0))
+ && REGNO (XEXP (operands[2], 0)) != REGNO (operands[0])"
+ [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup
+ 3 [(match_dup 0)
+ (match_dup 2)]))]
+ "")
+
+;; As op-extend-split-rx=rz, non-extended.
+;; Call this op-split-rx=rz
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 3 "cris_commutative_orth_op"
+ [(match_operand 2 "memory_operand" "")
+ (match_operand 1 "register_operand" "")]))]
+ "!TARGET_V32
+ && REG_P (operands[0])
+ && REG_P (operands[1])
+ && REGNO (operands[1]) != REGNO (operands[0])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && REG_P (XEXP (operands[2], 0))
+ && REGNO (XEXP (operands[2], 0)) != REGNO (operands[0])"
+ [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup
+ 3 [(match_dup 0)
+ (match_dup 2)]))]
+ "")
+
+;; As op-extend-split-swapped, nonextended.
+;; Call this op-split-swapped.
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 3 "cris_commutative_orth_op"
+ [(match_operand 1 "register_operand" "")
+ (match_operand 2 "memory_operand" "")]))]
+ "!TARGET_V32
+ && REG_P (operands[0]) && REG_P (operands[1])
+ && REGNO (operands[1]) != REGNO (operands[0])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && REG_P (XEXP (operands[2], 0))
+ && REGNO (XEXP (operands[2], 0)) == REGNO (operands[0])"
+ [(set (match_dup 0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (match_op_dup
+ 3 [(match_dup 0)
+ (match_dup 1)]))]
+ "")
+
+;; As op-extend-split-swapped-rx=rz, non-extended.
+;; Call this op-split-swapped-rx=rz.
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 3 "cris_orthogonal_operator"
+ [(match_operand 2 "memory_operand" "")
+ (match_operand 1 "register_operand" "")]))]
+ "!TARGET_V32
+ && REG_P (operands[0]) && REG_P (operands[1])
+ && REGNO (operands[1]) != REGNO (operands[0])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && REG_P (XEXP (operands[2], 0))
+ && REGNO (XEXP (operands[2], 0)) == REGNO (operands[0])"
+ [(set (match_dup 0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (match_op_dup
+ 3 [(match_dup 0)
+ (match_dup 1)]))]
+ "")
+
+(include "sync.md")
+
+;; Splits for all cases in side-effect insns where (possibly after reload
+;; and register allocation) rx and ry in [rx=ry+i] are equal.
+
+;; move.S1 [rx=rx+rz.S2],ry
+
+;; Side-effect splits for move/clear: when the address register written
+;; back by the [rx=ry+i] side effect is the same register as the base
+;; (rx == ry after register allocation), split into an explicit address
+;; computation followed by a plain memory access through the updated
+;; register.
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 6 "cris_mem_op"
+ [(plus:SI
+ (mult:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "register_operand" ""))]))
+ (set (match_operand:SI 4 "register_operand" "")
+ (plus:SI (mult:SI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)))])]
+ "REG_P (operands[3]) && REG_P (operands[4])
+ && REGNO (operands[3]) == REGNO (operands[4])"
+ [(set (match_dup 4) (plus:SI (mult:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0) (match_dup 5))]
+ "operands[5] = replace_equiv_address (operands[6], operands[3]);")
+
+;; move.S1 [rx=rx+i],ry
+
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 5 "cris_mem_op"
+ [(plus:SI (match_operand:SI 1 "cris_bdap_operand" "")
+ (match_operand:SI 2 "cris_bdap_operand" ""))]))
+ (set (match_operand:SI 3 "register_operand" "")
+ (plus:SI (match_dup 1)
+ (match_dup 2)))])]
+ "(rtx_equal_p (operands[3], operands[1])
+ || rtx_equal_p (operands[3], operands[2]))"
+ [(set (match_dup 3) (plus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (match_dup 4))]
+{
+ operands[4] = replace_equiv_address (operands[5], operands[3]);
+ ;; Reorder the addsi3 operands as needed for a valid add insn.
+ cris_order_for_addsi3 (operands, 1);
+})
+
+;; move.S1 ry,[rx=rx+rz.S2]
+
+(define_split
+ [(parallel
+ [(set (match_operator
+ 6 "cris_mem_op"
+ [(plus:SI
+ (mult:SI (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))
+ (match_operand:SI 2 "register_operand" ""))])
+ (match_operand 3 "register_operand" ""))
+ (set (match_operand:SI 4 "register_operand" "")
+ (plus:SI (mult:SI (match_dup 0)
+ (match_dup 1))
+ (match_dup 2)))])]
+ "REG_P (operands[2]) && REG_P (operands[4])
+ && REGNO (operands[4]) == REGNO (operands[2])"
+ [(set (match_dup 4) (plus:SI (mult:SI (match_dup 0) (match_dup 1))
+ (match_dup 2)))
+ (set (match_dup 5) (match_dup 3))]
+ "operands[5] = replace_equiv_address (operands[6], operands[4]);")
+
+;; move.S1 ry,[rx=rx+i]
+
+(define_split
+ [(parallel
+ [(set (match_operator
+ 6 "cris_mem_op"
+ [(plus:SI (match_operand:SI 0 "cris_bdap_operand" "")
+ (match_operand:SI 1 "cris_bdap_operand" ""))])
+ (match_operand 2 "register_operand" ""))
+ (set (match_operand:SI 3 "register_operand" "")
+ (plus:SI (match_dup 0)
+ (match_dup 1)))])]
+ "(rtx_equal_p (operands[3], operands[0])
+ || rtx_equal_p (operands[3], operands[1]))"
+ [(set (match_dup 3) (plus:SI (match_dup 0) (match_dup 1)))
+ (set (match_dup 5) (match_dup 2))]
+{
+ operands[5] = replace_equiv_address (operands[6], operands[3]);
+ cris_order_for_addsi3 (operands, 0);
+})
+
+;; clear.[bwd] [rx=rx+rz.S2]
+
+(define_split
+ [(parallel
+ [(set (mem:BWD (plus:SI
+ (mult:SI (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))
+ (match_operand:SI 2 "register_operand" "")))
+ (const_int 0))
+ (set (match_operand:SI 3 "register_operand" "")
+ (plus:SI (mult:SI (match_dup 0)
+ (match_dup 1))
+ (match_dup 2)))])]
+ "REG_P (operands[2]) && REG_P (operands[3])
+ && REGNO (operands[3]) == REGNO (operands[2])"
+ [(set (match_dup 3) (plus:SI (mult:SI (match_dup 0) (match_dup 1))
+ (match_dup 2)))
+ (set (mem:BWD (match_dup 3)) (const_int 0))]
+ "")
+
+;; clear.[bwd] [rx=rx+i]
+
+(define_split
+ [(parallel
+ [(set (mem:BWD
+ (plus:SI (match_operand:SI 0 "cris_bdap_operand" "")
+ (match_operand:SI 1 "cris_bdap_operand" "")))
+ (const_int 0))
+ (set (match_operand:SI 2 "register_operand" "")
+ (plus:SI (match_dup 0)
+ (match_dup 1)))])]
+ "(rtx_equal_p (operands[0], operands[2])
+ || rtx_equal_p (operands[2], operands[1]))"
+ [(set (match_dup 2) (plus:SI (match_dup 0) (match_dup 1)))
+ (set (mem:BWD (match_dup 2)) (const_int 0))]
+ "cris_order_for_addsi3 (operands, 0);")
+
+;; mov(s|u).S1 [rx=rx+rz.S2],ry
+
+;; Side-effect splits for extending moves and two-operand ops: same
+;; rx==ry redundancy case as above -- compute the address into the
+;; side-effect destination first, then do a plain (extended) access
+;; through it.
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 5 "cris_extend_operator"
+ [(mem (plus:SI
+ (mult:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "register_operand" "")))]))
+ (set (match_operand:SI 4 "register_operand" "")
+ (plus:SI (mult:SI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)))])]
+ "REG_P (operands[3])
+ && REG_P (operands[4])
+ && REGNO (operands[3]) == REGNO (operands[4])"
+ [(set (match_dup 4) (plus:SI (mult:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0) (match_op_dup 5 [(match_dup 6)]))]
+ "operands[6] = replace_equiv_address (XEXP (operands[5], 0), operands[4]);")
+
+;; mov(s|u).S1 [rx=rx+i],ry
+
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 4 "cris_extend_operator"
+ [(mem (plus:SI
+ (match_operand:SI 1 "cris_bdap_operand" "")
+ (match_operand:SI 2 "cris_bdap_operand" "")))]))
+ (set (match_operand:SI 3 "register_operand" "")
+ (plus:SI (match_dup 1)
+ (match_dup 2)))])]
+ "(rtx_equal_p (operands[1], operands[3])
+ || rtx_equal_p (operands[2], operands[3]))"
+ [(set (match_dup 3) (plus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (match_op_dup 4 [(match_dup 5)]))]
+{
+ operands[5] = replace_equiv_address (XEXP (operands[4], 0), operands[3]);
+ cris_order_for_addsi3 (operands, 1);
+})
+
+;; op.S1 [rx=rx+i],ry
+
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 5 "cris_orthogonal_operator"
+ [(match_operand 1 "register_operand" "")
+ (mem (plus:SI
+ (match_operand:SI 2 "cris_bdap_operand" "")
+ (match_operand:SI 3 "cris_bdap_operand" "")))]))
+ (set (match_operand:SI 4 "register_operand" "")
+ (plus:SI (match_dup 2)
+ (match_dup 3)))])]
+ "(rtx_equal_p (operands[4], operands[2])
+ || rtx_equal_p (operands[4], operands[3]))"
+ [(set (match_dup 4) (plus:SI (match_dup 2) (match_dup 3)))
+ (set (match_dup 0) (match_op_dup 5 [(match_dup 1) (match_dup 6)]))]
+{
+ operands[6] = replace_equiv_address (XEXP (operands[5], 1), operands[4]);
+ cris_order_for_addsi3 (operands, 2);
+})
+
+;; op.S1 [rx=rx+rz.S2],ry
+
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 6 "cris_orthogonal_operator"
+ [(match_operand 1 "register_operand" "")
+ (mem (plus:SI
+ (mult:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "const_int_operand" ""))
+ (match_operand:SI 4 "register_operand" "")))]))
+ (set (match_operand:SI 5 "register_operand" "")
+ (plus:SI (mult:SI (match_dup 2)
+ (match_dup 3))
+ (match_dup 4)))])]
+ "REG_P (operands[4])
+ && REG_P (operands[5])
+ && REGNO (operands[5]) == REGNO (operands[4])"
+ [(set (match_dup 5) (plus:SI (mult:SI (match_dup 2) (match_dup 3))
+ (match_dup 4)))
+ (set (match_dup 0) (match_op_dup 6 [(match_dup 1) (match_dup 7)]))]
+ "operands[7] = replace_equiv_address (XEXP (operands[6], 1), operands[5]);")
+
+;; op.S1 [rx=rx+rz.S2],ry (swapped)
+
+;; As the op splits above, but with the memory operand first
+;; (commutative ops only, since the operands are swapped).
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 6 "cris_commutative_orth_op"
+ [(mem (plus:SI
+ (mult:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "const_int_operand" ""))
+ (match_operand:SI 4 "register_operand" "")))
+ (match_operand 1 "register_operand" "")]))
+ (set (match_operand:SI 5 "register_operand" "")
+ (plus:SI (mult:SI (match_dup 2)
+ (match_dup 3))
+ (match_dup 4)))])]
+ "REG_P (operands[4])
+ && REG_P (operands[5])
+ && REGNO (operands[5]) == REGNO (operands[4])"
+ [(set (match_dup 5) (plus:SI (mult:SI (match_dup 2) (match_dup 3))
+ (match_dup 4)))
+ (set (match_dup 0) (match_op_dup 6 [(match_dup 7) (match_dup 1)]))]
+ "operands[7] = replace_equiv_address (XEXP (operands[6], 0), operands[5]);")
+
+;; op.S1 [rx=rx+i],ry (swapped)
+
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 5 "cris_commutative_orth_op"
+ [(mem
+ (plus:SI (match_operand:SI 2 "cris_bdap_operand" "")
+ (match_operand:SI 3 "cris_bdap_operand" "")))
+ (match_operand 1 "register_operand" "")]))
+ (set (match_operand:SI 4 "register_operand" "")
+ (plus:SI (match_dup 2)
+ (match_dup 3)))])]
+ "(rtx_equal_p (operands[4], operands[2])
+ || rtx_equal_p (operands[4], operands[3]))"
+ [(set (match_dup 4) (plus:SI (match_dup 2) (match_dup 3)))
+ (set (match_dup 0) (match_op_dup 5 [(match_dup 6) (match_dup 1)]))]
+{
+ operands[6] = replace_equiv_address (XEXP (operands[5], 0), operands[4]);
+ cris_order_for_addsi3 (operands, 2);
+})
+
+;; op(s|u).S1 [rx=rx+rz.S2],ry
+
+;; As above, for ops whose memory operand is also sign/zero-extended
+;; (operator 7/6 is the extend); the replacement mem is rebuilt inside a
+;; fresh extend rtx via gen_rtx_fmt_e.
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 6 "cris_operand_extend_operator"
+ [(match_operand 1 "register_operand" "")
+ (match_operator
+ 7 "cris_extend_operator"
+ [(mem (plus:SI
+ (mult:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "const_int_operand" ""))
+ (match_operand:SI 4 "register_operand" "")))])]))
+ (set (match_operand:SI 5 "register_operand" "")
+ (plus:SI (mult:SI (match_dup 2)
+ (match_dup 3))
+ (match_dup 4)))])]
+ "REG_P (operands[4])
+ && REG_P (operands[5])
+ && REGNO (operands[5]) == REGNO (operands[4])"
+ [(set (match_dup 5) (plus:SI (mult:SI (match_dup 2) (match_dup 3))
+ (match_dup 4)))
+ (set (match_dup 0) (match_op_dup 6 [(match_dup 1) (match_dup 8)]))]
+ "operands[8] = gen_rtx_fmt_e (GET_CODE (operands[7]), GET_MODE (operands[7]),
+ replace_equiv_address (XEXP (operands[7], 0),
+ operands[5]));")
+
+;; op(s|u).S1 [rx=rx+i],ry
+
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 5 "cris_operand_extend_operator"
+ [(match_operand 1 "register_operand" "")
+ (match_operator
+ 6 "cris_extend_operator"
+ [(mem
+ (plus:SI (match_operand:SI 2 "cris_bdap_operand" "")
+ (match_operand:SI 3 "cris_bdap_operand" "")
+ ))])]))
+ (set (match_operand:SI 4 "register_operand" "")
+ (plus:SI (match_dup 2)
+ (match_dup 3)))])]
+ "(rtx_equal_p (operands[4], operands[2])
+ || rtx_equal_p (operands[4], operands[3]))"
+ [(set (match_dup 4) (plus:SI (match_dup 2) (match_dup 3)))
+ (set (match_dup 0) (match_op_dup 5 [(match_dup 1) (match_dup 7)]))]
+{
+ operands[7] = gen_rtx_fmt_e (GET_CODE (operands[6]), GET_MODE (operands[6]),
+ replace_equiv_address (XEXP (operands[6], 0),
+ operands[4]));
+ cris_order_for_addsi3 (operands, 2);
+})
+
+;; op(s|u).S1 [rx=rx+rz.S2],ry (swapped, plus or bound)
+
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 7 "cris_plus_or_bound_operator"
+ [(match_operator
+ 6 "cris_extend_operator"
+ [(mem (plus:SI
+ (mult:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "const_int_operand" ""))
+ (match_operand:SI 4 "register_operand" "")))])
+ (match_operand 1 "register_operand" "")]))
+ (set (match_operand:SI 5 "register_operand" "")
+ (plus:SI (mult:SI (match_dup 2)
+ (match_dup 3))
+ (match_dup 4)))])]
+ "REG_P (operands[4]) && REG_P (operands[5])
+ && REGNO (operands[5]) == REGNO (operands[4])"
+ [(set (match_dup 5) (plus:SI (mult:SI (match_dup 2) (match_dup 3))
+ (match_dup 4)))
+ (set (match_dup 0) (match_op_dup 6 [(match_dup 8) (match_dup 1)]))]
+ "operands[8] = gen_rtx_fmt_e (GET_CODE (operands[6]), GET_MODE (operands[6]),
+ replace_equiv_address (XEXP (operands[6], 0),
+ operands[5]));")
+
+;; op(s|u).S1 [rx=rx+i],ry (swapped, plus or bound)
+
+(define_split
+ [(parallel
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 6 "cris_plus_or_bound_operator"
+ [(match_operator
+ 5 "cris_extend_operator"
+ [(mem (plus:SI
+ (match_operand:SI 2 "cris_bdap_operand" "")
+ (match_operand:SI 3 "cris_bdap_operand" "")))])
+ (match_operand 1 "register_operand" "")]))
+ (set (match_operand:SI 4 "register_operand" "")
+ (plus:SI (match_dup 2)
+ (match_dup 3)))])]
+ "(rtx_equal_p (operands[4], operands[2])
+ || rtx_equal_p (operands[4], operands[3]))"
+ [(set (match_dup 4) (plus:SI (match_dup 2) (match_dup 3)))
+ (set (match_dup 0) (match_op_dup 6 [(match_dup 7) (match_dup 1)]))]
+{
+ operands[7] = gen_rtx_fmt_e (GET_CODE (operands[5]), GET_MODE (operands[5]),
+ replace_equiv_address (XEXP (operands[5], 0),
+ operands[4]));
+ cris_order_for_addsi3 (operands, 2);
+})
+
+;; Splits for addressing prefixes that have no side-effects, so we can
+;; fill a delay slot. Never split if we lose something, though.
+
+;; If we have a
+;; move [indirect_ref],rx
+;; where indirect ref = {const, [r+], [r]}, it costs as much as
+;; move indirect_ref,rx
+;; move [rx],rx
+;; Take care not to allow indirect_ref = register.
+
+;; We're not allowed to generate copies of registers with different mode
+;; until after reload; copying pseudos upsets reload. CVS as of
+;; 2001-08-24, unwind-dw2-fde.c, _Unwind_Find_FDE ICE in
+;; cselib_invalidate_regno.
+
+;; Split a load through a double-indirect or constant address into an
+;; address load into (a Pmode view of) the destination register followed
+;; by a plain dereference -- only after reload, and only for general
+;; registers (see the comment above about copying pseudos).
+(define_split ; indir_to_reg_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operand 1 "indirect_operand" ""))]
+ "reload_completed
+ && REG_P (operands[0])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && (MEM_P (XEXP (operands[1], 0)) || CONSTANT_P (XEXP (operands[1], 0)))
+ && REGNO (operands[0]) < CRIS_LAST_GENERAL_REGISTER"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 0) (match_dup 3))]
+ "operands[2] = gen_rtx_REG (Pmode, REGNO (operands[0]));
+ operands[3] = replace_equiv_address (operands[1], operands[2]);
+ operands[4] = XEXP (operands[1], 0);")
+
+;; As the above, but MOVS and MOVU.
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 4 "cris_extend_operator"
+ [(match_operand 1 "indirect_operand" "")]))]
+ "reload_completed
+ && REG_P (operands[0])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && (MEM_P (XEXP (operands[1], 0))
+ || CONSTANT_P (XEXP (operands[1], 0)))"
+ [(set (match_dup 2) (match_dup 5))
+ (set (match_dup 0) (match_op_dup 4 [(match_dup 3)]))]
+ "operands[2] = gen_rtx_REG (Pmode, REGNO (operands[0]));
+ operands[3] = replace_equiv_address (XEXP (operands[4], 0), operands[2]);
+ operands[5] = XEXP (operands[1], 0);")
+
+;; Various peephole optimizations.
+;;
+;; Watch out: when you exchange one set of instructions for another, the
+;; condition codes setting must be the same, or you have to CC_INIT or
+;; whatever is appropriate, in the pattern before you emit the
+;; assembly text. This is best done here, not in cris_notice_update_cc,
+;; to keep changes local to their cause.
+;;
+;; Do not add patterns that you do not know will be matched.
+;; Please also add a self-contained testcase.
+
+;; We have trouble with and:s and shifts. Maybe something is broken in
+;; gcc? Or it could just be that bit-field insn expansion is a bit
+;; suboptimal when not having extzv insns.
+;; Testcase for the following four peepholes: gcc.dg/cris-peep2-xsrand.c
+
+;; Arithmetic-shift-right + and with a byte-sized mask: when the mask
+;; clears every bit the arithmetic shift could differ from a logical
+;; shift in, use lsr and a narrow QImode and.
+(define_peephole2 ; asrandb (peephole casesi+31)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:SI (match_dup 0)
+ (match_operand:SI 1 "const_int_operand" "")))
+ (set (match_dup 0)
+ (and:SI (match_dup 0)
+ (match_operand 2 "const_int_operand" "")))]
+ "INTVAL (operands[2]) > 31
+ && INTVAL (operands[2]) < 255
+ && INTVAL (operands[1]) > 23
+ /* Check that the and-operation enables us to use logical-shift. */
+ && (INTVAL (operands[2])
+ & ((HOST_WIDE_INT) -1 << (32 - INTVAL (operands[1])))) == 0"
+ [(set (match_dup 0) (lshiftrt:SI (match_dup 0) (match_dup 1)))
+ (set (match_dup 3) (and:QI (match_dup 3) (match_dup 4)))]
+ ;; FIXME: CC0 is valid except for the M bit.
+{
+ operands[3] = gen_rtx_REG (QImode, REGNO (operands[0]));
+ operands[4] = GEN_INT (trunc_int_for_mode (INTVAL (operands[2]), QImode));
+})
+
+;; As asrandb, but for word-sized masks and an HImode and.
+(define_peephole2 ; asrandw (peephole casesi+32)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:SI (match_dup 0)
+ (match_operand:SI 1 "const_int_operand" "")))
+ (set (match_dup 0)
+ (and:SI (match_dup 0) (match_operand 2 "const_int_operand" "")))]
+ "INTVAL (operands[2]) > 31
+ && INTVAL (operands[2]) < 65535
+ && INTVAL (operands[2]) != 255
+ && INTVAL (operands[1]) > 15
+ /* Check that the and-operation enables us to use logical-shift. */
+ && (INTVAL (operands[2])
+ & ((HOST_WIDE_INT) -1 << (32 - INTVAL (operands[1])))) == 0"
+ [(set (match_dup 0) (lshiftrt:SI (match_dup 0) (match_dup 1)))
+ (set (match_dup 3) (and:HI (match_dup 3) (match_dup 4)))]
+ ;; FIXME: CC0 is valid except for the M bit.
+{
+ operands[3] = gen_rtx_REG (HImode, REGNO (operands[0]));
+ operands[4] = GEN_INT (trunc_int_for_mode (INTVAL (operands[2]), HImode));
+})
+
+;; Logical-shift-right + and: already logical, so just narrow the and
+;; to QImode.
+(define_peephole2 ; lsrandb (peephole casesi+33)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 0)
+ (match_operand:SI 1 "const_int_operand" "")))
+ (set (match_dup 0)
+ (and:SI (match_dup 0) (match_operand 2 "const_int_operand" "")))]
+ "INTVAL (operands[2]) > 31
+ && INTVAL (operands[2]) < 255
+ && INTVAL (operands[1]) > 23"
+ [(set (match_dup 0) (lshiftrt:SI (match_dup 0) (match_dup 1)))
+ (set (match_dup 3) (and:QI (match_dup 3) (match_dup 4)))]
+ ;; FIXME: CC0 is valid except for the M bit.
+{
+ operands[3] = gen_rtx_REG (QImode, REGNO (operands[0]));
+ operands[4] = GEN_INT (trunc_int_for_mode (INTVAL (operands[2]), QImode));
+})
+
+;; As lsrandb, but narrowing the and to HImode.
+(define_peephole2 ; lsrandw (peephole casesi+34)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 0)
+ (match_operand:SI 1 "const_int_operand" "")))
+ (set (match_dup 0)
+ (and:SI (match_dup 0) (match_operand 2 "const_int_operand" "")))]
+ "INTVAL (operands[2]) > 31 && INTVAL (operands[2]) < 65535
+ && INTVAL (operands[2]) != 255
+ && INTVAL (operands[1]) > 15"
+ [(set (match_dup 0) (lshiftrt:SI (match_dup 0) (match_dup 1)))
+ (set (match_dup 3) (and:HI (match_dup 3) (match_dup 4)))]
+ ;; FIXME: CC0 is valid except for the M bit.
+{
+ operands[3] = gen_rtx_REG (HImode, REGNO (operands[0]));
+ operands[4] = GEN_INT (trunc_int_for_mode (INTVAL (operands[2]), HImode));
+})
+
+
+;; Change
+;; add.d n,rx
+;; move [rx],ry
+;; into
+;; move [rx=rx+n],ry
+;; when -128 <= n <= 127.
+;; This will reduce the size of the assembler code for n = [-128..127],
+;; and speed up accordingly. Don't match if the previous insn is
+;; (set rx rz) because that combination is matched by another peephole.
+;; No stable test-case.
+
+(define_peephole2 ; moversideqi (peephole casesi+35)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand 3 "register_operand" "")
+ (match_operator 4 "cris_mem_op" [(match_dup 0)]))]
+ "GET_MODE_SIZE (GET_MODE (operands[4])) <= UNITS_PER_WORD
+ && REGNO (operands[3]) != REGNO (operands[0])
+ && (cris_base_p (operands[1], true) || cris_base_p (operands[2], true))
+ && !satisfies_constraint_J (operands[2])
+ && !satisfies_constraint_N (operands[2])
+ && (INTVAL (operands[2]) >= -128 && INTVAL (operands[2]) < 128)
+ && TARGET_SIDE_EFFECT_PREFIXES"
+ [(parallel
+ [(set (match_dup 3) (match_dup 5))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])]
+ ;; Checking the previous insn is a bit too awkward for the condition.
+{
+ rtx prev = prev_nonnote_insn (curr_insn);
+ if (prev != NULL_RTX)
+ {
+ rtx set = single_set (prev);
+ if (set != NULL_RTX
+ && REG_S_P (SET_DEST (set))
+ && REGNO (SET_DEST (set)) == REGNO (operands[0])
+ && REG_S_P (SET_SRC (set)))
+ FAIL;
+ }
+ operands[5]
+ = replace_equiv_address (operands[4],
+ gen_rtx_PLUS (SImode,
+ operands[1], operands[2]));
+})
+
+;; Vice versa: move ry,[rx=rx+n]
+
+(define_peephole2 ; movemsideqi (peephole casesi+36)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operator 3 "cris_mem_op" [(match_dup 0)])
+ (match_operand 4 "register_operand" ""))]
+ "GET_MODE_SIZE (GET_MODE (operands[4])) <= UNITS_PER_WORD
+ && REGNO (operands[4]) != REGNO (operands[0])
+ && (cris_base_p (operands[1], true) || cris_base_p (operands[2], true))
+ && !satisfies_constraint_J (operands[2])
+ && !satisfies_constraint_N (operands[2])
+ && (INTVAL (operands[2]) >= -128 && INTVAL (operands[2]) < 128)
+ && TARGET_SIDE_EFFECT_PREFIXES"
+ [(parallel
+ [(set (match_dup 5) (match_dup 4))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])]
+ "operands[5]
+ = replace_equiv_address (operands[3],
+ gen_rtx_PLUS (SImode,
+ operands[1], operands[2]));")
+
+;; As above, change:
+;; add.d n,rx
+;; op.d [rx],ry
+;; into:
+;; op.d [rx=rx+n],ry
+;; Saves when n = [-128..127].
+;;
+;; Splitting and joining combinations for side-effect modes are slightly
+;; out of hand. They probably will not save the time they take typing in,
+;; not to mention the bugs that creep in. FIXME: Get rid of as many of
+;; the splits and peepholes as possible.
+;; No stable test-case.
+
+(define_peephole2 ; mover2side (peephole casesi+37)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand 3 "register_operand" "")
+ (match_operator 4 "cris_orthogonal_operator"
+ [(match_dup 3)
+ (match_operator
+ 5 "cris_mem_op" [(match_dup 0)])]))]
+ ;; FIXME: What about DFmode?
+ ;; Change to GET_MODE_SIZE (GET_MODE (operands[3])) <= UNITS_PER_WORD?
+ "GET_MODE (operands[3]) != DImode
+ && REGNO (operands[0]) != REGNO (operands[3])
+ && !satisfies_constraint_J (operands[2])
+ && !satisfies_constraint_N (operands[2])
+ && INTVAL (operands[2]) >= -128
+ && INTVAL (operands[2]) <= 127
+ && TARGET_SIDE_EFFECT_PREFIXES"
+ [(parallel
+ [(set (match_dup 3) (match_op_dup 4 [(match_dup 3) (match_dup 6)]))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])]
+ "operands[6]
+ = replace_equiv_address (operands[5],
+ gen_rtx_PLUS (SImode,
+ operands[1], operands[2]));")
+
+;; Sometimes, for some reason the pattern
+;; move x,rx
+;; add y,rx
+;; move [rx],rz
+;; will occur. Solve this, and likewise for to-memory.
+;; No stable test-case.
+
+(define_peephole2 ; moverside (peephole casesi+38)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "cris_bdap_biap_operand" ""))
+ (set (match_dup 0)
+ (plus:SI (match_operand:SI 2 "cris_bdap_biap_operand" "")
+ (match_operand:SI 3 "cris_bdap_biap_operand" "")))
+ (set (match_operand 4 "register_operand" "")
+ (match_operator 5 "cris_mem_op" [(match_dup 0)]))]
+ "(rtx_equal_p (operands[2], operands[0])
+ || rtx_equal_p (operands[3], operands[0]))
+ && cris_side_effect_mode_ok (PLUS, operands, 0,
+ (REG_S_P (operands[1])
+ ? 1
+ : (rtx_equal_p (operands[2], operands[0])
+ ? 3 : 2)),
+ (! REG_S_P (operands[1])
+ ? 1
+ : (rtx_equal_p (operands[2], operands[0])
+ ? 3 : 2)),
+ -1, 4)"
+ [(parallel
+ [(set (match_dup 4) (match_dup 6))
+ (set (match_dup 0) (plus:SI (match_dup 7) (match_dup 8)))])]
+{
+ rtx otherop
+ = rtx_equal_p (operands[2], operands[0]) ? operands[3] : operands[2];
+
+ /* Make sure we have canonical RTX so we match the insn pattern -
+ not a constant in the first operand. We also require the order
+ (plus reg mem) to match the final pattern. */
+ if (CONSTANT_P (otherop) || MEM_P (otherop))
+ {
+ operands[7] = operands[1];
+ operands[8] = otherop;
+ }
+ else
+ {
+ operands[7] = otherop;
+ operands[8] = operands[1];
+ }
+ operands[6]
+ = replace_equiv_address (operands[5],
+ gen_rtx_PLUS (SImode,
+ operands[7], operands[8]));
+})
+
+;; As above but to memory.
+;; FIXME: Split movemside and moverside into variants and prune
+;; the ones that don't trig.
+;; No stable test-case.
+
+(define_peephole2 ; movemside (peephole casesi+39)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "cris_bdap_biap_operand" ""))
+ (set (match_dup 0)
+ (plus:SI (match_operand:SI 2 "cris_bdap_biap_operand" "")
+ (match_operand:SI 3 "cris_bdap_biap_operand" "")))
+ (set (match_operator 4 "cris_mem_op" [(match_dup 0)])
+ (match_operand 5 "register_operand" ""))]
+ "(rtx_equal_p (operands[2], operands[0])
+ || rtx_equal_p (operands[3], operands[0]))
+ && cris_side_effect_mode_ok (PLUS, operands, 0,
+ (REG_S_P (operands[1])
+ ? 1
+ : (rtx_equal_p (operands[2], operands[0])
+ ? 3 : 2)),
+ (! REG_S_P (operands[1])
+ ? 1
+ : (rtx_equal_p (operands[2], operands[0])
+ ? 3 : 2)),
+ -1, 5)"
+ [(parallel
+ [(set (match_dup 6) (match_dup 5))
+ (set (match_dup 0) (plus:SI (match_dup 7) (match_dup 8)))])]
+{
+ rtx otherop
+ = rtx_equal_p (operands[2], operands[0]) ? operands[3] : operands[2];
+
+ /* Make sure we have canonical RTX so we match the insn pattern -
+ not a constant in the first operand. We also require the order
+ (plus reg mem) to match the final pattern. */
+ if (CONSTANT_P (otherop) || MEM_P (otherop))
+ {
+ operands[7] = operands[1];
+ operands[8] = otherop;
+ }
+ else
+ {
+ operands[7] = otherop;
+ operands[8] = operands[1];
+ }
+ operands[6]
+ = replace_equiv_address (operands[4],
+ gen_rtx_PLUS (SImode,
+ operands[7], operands[8]));
+})
+
+;; Another spotted bad code:
+;; move rx,ry
+;; move [ry],ry
+;; No stable test-case.
+
+(define_peephole2 ; movei (peephole casesi+42)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "register_operand" ""))
+ (set (match_operand 2 "register_operand" "")
+ (match_operator 3 "cris_mem_op" [(match_dup 0)]))]
+ "REGNO (operands[0]) == REGNO (operands[2])
+ && (REGNO_REG_CLASS (REGNO (operands[0]))
+ == REGNO_REG_CLASS (REGNO (operands[1])))
+ && GET_MODE_SIZE (GET_MODE (operands[2])) <= UNITS_PER_WORD"
+ [(set (match_dup 2) (match_dup 4))]
+ "operands[4] = replace_equiv_address (operands[3], operands[1]);")
+
+;; move.d [r10+16],r9
+;; and.d r12,r9
+;; change to
+;; and.d [r10+16],r12,r9
+;; With generalization of the operation, the size and the addressing mode.
+;; This seems to be the result of a quirk in register allocation
+;; missing the three-operand cases when having different predicates.
+;; Maybe that it matters that it is a commutative operation.
+;; This pattern helps that situation, but there's still the increased
+;; register pressure.
+;; Note that adding the noncommutative variant did not show any matches
+;; in ipps and cc1, so it's not here.
+;; No stable test-case.
+
+(define_peephole2 ; op3 (peephole casesi+44)
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator
+ 6 "cris_mem_op"
+ [(plus:SI
+ (match_operand:SI 1 "cris_bdap_biap_operand" "")
+ (match_operand:SI 2 "cris_bdap_biap_operand" ""))]))
+ (set (match_dup 0)
+ (match_operator
+ 5 "cris_commutative_orth_op"
+ [(match_operand 3 "register_operand" "")
+ (match_operand 4 "register_operand" "")]))]
+ "(rtx_equal_p (operands[3], operands[0])
+ || rtx_equal_p (operands[4], operands[0]))
+ && ! rtx_equal_p (operands[3], operands[4])
+ && (REG_S_P (operands[1]) || REG_S_P (operands[2]))
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD"
+ [(set (match_dup 0) (match_op_dup 5 [(match_dup 7) (match_dup 6)]))]
+ "operands[7]
+ = rtx_equal_p (operands[3], operands[0]) ? operands[4] : operands[3];")
+
+;; There seems to be no other way to make GCC (including 4.8/trunk at
+;; r186932) optimally reload an instruction that looks like
+;; and.d reg_or_mem,const_32__65535,other_reg
+;; where other_reg is the destination.
+;; It should be:
+;; movu.[bw] reg_or_mem,reg_32
+;; and.[bw] trunc_int_for_mode([bw], const_32__65535),reg_32 ;; or andq
+;; but it turns into:
+;; move.d reg_or_mem,reg_32
+;; and.d const_32__65535,reg_32
+;; Fix it with these two peephole2's.
+;; Testcases: gcc.dg/cris-peep2-andu1.c gcc.dg/cris-peep2-andu2.c
+
+(define_peephole2 ; andu (casesi+45)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "nonimmediate_operand" ""))
+ (set (match_operand:SI 2 "register_operand" "")
+ (and:SI (match_dup 0)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ;; Since the size of the memory access could be made different here,
+ ;; don't do this for a mem-volatile access.
+ "REGNO (operands[2]) == REGNO (operands[0])
+ && INTVAL (operands[3]) <= 65535 && INTVAL (operands[3]) >= 0
+ && !satisfies_constraint_I (operands[3])
+ && !side_effects_p (operands[1])
+ && (!REG_P (operands[1])
+ || REGNO (operands[1]) <= CRIS_LAST_GENERAL_REGISTER)"
+ ;; FIXME: CC0 valid except for M (i.e. CC_NOT_NEGATIVE).
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 5) (match_dup 6))]
+{
+ enum machine_mode zmode = INTVAL (operands[3]) <= 255 ? QImode : HImode;
+ enum machine_mode amode
+ = satisfies_constraint_O (operands[3]) ? SImode : zmode;
+ rtx op1
+ = (REG_S_P (operands[1])
+ ? gen_rtx_REG (zmode, REGNO (operands[1]))
+ : adjust_address (operands[1], zmode, 0));
+ operands[4]
+ = gen_rtx_ZERO_EXTEND (SImode, op1);
+ operands[5] = gen_rtx_REG (amode, REGNO (operands[0]));
+ operands[6]
+ = gen_rtx_AND (amode, gen_rtx_REG (amode, REGNO (operands[0])),
+ GEN_INT (trunc_int_for_mode (INTVAL (operands[3]),
+ amode == SImode
+ ? QImode : amode)));
+})
+
+;; Since r186861, gcc.dg/cris-peep2-andu2.c trigs this pattern, with which
+;; we fix up e.g.:
+;; movu.b 254,$r9.
+;; and.d $r10,$r9
+;; into:
+;; movu.b $r10,$r9
+;; andq -2,$r9.
+;; Only do this for values fitting the quick immediate operand.
+(define_peephole2 ; andqu (casesi+46)
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operand:SI 1 "const_int_operand"))
+ (set (match_dup 0)
+ (and:SI (match_dup 0) (match_operand:SI 2 "nonimmediate_operand")))]
+ ;; Since the size of the memory access will be made different here,
+ ;; don't do this for a volatile access or a post-incremented address.
+ "satisfies_constraint_O (operands[1])
+ && !side_effects_p (operands[2])
+ && !reg_overlap_mentioned_p (operands[0], operands[2])"
+ [(set (match_dup 0) (match_dup 3))
+ (set (match_dup 0) (and:SI (match_dup 0) (match_dup 4)))]
+{
+ enum machine_mode zmode = INTVAL (operands[2]) <= 255 ? QImode : HImode;
+ rtx op1
+ = (REG_S_P (operands[2])
+ ? gen_rtx_REG (zmode, REGNO (operands[2]))
+ : adjust_address (operands[2], zmode, 0));
+ operands[3] = gen_rtx_ZERO_EXTEND (SImode, op1);
+ operands[4] = GEN_INT (trunc_int_for_mode (INTVAL (operands[1]), QImode));
+})
+
+;; Try and avoid GOTPLT reads escaping a call: transform them into
+;; PLT. Curiously (but thankfully), peepholes for instructions
+;; *without side-effects* that just feed a call (or call_value) are
+;; not matched neither in a build or test-suite, so those patterns are
+;; omitted.
+
+;; A "normal" move where we don't check the consumer.
+
+(define_peephole2 ; gotplt-to-plt
+ [(set
+ (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI
+ 1 "cris_mem_op"
+ [(plus:SI
+ (reg:SI CRIS_GOT_REGNUM)
+ (const:SI
+ (unspec:SI [(match_operand:SI 2 "cris_general_operand_or_symbol" "")]
+ CRIS_UNSPEC_PLTGOTREAD)))]))]
+ "flag_pic
+ && cris_valid_pic_const (XEXP (XEXP (operands[1], 0), 1), true)
+ && REGNO_REG_CLASS (REGNO (operands[0])) == REGNO_REG_CLASS (0)"
+ [(set (match_dup 0) (const:SI (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLT_GOTREL)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (reg:SI CRIS_GOT_REGNUM)))]
+ "")
+
+;; And one set with a side-effect getting the PLTGOT offset.
+;; First call and call_value variants.
+
+(define_peephole2 ; gotplt-to-plt-side-call
+ [(parallel
+ [(set
+ (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI
+ 1 "cris_mem_op"
+ [(plus:SI
+ (reg:SI CRIS_GOT_REGNUM)
+ (const:SI
+ (unspec:SI [(match_operand:SI
+ 2 "cris_general_operand_or_symbol" "")]
+ CRIS_UNSPEC_PLTGOTREAD)))]))
+ (set (match_operand:SI 3 "register_operand" "")
+ (plus:SI (reg:SI CRIS_GOT_REGNUM)
+ (const:SI
+ (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLTGOTREAD))))])
+ (parallel [(call (mem:QI (match_dup 0))
+ (match_operand 4 "" ""))
+ (clobber (reg:SI CRIS_SRP_REGNUM))])]
+ "flag_pic
+ && cris_valid_pic_const (XEXP (XEXP (operands[1], 0), 1), true)
+ && peep2_reg_dead_p (2, operands[0])"
+ [(parallel [(call (mem:QI (match_dup 1))
+ (match_dup 4))
+ (clobber (reg:SI CRIS_SRP_REGNUM))
+ (set (match_dup 3)
+ (plus:SI (reg:SI CRIS_GOT_REGNUM)
+ (const:SI
+ (unspec:SI [(match_dup 2)]
+ CRIS_UNSPEC_PLTGOTREAD))))])]
+ "")
+
+(define_peephole2 ; gotplt-to-plt-side-call-value
+ [(parallel
+ [(set
+ (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI
+ 1 "cris_mem_op"
+ [(plus:SI
+ (reg:SI CRIS_GOT_REGNUM)
+ (const:SI
+ (unspec:SI [(match_operand:SI
+ 2 "cris_general_operand_or_symbol" "")]
+ CRIS_UNSPEC_PLTGOTREAD)))]))
+ (set (match_operand:SI 3 "register_operand" "")
+ (plus:SI (reg:SI CRIS_GOT_REGNUM)
+ (const:SI
+ (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLTGOTREAD))))])
+ (parallel [(set (match_operand 5 "" "")
+ (call (mem:QI (match_dup 0))
+ (match_operand 4 "" "")))
+ (clobber (reg:SI CRIS_SRP_REGNUM))])]
+ "flag_pic
+ && cris_valid_pic_const (XEXP (XEXP (operands[1], 0), 1), true)
+ && peep2_reg_dead_p (2, operands[0])"
+ [(parallel [(set (match_dup 5)
+ (call (mem:QI (match_dup 1))
+ (match_dup 4)))
+ (clobber (reg:SI CRIS_SRP_REGNUM))
+ (set (match_dup 3)
+ (plus:SI (reg:SI CRIS_GOT_REGNUM)
+ (const:SI
+ (unspec:SI [(match_dup 2)]
+ CRIS_UNSPEC_PLTGOTREAD))))])]
+ "")
+
+(define_peephole2 ; gotplt-to-plt-side
+ [(parallel
+ [(set
+ (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI
+ 1 "cris_mem_op"
+ [(plus:SI
+ (reg:SI CRIS_GOT_REGNUM)
+ (const:SI
+ (unspec:SI [(match_operand:SI
+ 2 "cris_general_operand_or_symbol" "")]
+ CRIS_UNSPEC_PLTGOTREAD)))]))
+ (set (match_operand:SI 3 "register_operand" "")
+ (plus:SI (reg:SI CRIS_GOT_REGNUM)
+ (const:SI
+ (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLTGOTREAD))))])]
+ "flag_pic
+ && cris_valid_pic_const (XEXP (XEXP (operands[1], 0), 1), true)
+ && REGNO_REG_CLASS (REGNO (operands[0])) == REGNO_REG_CLASS (0)"
+ [(set (match_dup 3)
+ (const:SI (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLTGOTREAD)))
+ (set (match_dup 3) (plus:SI (match_dup 3) (reg:SI CRIS_GOT_REGNUM)))
+ (set (match_dup 0)
+ (const:SI (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLT_GOTREL)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (reg:SI CRIS_GOT_REGNUM)))]
+ "")
+
+;; Local variables:
+;; mode:emacs-lisp
+;; comment-start: ";; "
+;; eval: (set-syntax-table (copy-sequence (syntax-table)))
+;; eval: (modify-syntax-entry ?[ "(]")
+;; eval: (modify-syntax-entry ?] ")[")
+;; eval: (modify-syntax-entry ?{ "(}")
+;; eval: (modify-syntax-entry ?} "){")
+;; eval: (setq indent-tabs-mode t)
+;; End:
diff --git a/gcc-4.8/gcc/config/cris/cris.opt b/gcc-4.8/gcc/config/cris/cris.opt
new file mode 100644
index 000000000..ad6a2789a
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/cris.opt
@@ -0,0 +1,202 @@
+; Options for the CRIS port of the compiler.
+
+; Copyright (C) 2005-2013 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+; TARGET_MUL_BUG: Whether or not to work around multiplication
+; instruction hardware bug when generating code for models where
+; it may be present. From the trouble report for Etrax 100 LX:
+; "A multiply operation may cause incorrect cache behaviour
+; under some specific circumstances. The problem can occur if
+; the instruction following the multiply instruction causes a
+; cache miss, and multiply operand 1 (source operand) bits
+; [31:27] matches the logical mapping of the mode register
+; address (0xb0....), and bits [9:2] of operand 1 matches the
+; TLB register address (0x258-0x25f). There is such a mapping
+; in kernel mode or when the MMU is off. Normally there is no
+; such mapping in user mode, and the problem will therefore
+; probably not occur in Linux user mode programs."
+;
+; We have no sure-fire way to know from within GCC that we're
+; compiling a user program. For example, -fpic/PIC is used in
+; libgcc which is linked into the kernel. However, the
+; workaround option -mno-mul-bug can be safely used per-package
+; when compiling programs. The same goes for general user-only
+; libraries such as glibc, since there's no user-space
+; driver-like program that gets a mapping of I/O registers (all
+; on the same page, including the TLB registers).
+mmul-bug-workaround
+Target Report Mask(MUL_BUG)
+Work around bug in multiplication instruction
+
+; TARGET_ETRAX4_ADD: Instruction-set additions from Etrax 4 and up.
+; (Just "lz".)
+metrax4
+Target Report Mask(ETRAX4_ADD)
+Compile for ETRAX 4 (CRIS v3)
+
+; See cris_handle_option.
+metrax100
+Target Report RejectNegative
+Compile for ETRAX 100 (CRIS v8)
+
+; See cris_handle_option.
+mno-etrax100
+Target Report RejectNegative Undocumented
+
+mpdebug
+Target Report Mask(PDEBUG)
+Emit verbose debug information in assembly code
+
+; TARGET_CCINIT: Whether to use condition-codes generated by
+; insns other than the immediately preceding compare/test insn.
+; Used to check for errors in notice_update_cc.
+mcc-init
+Target Report Mask(CCINIT)
+Do not use condition codes from normal instructions
+
+; TARGET_SIDE_EFFECT_PREFIXES: Whether to use side-effect
+; patterns. Used to debug the [rx=ry+i] type patterns.
+mside-effects
+Target Report RejectNegative Mask(SIDE_EFFECT_PREFIXES) Undocumented
+
+mno-side-effects
+Target Report RejectNegative InverseMask(SIDE_EFFECT_PREFIXES)
+Do not emit addressing modes with side-effect assignment
+
+; TARGET_STACK_ALIGN: Whether to *keep* (not force) alignment of
+; stack at 16 (or 32, depending on TARGET_ALIGN_BY_32) bits.
+mstack-align
+Target Report RejectNegative Mask(STACK_ALIGN) Undocumented
+
+mno-stack-align
+Target Report RejectNegative InverseMask(STACK_ALIGN)
+Do not tune stack alignment
+
+; TARGET_DATA_ALIGN: Whether to do alignment on individual
+; modifiable objects.
+mdata-align
+Target Report RejectNegative Mask(DATA_ALIGN) Undocumented
+
+mno-data-align
+Target Report RejectNegative InverseMask(DATA_ALIGN)
+Do not tune writable data alignment
+
+; TARGET_CONST_ALIGN: Whether to do alignment on individual
+; non-modifiable objects.
+mconst-align
+Target Report RejectNegative Mask(CONST_ALIGN) Undocumented
+
+mno-const-align
+Target Report RejectNegative InverseMask(CONST_ALIGN)
+Do not tune code and read-only data alignment
+
+; See cris_handle_option.
+m32-bit
+Target Report RejectNegative Undocumented
+
+; See cris_handle_option.
+m32bit
+Target Report RejectNegative
+Align code and data to 32 bits
+
+; See cris_handle_option.
+m16-bit
+Target Report RejectNegative Undocumented
+
+; See cris_handle_option.
+m16bit
+Target Report RejectNegative Undocumented
+
+; See cris_handle_option.
+m8-bit
+Target Report RejectNegative Undocumented
+
+; See cris_handle_option.
+m8bit
+Target Report RejectNegative
+Don't align items in code or data
+
+; TARGET_PROLOGUE_EPILOGUE: Whether or not to omit function
+; prologue and epilogue.
+mprologue-epilogue
+Target Report RejectNegative Mask(PROLOGUE_EPILOGUE) Undocumented
+
+mno-prologue-epilogue
+Target Report RejectNegative InverseMask(PROLOGUE_EPILOGUE)
+Do not emit function prologue or epilogue
+
+; We have to handle this m-option here since we can't wash it
+; off in both CC1_SPEC and CC1PLUS_SPEC.
+
+mbest-lib-options
+Target Report RejectNegative
+Use the most feature-enabling options allowed by other options
+
+; FIXME: The following comment relates to gcc before cris.opt.
+; Check if it's still valid:
+; We must call it "override-" since calling it "no-" will cause
+; gcc.c to forget it, if there's a "later" -mbest-lib-options.
+; Kludgy, but needed for some multilibbed files.
+moverride-best-lib-options
+Target Report RejectNegative
+Override -mbest-lib-options
+
+mcpu=
+Target Report RejectNegative Joined Undocumented Var(cris_cpu_str)
+
+march=
+Target Report RejectNegative Joined Var(cris_cpu_str)
+-march=ARCH Generate code for the specified chip or CPU version
+
+mtune=
+Target Report RejectNegative Joined Var(cris_tune_str)
+-mtune=ARCH Tune alignment for the specified chip or CPU version
+
+mmax-stackframe=
+Target Report RejectNegative Joined Var(cris_max_stackframe_str)
+-mmax-stackframe=SIZE Warn when a stackframe is larger than the specified size
+
+max-stackframe=
+Target Report RejectNegative Joined Undocumented Var(cris_max_stackframe_str)
+
+mtrap-using-break8
+Target Report Var(cris_trap_using_break8) Init(2)
+Emit traps as \"break 8\", default for CRIS v3 and up. If disabled, calls to abort() are used.
+
+mtrap-unaligned-atomic
+Target Report Var(cris_trap_unaligned_atomic) Init(2)
+Emit checks causing \"break 8\" instructions to execute when applying atomic builtins on misaligned memory
+
+munaligned-atomic-may-use-library
+Target Report Var(cris_atomics_calling_libfunc) Init(2)
+Handle atomic builtins that may be applied to unaligned data by calling library functions. Overrides -mtrap-unaligned-atomic.
+
+; TARGET_SVINTO: Currently this just affects alignment. FIXME:
+; Redundant with TARGET_ALIGN_BY_32, or put machine stuff here?
+; This and the others below could just as well be variables and
+; TARGET_* defines in cris.h.
+Mask(SVINTO)
+
+; TARGET_ALIGN_BY_32: Say that all alignment specifications say
+; to prefer 32 rather than 16 bits.
+Mask(ALIGN_BY_32)
+
+; TARGET_AVOID_GOTPLT is referred to in the .c and the .md so we
+; need to allocate the flag and macros here.
+Mask(AVOID_GOTPLT)
diff --git a/gcc-4.8/gcc/config/cris/elf.opt b/gcc-4.8/gcc/config/cris/elf.opt
new file mode 100644
index 000000000..7147f8d46
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/elf.opt
@@ -0,0 +1,25 @@
+; ELF-specific options for the CRIS port of the compiler.
+
+; Copyright (C) 2005-2013 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+melf
+Target Report RejectNegative Undocumented
+
+sim
+Driver JoinedOrMissing
diff --git a/gcc-4.8/gcc/config/cris/linux.h b/gcc-4.8/gcc/config/cris/linux.h
new file mode 100644
index 000000000..8d0941c65
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/linux.h
@@ -0,0 +1,150 @@
+/* Definitions for GCC. Part of the machine description for CRIS.
+ Copyright (C) 2001-2013 Free Software Foundation, Inc.
+ Contributed by Axis Communications. Written by Hans-Peter Nilsson.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* After the first "Node:" comment comes all preprocessor directives and
+ attached declarations described in the info files, the "Using and
+ Porting GCC" manual (uapgcc), in the same order as found in the "Target
+ macros" section in the gcc-2.9x CVS edition of 2000-03-17. FIXME: Not
+ really, but needs an update anyway.
+
+ There is no generic copy-of-uapgcc comment, you'll have to see uapgcc
+ for that. If applicable, there is a CRIS-specific comment. The order
+ of macro definitions follow the order in the manual. Every section in
+ the manual (node in the info pages) has an introductory `Node:
+ <subchapter>' comment. If no macros are defined for a section, only
+ the section-comment is present. */
+
+/* This file defines the macros for cris-axis-linux-gnu that are not
+ covered by cris.h, elfos.h and (config/)linux.h. */
+
+/* Make sure we have a valid TARGET_CPU_DEFAULT, so we can assume it
+ and take shortcuts below. */
+#ifndef TARGET_CPU_DEFAULT
+#error "TARGET_CPU_DEFAULT not defined"
+#elif (TARGET_CPU_DEFAULT+0) != 10 && (TARGET_CPU_DEFAULT+0) != 32
+#error "TARGET_CPU_DEFAULT must be 10 or 32, or this file be updated"
+#endif
+
+/* Node: Instruction Output */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+/* Node: Driver */
+/* These macros are CRIS-specific, but used in target driver macros. */
+
+#undef CRIS_CPP_SUBTARGET_SPEC
+#if TARGET_CPU_DEFAULT == 32
+# define CRIS_CPP_SUBTARGET_SPEC \
+ "%{pthread:-D_REENTRANT}\
+ %{!march=*:%{!mcpu=*:-D__arch_v32 -D__CRIS_arch_version=32}}"
+#else
+# define CRIS_CPP_SUBTARGET_SPEC \
+ "%{pthread:-D_REENTRANT}\
+ %{!march=*:%{!mcpu=*:-D__arch_v10 -D__CRIS_arch_version=10}}"
+#endif
+
+#undef CRIS_CC1_SUBTARGET_SPEC
+#if TARGET_CPU_DEFAULT == 32
+# define CRIS_CC1_SUBTARGET_SPEC \
+ "%{!march=*:%{!mcpu=*:-march=v32}}"
+#define CRIS_SUBTARGET_DEFAULT_ARCH MASK_AVOID_GOTPLT
+#else
+# define CRIS_CC1_SUBTARGET_SPEC \
+ "%{!march=*:%{!mcpu=*:-march=v10}}"
+#define CRIS_SUBTARGET_DEFAULT_ARCH 0
+#endif
+
+#undef CRIS_ASM_SUBTARGET_SPEC
+#if TARGET_CPU_DEFAULT == 32
+# define CRIS_ASM_SUBTARGET_SPEC \
+ "--em=criself \
+ %{!march=*:%{!mcpu=*:--march=v32}} \
+ %{!fleading-underscore:--no-underscore}\
+ %{fPIC|fpic|fPIE|fpie: --pic}"
+#else
+# define CRIS_ASM_SUBTARGET_SPEC \
+ "--em=criself \
+ %{!march=*:%{!mcpu=*:--march=v10}} \
+ %{!fleading-underscore:--no-underscore}\
+ %{fPIC|fpic|fPIE|fpie: --pic}"
+#endif
+
+/* Previously controlled by target_flags. */
+#undef TARGET_LINUX
+#define TARGET_LINUX 1
+
+#undef CRIS_SUBTARGET_DEFAULT
+#define CRIS_SUBTARGET_DEFAULT \
+ (MASK_SVINTO \
+ + MASK_ETRAX4_ADD \
+ + MASK_ALIGN_BY_32 \
+ + CRIS_SUBTARGET_DEFAULT_ARCH)
+
+#undef CRIS_DEFAULT_CPU_VERSION
+#define CRIS_DEFAULT_CPU_VERSION CRIS_CPU_NG
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
+
+#undef CRIS_LINK_SUBTARGET_SPEC
+#define CRIS_LINK_SUBTARGET_SPEC \
+ "-mcrislinux\
+ %{shared} %{static}\
+ %{symbolic:-Bdynamic} %{static:-Bstatic}\
+ %{!shared:%{!static:\
+ %{rdynamic:-export-dynamic}\
+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER "}}\
+ %{!r:%{O2|O3: --gc-sections}}"
+
+
+/* Node: Run-time Target */
+
+/* For the cris-*-linux* subtarget. */
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
+ if (flag_leading_underscore <= 0) \
+ builtin_define ("__NO_UNDERSCORES__"); \
+ } \
+ while (0)
+
+/* Node: Type Layout */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+/* Node: Sections */
+
+/* GNU/Linux has crti and crtn and does not need the
+ CRT_CALL_STATIC_FUNCTION trick in cris.h. */
+#undef CRT_CALL_STATIC_FUNCTION
+
+/*
+ * Local variables:
+ * eval: (c-set-style "gnu")
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/gcc-4.8/gcc/config/cris/linux.opt b/gcc-4.8/gcc/config/cris/linux.opt
new file mode 100644
index 000000000..bef56a01e
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/linux.opt
@@ -0,0 +1,33 @@
+; GNU/Linux-specific options for the CRIS port of the compiler.
+
+; Copyright (C) 2005-2013 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+; Provide a legacy -mlinux option.
+mlinux
+Target Report RejectNegative Undocumented
+
+mno-gotplt
+Target Report RejectNegative Mask(AVOID_GOTPLT)
+Together with -fpic and -fPIC, do not use GOTPLT references
+
+; There's a small added setup cost with using GOTPLT references
+; for the first (resolving) call, but should in total be a win
+; both in code-size and execution-time.
+mgotplt
+Target Report RejectNegative InverseMask(AVOID_GOTPLT) Undocumented
diff --git a/gcc-4.8/gcc/config/cris/predicates.md b/gcc-4.8/gcc/config/cris/predicates.md
new file mode 100644
index 000000000..040482ba9
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/predicates.md
@@ -0,0 +1,174 @@
+;; Operand and operator predicates for the GCC CRIS port.
+;; Copyright (C) 2005-2013 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; Operator predicates.
+
+(define_predicate "cris_orthogonal_operator"
+ (match_code "plus, minus, ior, and, umin"))
+
+(define_predicate "cris_commutative_orth_op"
+ (match_code "plus, ior, and, umin"))
+
+;; By the name, you might think we should include MULT. We don't because
+;; it doesn't accept the same addressing modes as the others (only
+;; registers) and there's also the problem of handling TARGET_MUL_BUG.
+
+(define_predicate "cris_operand_extend_operator"
+ (match_code "plus, minus, umin"))
+
+(define_predicate "cris_additive_operand_extend_operator"
+ (match_code "plus, minus"))
+
+(define_predicate "cris_extend_operator"
+ (match_code "zero_extend, sign_extend"))
+
+(define_predicate "cris_plus_or_bound_operator"
+ (match_code "plus, umin"))
+
+;; Used as an operator to get a handle on already-known-valid MEM rtx:es
+;; (no need to validate the address), where some address expression parts
+;; have their own match_operand.
+
+(define_predicate "cris_mem_op"
+ (match_code "mem"))
+
+(define_predicate "cris_load_multiple_op"
+ (and (match_code "parallel")
+ (match_test "cris_movem_load_rest_p (op, 0)")))
+
+(define_predicate "cris_store_multiple_op"
+ (and (match_code "parallel")
+ (match_test "cris_store_multiple_op_p (op)")))
+
+
+;; Operand helper predicates.
+
+(define_predicate "cris_bdap_const_operand"
+ (and (match_code "label_ref, symbol_ref, const_int, const_double, const")
+ (ior (not (match_test "flag_pic"))
+ (match_test "cris_valid_pic_const (op, true)"))))
+
+(define_predicate "cris_simple_address_operand"
+ (ior (match_operand:SI 0 "register_operand")
+ (and (match_code "post_inc")
+ (match_test "register_operand (XEXP (op, 0), Pmode)"))))
+
+(define_predicate "cris_simple_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "mem")
+ (match_test "cris_simple_address_operand (XEXP (op, 0),
+ Pmode)"))))
+
+;; The caller needs to use :SI.
+(define_predicate "cris_bdap_sign_extend_operand"
+; Disabled until <URL:http://gcc.gnu.org/ml/gcc-patches/2005-10/msg01376.html>
+; or <URL:http://gcc.gnu.org/ml/gcc-patches/2005-10/msg00940.html> is committed.
+ (match_test "0"))
+; (and (match_code "sign_extend")
+; (and (match_test "MEM_P (XEXP (op, 0))")
+; (match_test "cris_simple_address_operand (XEXP (XEXP (op, 0), 0),
+; Pmode)"))))
+
+;; FIXME: Should not have to test for 1.
+(define_predicate "cris_scale_int_operand"
+ (and (match_code "const_int")
+ (ior (ior (match_test "op == GEN_INT (4)")
+ (match_test "op == const2_rtx"))
+ (match_test "op == const1_rtx"))))
+
+;; FIXME: Should be able to assume (reg int).
+(define_predicate "cris_biap_mult_operand"
+ (and (match_code "mult")
+ (ior (and (match_test "register_operand (XEXP (op, 0), Pmode)")
+ (match_test "cris_scale_int_operand (XEXP (op, 1), Pmode)"))
+ (and (match_test "cris_scale_int_operand (XEXP (op, 0), Pmode)")
+ (match_test "register_operand (XEXP (op, 1), Pmode)")))))
+
+
+;; Operand predicates.
+
+;; This checks a part of an address, the one that is not a plain register
+;; for an addressing mode using BDAP.
+;; Allowed operands are either:
+;; a) a register
+;; b) a CONST operand (but not a symbol when generating PIC)
+;; c) a [r] or [r+] in SImode, or sign-extend from HI or QI.
+
+(define_predicate "cris_bdap_operand"
+ (ior (match_operand 0 "cris_bdap_const_operand")
+ (ior (match_operand:SI 0 "cris_simple_operand")
+ (match_operand:SI 0 "cris_bdap_sign_extend_operand"))))
+
+;; This is similar to cris_bdap_operand:
+;; It checks a part of an address, the one that is not a plain register
+;; for an addressing mode using BDAP or BIAP.
+;; Allowed operands are either:
+;; a) a register
+;; b) a CONST operand (but not a symbol when generating PIC)
+;; c) a mult of (1, 2 or 4) and a register
+;; d) a [r] or [r+] in SImode, or sign-extend from HI or QI. */
+
+(define_predicate "cris_bdap_biap_operand"
+ (ior (match_operand 0 "cris_bdap_operand")
+ (match_operand 0 "cris_biap_mult_operand")))
+
+;; Since with -fPIC, not all symbols are valid PIC symbols or indeed
+;; general_operands, we have to have a predicate that matches it for the
+;; "movsi" expander.
+;; FIXME: Can s/special_// when PR 20413 is fixed.
+
+(define_special_predicate "cris_general_operand_or_symbol"
+ (ior (match_operand 0 "general_operand")
+ (and (match_code "const, symbol_ref, label_ref")
+ ; The following test is actually just an assertion.
+ (match_test "cris_pic_symbol_type_of (op) != cris_no_symbol"))))
+
+;; A predicate for the anon movsi expansion, one that fits a PCREL
+;; operand as well as general_operand.
+
+(define_special_predicate "cris_general_operand_or_pic_source"
+ (ior (match_operand 0 "general_operand")
+ (and (match_test "flag_pic")
+ (match_test "cris_valid_pic_const (op, false)"))))
+
+;; Since a PLT symbol is not a general_operand, we have to have a
+;; predicate that matches it when we need it. We use this in the expanded
+;; "call" and "call_value" anonymous patterns.
+
+(define_predicate "cris_nonmemory_operand_or_callable_symbol"
+ (ior (match_operand 0 "nonmemory_operand")
+ (and (match_code "const")
+ (and
+ (match_test "GET_CODE (XEXP (op, 0)) == UNSPEC")
+ (ior
+ (match_test "XINT (XEXP (op, 0), 1) == CRIS_UNSPEC_PLT_PCREL")
+ (match_test "XINT (XEXP (op, 0), 1) == CRIS_UNSPEC_PCREL"))))))
+
+;; This matches a (MEM (general_operand)) or
+;; (MEM (cris_general_operand_or_symbol)). The second one isn't a valid
+;; memory_operand, so we need this predicate to recognize call
+;; destinations before we change them to a PLT operand (by wrapping in
+;; UNSPEC CRIS_UNSPEC_PLT).
+
+(define_predicate "cris_mem_call_operand"
+ (and (match_code "mem")
+ (ior (match_operand 0 "memory_operand")
+ (match_test "cris_general_operand_or_symbol (XEXP (op, 0),
+ Pmode)"))))
diff --git a/gcc-4.8/gcc/config/cris/sync.md b/gcc-4.8/gcc/config/cris/sync.md
new file mode 100644
index 000000000..a746431fd
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/sync.md
@@ -0,0 +1,314 @@
+;; GCC machine description for CRIS atomic memory sequences.
+;; Copyright (C) 2012-2013 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; The CRIS atomic support yields code in three flavors, depending on
+;; the CPU for which code is generated:
+;;
+;; - Plain old CRIS v0 (..v8)
+;; - CRIS v10 (as used in ETRAX 100 LX)
+;; - CRIS v32 (as used in ETRAX FS)
+;;
+;; The last two alternatives are similar, of LL/SC type. They may
+;; fail for other reasons; an exception, a cache miss or a bus request
+;; from other parts of the system. The difference between them is
+;; just in what condition-codes are used to track LL and success or
+;; failure for the store. See the chapter on integral read-write
+;; operations, chapter 1.13 in "ETRAX 100LX Programmer's Manual",
+;; <http://www.axis.com/files/tech_notes/etrax_100lx_prog_man-050519.pdf>
+;; and chapter 2.1 in "ETRAX FS Designer's reference",
+;; <http://www.axis.com/files/manuals/etrax_fs_des_ref-070821.pdf>.
+;; Note that the datum being stored has to be contained fully within a
+;; cache-line to be integral. A failure to store the data integrally
+;; will be flagged, but the store may still have happened in part,
+;; which translates most usefully into the data having to be
+;; "naturally aligned" to work. Natural alignment is verified in the
+;; generated code and will, for unaligned pointers, by default cause a
+;; "break 8" to be executed or optionally a call to abort().  Beware
+;; that options -m16bit and -m8bit may cause data to be unaligned
+;; where it was otherwise aligned. Data has a better chance of being
+;; aligned if it is declared with e.g. __attribute__ ((__align__ (4))).
+;;
+;; The "plain old v0..v8 flavor" just assumes there's a single CPU in
+;; the system, that no other parts of the system have access to memory
+;; used for atomic accesses and since there's no user mode without
+;; access to interrupt flags (another assumption), it just turns off
+;; interrupts while doing the access. Here, alignment is neither
+;; required nor asserted.
+
+(define_c_enum ""
+ [
+ CRIS_UNSPEC_ATOMIC_OP
+ CRIS_UNSPEC_ATOMIC_SWAP_MEM
+ CRIS_UNSPEC_ATOMIC_SWAP_BOOL
+ ])
+
+(define_constants [(CRIS_CCR_INTERRUPT_BIT 5)])
+
+;; We use "mult" as a placeholder for "nand" (which does not have a
+;; separate binary rtx operation) so we can use an iterator in the
+;; define_expand and define_insn and avoid having a separate
+;; mostly-identical copy. You will see the "mult" operator in rtl
+;; dumps, but it shouldn't matter as its use has one of its operands
+;; inside an unspec_volatile.
+
+(define_code_iterator atomic_op [plus minus ior and xor mult])
+
+(define_code_attr atomic_op_name
+ [(plus "add") (minus "sub") (and "and") (ior "or") (xor "xor") (mult "nand")])
+
+;; The operator nonatomic-operand can be memory, constant or register
+;; for all but xor. We can't use memory or addressing modes with
+;; side-effects though, so just use registers and literal constants.
+(define_code_attr atomic_op_op_cnstr
+ [(plus "ri") (minus "ri") (and "ri") (ior "ri") (xor "r") (mult "ri")])
+
+(define_code_attr atomic_op_op_pred
+ [(plus "nonmemory_operand") (minus "nonmemory_operand")
+ (and "nonmemory_operand") (ior "nonmemory_operand")
+ (xor "register_operand") (mult "nonmemory_operand")])
+
+;; Pairs of these are used to insert the "not" after the "and" for nand.
+(define_code_attr atomic_op_mnem_pre_op2 ;; Upper-case only to simplify testing.
+ [(plus "%P2") (minus "Sub.d %2") (and "And%q2 %2") (ior "Or%q2 %2") (xor "Xor %2")
+ (mult "aNd%q2 %2")])
+
+(define_code_attr atomic_op_mnem_post_op3
+ [(plus "") (minus "") (and "") (ior "") (xor "") (mult "not %3\;")])
+
+;; For SImode, emit "q" for operands -31..31.
+(define_mode_attr qm3 [(SI "%q3") (HI ".w") (QI ".b")])
+
+(define_expand "atomic_fetch_<atomic_op_name><mode>"
+ [(match_operand:BWD 0 "register_operand")
+ (match_operand:BWD 1 "memory_operand")
+ (match_operand:BWD 2 "<atomic_op_op_pred>")
+ (match_operand 3)
+ (atomic_op:BWD (match_dup 0) (match_dup 1))]
+ "<MODE>mode == QImode || !TARGET_ATOMICS_MAY_CALL_LIBFUNCS"
+{
+ enum memmodel mmodel = (enum memmodel) INTVAL (operands[3]);
+
+ if (<MODE>mode != QImode && TARGET_TRAP_UNALIGNED_ATOMIC)
+ cris_emit_trap_for_misalignment (operands[1]);
+
+ if (need_atomic_barrier_p (mmodel, true))
+ expand_mem_thread_fence (mmodel);
+
+ emit_insn (gen_cris_atomic_fetch_<atomic_op_name><mode>_1 (operands[0],
+ operands[1],
+ operands[2]));
+ if (need_atomic_barrier_p (mmodel, false))
+ expand_mem_thread_fence (mmodel);
+
+ DONE;
+})
+
+(define_insn "cris_atomic_fetch_<atomic_op_name><mode>_1"
+ [(set (match_operand:BWD 1 "memory_operand" "+Q")
+ (atomic_op:BWD
+ (unspec_volatile:BWD [(match_dup 1)] CRIS_UNSPEC_ATOMIC_OP)
+ ;; FIXME: improve constants more for plus, minus, and, ior.
+ ;; FIXME: handle memory operands without side-effects.
+ (match_operand:BWD 2 "<atomic_op_op_pred>" "<atomic_op_op_cnstr>")))
+ (set (match_operand:BWD 0 "register_operand" "=&r")
+ (match_dup 1))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "<MODE>mode == QImode || !TARGET_ATOMICS_MAY_CALL_LIBFUNCS"
+{
+ /* Can't be too sure; better ICE if this happens. */
+ gcc_assert (!reg_overlap_mentioned_p (operands[2], operands[1]));
+
+ if (TARGET_V32)
+ return
+ "clearf p\n"
+ ".Lsync.%=:\;"
+ "move<m> %1,%0\;"
+ "move.d %0,%3\;"
+ "<atomic_op_mnem_pre_op2>,%3\;<atomic_op_mnem_post_op3>"
+ "ax\;"
+ "move<m> %3,%1\;"
+ "bcs .Lsync.%=\;"
+ "clearf p";
+ else if (cris_cpu_version == 10)
+ return
+ "clearf\n"
+ ".Lsync.%=:\;"
+ "move<m> %1,%0\;"
+ "move.d %0,%3\;"
+ "<atomic_op_mnem_pre_op2>,%3\;<atomic_op_mnem_post_op3>"
+ "ax\;"
+ "move<m> %3,%1\;"
+ "bwf .Lsync.%=\;"
+ "clearf";
+ else
+ {
+ /* This one is for CRIS versions without load-locked-store-conditional
+ machinery; assume single-core-non-shared-memory without user
+ mode/supervisor mode distinction, and just disable interrupts
+ while performing the operation.
+ Rather than making this pattern more complex by freeing another
+ register or stack position to save condition codes (the value
+ of the interrupt-enabled bit), we check whether interrupts were
+ enabled before we disabled them and branch to a version
+ with/without afterwards re-enabling them. */
+ rtx ops[5];
+
+ /* We have no available macro to stringify CRIS_CCR_INTERRUPT_BIT. */
+ memcpy (ops, operands, sizeof(ops));
+ ops[4] = GEN_INT (CRIS_CCR_INTERRUPT_BIT);
+
+ output_asm_insn ("move $ccr,%3\;"
+ "di\;"
+ "move<m> %1,%0\;"
+ "btstq %4,%3",
+ ops);
+ return
+ "bmi .Lsync.irqon.%=\;"
+ "move.d %0,%3\;"
+
+ "<atomic_op_mnem_pre_op2>,%3\;<atomic_op_mnem_post_op3>"
+ "ba .Lsync.irqoff.%=\;"
+ "move<m> %3,%1\n"
+
+ ".Lsync.irqon.%=:\;"
+ "<atomic_op_mnem_pre_op2>,%3\;<atomic_op_mnem_post_op3>"
+ "move<m> %3,%1\;"
+ "ei\n"
+ ".Lsync.irqoff.%=:";
+ }
+})
+
+;; This pattern is more-or-less assumed to always exist if any of the
+;; other atomic patterns exist (see e.g. comment at the
+;; can_compare_and_swap_p call in omp-low.c, 4.8 era). We'd slightly
+;; prefer atomic_exchange<mode> over this, but having both would be
+;; redundant.
+;; FIXME: handle memory without side-effects for operand[3].
+(define_expand "atomic_compare_and_swap<mode>"
+ [(match_operand:SI 0 "register_operand")
+ (match_operand:BWD 1 "register_operand")
+ (match_operand:BWD 2 "memory_operand")
+ (match_operand:BWD 3 "nonmemory_operand")
+ (match_operand:BWD 4 "register_operand")
+ (match_operand 5)
+ (match_operand 6)
+ (match_operand 7)]
+ "<MODE>mode == QImode || !TARGET_ATOMICS_MAY_CALL_LIBFUNCS"
+{
+ enum memmodel mmodel = (enum memmodel) INTVAL (operands[6]);
+
+ if (<MODE>mode != QImode && TARGET_TRAP_UNALIGNED_ATOMIC)
+ cris_emit_trap_for_misalignment (operands[2]);
+
+ if (need_atomic_barrier_p (mmodel, true))
+ expand_mem_thread_fence (mmodel);
+
+ emit_insn (gen_cris_atomic_compare_and_swap<mode>_1 (operands[0],
+ operands[1],
+ operands[2],
+ operands[3],
+ operands[4]));
+ if (need_atomic_barrier_p (mmodel, false))
+ expand_mem_thread_fence (mmodel);
+
+ DONE;
+})
+
+(define_insn "cris_atomic_compare_and_swap<mode>_1"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI
+ [(match_operand:BWD 2 "memory_operand" "+Q")
+ (match_operand:BWD 3 "nonmemory_operand" "ri")]
+ CRIS_UNSPEC_ATOMIC_SWAP_BOOL))
+ (set (match_operand:BWD 1 "register_operand" "=&r") (match_dup 2))
+ (set (match_dup 2)
+ (unspec_volatile:BWD
+ [(match_dup 2)
+ (match_dup 3)
+ (match_operand:BWD 4 "register_operand" "r")]
+ CRIS_UNSPEC_ATOMIC_SWAP_MEM))]
+ "<MODE>mode == QImode || !TARGET_ATOMICS_MAY_CALL_LIBFUNCS"
+{
+ if (TARGET_V32)
+ return
+ "\n.Lsync.repeat.%=:\;"
+ "clearf p\;"
+ "move<m> %2,%1\;"
+ "cmp<qm3> %3,%1\;"
+ "bne .Lsync.after.%=\;"
+ "ax\;"
+
+ "move<m> %4,%2\;"
+ "bcs .Lsync.repeat.%=\n"
+ ".Lsync.after.%=:\;"
+ "seq %0";
+ else if (cris_cpu_version == 10)
+ return
+ "\n.Lsync.repeat.%=:\;"
+ "clearf\;"
+ "move<m> %2,%1\;"
+ "cmp<qm3> %3,%1\;"
+ "bne .Lsync.after.%=\;"
+ "ax\;"
+
+ "move<m> %4,%2\;"
+ "bwf .Lsync.repeat.%=\n"
+ ".Lsync.after.%=:\;"
+ "seq %0";
+ else
+ {
+ /* This one is for CRIS versions without load-locked-store-conditional
+ machinery; assume single-core-non-shared-memory without user
+ mode/supervisor mode distinction, and just disable interrupts
+ while performing the operation.
+ Rather than making this pattern more complex by freeing another
+ register or stack position to save condition codes (the value
+ of the interrupt-enabled bit), we check whether interrupts were
+ enabled before we disabled them and branch to a version
+ with/without afterwards re-enabling them. */
+ rtx ops[4];
+
+ /* We have no available macro to stringify CRIS_CCR_INTERRUPT_BIT. */
+ memcpy (ops, operands, sizeof(ops));
+ ops[3] = GEN_INT (CRIS_CCR_INTERRUPT_BIT);
+
+ output_asm_insn ("move $ccr,%0\;"
+ "di\;"
+ "move<m> %2,%1\;"
+ "btstq %3,%0",
+ ops);
+ return
+ "bmi .Lsync.irqon.%=\;"
+ "nop\;"
+
+ "cmp<qm3> %3,%1\;"
+ "bne .Lsync.after.%=\;"
+ "seq %0\;"
+ "ba .Lsync.after.%=\;"
+ "move<m> %4,%2\n"
+
+ ".Lsync.irqon.%=:\;"
+ "cmp<qm3> %3,%1\;"
+ "bne .Lsync.after.%=\;"
+ "seq %0\;"
+ "move<m> %4,%2\;"
+ "ei\n"
+ ".Lsync.after.%=:";
+ }
+})
diff --git a/gcc-4.8/gcc/config/cris/t-cris b/gcc-4.8/gcc/config/cris/t-cris
new file mode 100644
index 000000000..98fd36a84
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/t-cris
@@ -0,0 +1,29 @@
+#
+# t-cris
+#
+# The Makefile fragment to include when compiling gcc et al for CRIS.
+#
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+#
+# The makefile macros etc. are included in the order found in the
+# section "Target Fragment" in the gcc info-files (or the paper copy) of
+# "Using and Porting GCC"
+
+$(out_object_file): gt-cris.h
+gt-cris.h : s-gtype ; @true
diff --git a/gcc-4.8/gcc/config/cris/t-elfmulti b/gcc-4.8/gcc/config/cris/t-elfmulti
new file mode 100644
index 000000000..f4dfb8508
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/t-elfmulti
@@ -0,0 +1,30 @@
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+MULTILIB_OPTIONS = march=v10/march=v32
+MULTILIB_DIRNAMES = v10 v32
+MULTILIB_MATCHES = \
+ march?v10=mcpu?etrax100lx \
+ march?v10=mcpu?ng \
+ march?v10=march?etrax100lx \
+ march?v10=march?ng \
+ march?v10=march?v11 \
+ march?v10=mcpu?v11 \
+ march?v10=mcpu?v10 \
+ march?v32=mcpu?v32
+MULTILIB_EXTRA_OPTS = mbest-lib-options
diff --git a/gcc-4.8/gcc/config/cris/t-linux b/gcc-4.8/gcc/config/cris/t-linux
new file mode 100644
index 000000000..71a964936
--- /dev/null
+++ b/gcc-4.8/gcc/config/cris/t-linux
@@ -0,0 +1,5 @@
+# We *know* we have a limits.h in the glibc library, with extra
+# definitions needed for e.g. libgfortran.
+ifneq ($(inhibit_libc),true)
+LIMITS_H_TEST = :
+endif