Diffstat (limited to 'gcc-4.2.1/gcc/config/alpha')
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/alpha-modes.def | 28
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/alpha-protos.h | 137
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/alpha.c | 10805
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/alpha.h | 1593
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/alpha.md | 8122
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/alpha.opt | 135
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/crtfastmath.c | 46
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/elf.h | 426
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/ev4.md | 162
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/ev5.md | 195
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/ev6.md | 178
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/freebsd.h | 83
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/gnu.h | 26
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/lib1funcs.asm | 320
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/libgcc-alpha-ldbl.ver | 32
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/linux-elf.h | 56
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/linux-unwind.h | 82
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/linux.h | 85
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/netbsd.h | 83
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/openbsd.h | 99
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/osf.h | 213
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/osf5.h | 57
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/predicates.md | 589
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/qrnnd.asm | 167
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/sync.md | 315
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/t-alpha | 2
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/t-crtfm | 5
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/t-ieee | 2
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/t-linux | 1
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/t-osf-pthread | 5
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/t-osf4 | 32
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/t-unicosmk | 2
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/t-vms | 26
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/t-vms64 | 8
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/unicosmk.h | 441
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/va_list.h | 19
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms-cc.c | 356
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms-crt0-64.c | 99
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms-crt0.c | 71
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms-dwarf2.asm | 82
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms-dwarf2eh.asm | 37
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms-ld.c | 764
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms-psxcrt0-64.c | 128
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms-psxcrt0.c | 99
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms-unwind.h | 72
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms.h | 369
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms64.h | 32
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/vms_tramp.asm | 52
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/x-vms | 21
-rw-r--r--  gcc-4.2.1/gcc/config/alpha/xm-vms.h | 57
50 files changed, 26816 insertions, 0 deletions
diff --git a/gcc-4.2.1/gcc/config/alpha/alpha-modes.def b/gcc-4.2.1/gcc/config/alpha/alpha-modes.def
new file mode 100644
index 000000000..1fcf611ea
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/alpha-modes.def
@@ -0,0 +1,28 @@
+/* Alpha extra machine modes.
+ Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* 128-bit floating point.  This gets reset in override_options
+   if VAX float format is in use.  */
+FLOAT_MODE (TF, 16, ieee_quad_format);
+
+/* Vector modes. */
+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */
+VECTOR_MODE (INT, QI, 4); /* V4QI */
+VECTOR_MODE (INT, QI, 2); /* V2QI */
diff --git a/gcc-4.2.1/gcc/config/alpha/alpha-protos.h b/gcc-4.2.1/gcc/config/alpha/alpha-protos.h
new file mode 100644
index 000000000..ca284fb81
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/alpha-protos.h
@@ -0,0 +1,137 @@
+/* Prototypes for alpha.c functions used in the md file & elsewhere.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+extern int alpha_next_sequence_number;
+
+extern void literal_section (void);
+extern void override_options (void);
+extern int zap_mask (HOST_WIDE_INT);
+extern int direct_return (void);
+
+extern int alpha_sa_size (void);
+extern HOST_WIDE_INT alpha_initial_elimination_offset (unsigned int,
+ unsigned int);
+extern int alpha_pv_save_size (void);
+extern int alpha_using_fp (void);
+extern void alpha_expand_prologue (void);
+extern void alpha_expand_epilogue (void);
+extern void alpha_output_filename (FILE *, const char *);
+
+extern bool alpha_const_ok_for_letter_p (HOST_WIDE_INT, int);
+extern bool alpha_const_double_ok_for_letter_p (rtx, int);
+extern bool alpha_extra_constraint (rtx, int);
+
+extern rtx alpha_tablejump_addr_vec (rtx);
+extern rtx alpha_tablejump_best_label (rtx);
+
+extern bool alpha_legitimate_constant_p (rtx);
+extern bool alpha_legitimate_address_p (enum machine_mode, rtx, int);
+extern rtx alpha_legitimize_address (rtx, rtx, enum machine_mode);
+extern rtx alpha_legitimize_reload_address (rtx, enum machine_mode,
+ int, int, int);
+
+extern rtx split_small_symbolic_operand (rtx);
+
+extern void get_aligned_mem (rtx, rtx *, rtx *);
+extern rtx get_unaligned_address (rtx);
+extern rtx get_unaligned_offset (rtx, HOST_WIDE_INT);
+extern enum reg_class alpha_preferred_reload_class (rtx, enum reg_class);
+extern enum reg_class alpha_secondary_reload_class (enum reg_class,
+ enum machine_mode, rtx,
+ int);
+
+extern void alpha_set_memflags (rtx, rtx);
+extern bool alpha_split_const_mov (enum machine_mode, rtx *);
+extern bool alpha_expand_mov (enum machine_mode, rtx *);
+extern bool alpha_expand_mov_nobwx (enum machine_mode, rtx *);
+extern void alpha_expand_movmisalign (enum machine_mode, rtx *);
+extern void alpha_emit_floatuns (rtx[]);
+extern rtx alpha_emit_conditional_move (rtx, enum machine_mode);
+extern void alpha_split_tmode_pair (rtx[], enum machine_mode, bool);
+extern void alpha_split_tfmode_frobsign (rtx[], rtx (*)(rtx, rtx, rtx));
+extern void alpha_expand_unaligned_load (rtx, rtx, HOST_WIDE_INT,
+ HOST_WIDE_INT, int);
+extern void alpha_expand_unaligned_store (rtx, rtx, HOST_WIDE_INT,
+ HOST_WIDE_INT);
+extern int alpha_expand_block_move (rtx []);
+extern int alpha_expand_block_clear (rtx []);
+extern rtx alpha_expand_zap_mask (HOST_WIDE_INT);
+extern void alpha_expand_builtin_vector_binop (rtx (*)(rtx, rtx, rtx),
+ enum machine_mode,
+ rtx, rtx, rtx);
+extern rtx alpha_return_addr (int, rtx);
+extern rtx alpha_gp_save_rtx (void);
+extern void print_operand (FILE *, rtx, int);
+extern void print_operand_address (FILE *, rtx);
+extern void alpha_initialize_trampoline (rtx, rtx, rtx, int, int, int);
+
+extern void alpha_va_start (tree, rtx);
+extern rtx alpha_va_arg (tree, tree);
+extern rtx function_arg (CUMULATIVE_ARGS, enum machine_mode, tree, int);
+extern rtx function_value (tree, tree, enum machine_mode);
+
+extern void alpha_start_function (FILE *, const char *, tree);
+extern void alpha_end_function (FILE *, const char *, tree);
+
+extern int alpha_find_lo_sum_using_gp (rtx);
+
+#ifdef REAL_VALUE_TYPE
+extern int check_float_value (enum machine_mode, REAL_VALUE_TYPE *, int);
+#endif
+
+#ifdef RTX_CODE
+extern rtx alpha_emit_conditional_branch (enum rtx_code);
+extern rtx alpha_emit_setcc (enum rtx_code);
+extern int alpha_split_conditional_move (enum rtx_code, rtx, rtx, rtx, rtx);
+extern void alpha_emit_xfloating_arith (enum rtx_code, rtx[]);
+extern void alpha_emit_xfloating_cvt (enum rtx_code, rtx[]);
+extern void alpha_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
+extern void alpha_split_compare_and_swap (rtx, rtx, rtx, rtx, rtx);
+extern void alpha_expand_compare_and_swap_12 (rtx, rtx, rtx, rtx);
+extern void alpha_split_compare_and_swap_12 (enum machine_mode, rtx, rtx,
+ rtx, rtx, rtx, rtx, rtx);
+extern void alpha_split_lock_test_and_set (rtx, rtx, rtx, rtx);
+extern void alpha_expand_lock_test_and_set_12 (rtx, rtx, rtx);
+extern void alpha_split_lock_test_and_set_12 (enum machine_mode, rtx, rtx,
+ rtx, rtx, rtx);
+#endif
+
+extern rtx alpha_need_linkage (const char *, int);
+extern rtx alpha_use_linkage (rtx, tree, int, int);
+
+#if TARGET_ABI_OPEN_VMS
+extern enum avms_arg_type alpha_arg_type (enum machine_mode);
+extern rtx alpha_arg_info_reg_val (CUMULATIVE_ARGS);
+#endif
+
+extern rtx unicosmk_add_call_info_word (rtx);
+
+#if TARGET_ABI_UNICOSMK
+extern void unicosmk_defer_case_vector (rtx, rtx);
+extern void unicosmk_add_extern (const char *);
+extern void unicosmk_output_align (FILE *, int);
+extern void unicosmk_output_common (FILE *, const char *, int, int);
+extern int unicosmk_initial_elimination_offset (int, int);
+#endif
+
+extern int some_small_symbolic_operand_int (rtx *, void *);
+extern int tls_symbolic_operand_1 (rtx, int, int);
+extern rtx resolve_reload_operand (rtx);
diff --git a/gcc-4.2.1/gcc/config/alpha/alpha.c b/gcc-4.2.1/gcc/config/alpha/alpha.c
new file mode 100644
index 000000000..a9ae7e532
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/alpha.c
@@ -0,0 +1,10805 @@
+/* Subroutines used for code generation on the DEC Alpha.
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "recog.h"
+#include "expr.h"
+#include "optabs.h"
+#include "reload.h"
+#include "obstack.h"
+#include "except.h"
+#include "function.h"
+#include "toplev.h"
+#include "ggc.h"
+#include "integrate.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "debug.h"
+#include "langhooks.h"
+#include <splay-tree.h>
+#include "cfglayout.h"
+#include "tree-gimple.h"
+#include "tree-flow.h"
+#include "tree-stdarg.h"
+
+/* Specify which cpu to schedule for. */
+enum processor_type alpha_tune;
+
+/* Which cpu we're generating code for. */
+enum processor_type alpha_cpu;
+
+static const char * const alpha_cpu_name[] =
+{
+ "ev4", "ev5", "ev6"
+};
+
+/* Specify how accurate floating-point traps need to be. */
+
+enum alpha_trap_precision alpha_tp;
+
+/* Specify the floating-point rounding mode. */
+
+enum alpha_fp_rounding_mode alpha_fprm;
+
+/* Specify which things cause traps. */
+
+enum alpha_fp_trap_mode alpha_fptm;
+
+/* Save information from a "cmpxx" operation until the branch or scc is
+ emitted. */
+
+struct alpha_compare alpha_compare;
+
+/* Nonzero if inside of a function, because the Alpha asm can't
+ handle .files inside of functions. */
+
+static int inside_function = FALSE;
+
+/* The number of cycles of latency we should assume on memory reads. */
+
+int alpha_memory_latency = 3;
+
+/* Whether the function needs the GP. */
+
+static int alpha_function_needs_gp;
+
+/* The alias set for prologue/epilogue register save/restore. */
+
+static GTY(()) int alpha_sr_alias_set;
+
+/* The assembler name of the current function. */
+
+static const char *alpha_fnname;
+
+/* The next explicit relocation sequence number. */
+extern GTY(()) int alpha_next_sequence_number;
+int alpha_next_sequence_number = 1;
+
+/* The literal and gpdisp sequence numbers for this insn, as printed
+ by %# and %* respectively. */
+extern GTY(()) int alpha_this_literal_sequence_number;
+extern GTY(()) int alpha_this_gpdisp_sequence_number;
+int alpha_this_literal_sequence_number;
+int alpha_this_gpdisp_sequence_number;
+
+/* Costs of various operations on the different architectures. */
+
+struct alpha_rtx_cost_data
+{
+ unsigned char fp_add;
+ unsigned char fp_mult;
+ unsigned char fp_div_sf;
+ unsigned char fp_div_df;
+ unsigned char int_mult_si;
+ unsigned char int_mult_di;
+ unsigned char int_shift;
+ unsigned char int_cmov;
+ unsigned short int_div;
+};
+
+static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
+{
+ { /* EV4 */
+ COSTS_N_INSNS (6), /* fp_add */
+ COSTS_N_INSNS (6), /* fp_mult */
+ COSTS_N_INSNS (34), /* fp_div_sf */
+ COSTS_N_INSNS (63), /* fp_div_df */
+ COSTS_N_INSNS (23), /* int_mult_si */
+ COSTS_N_INSNS (23), /* int_mult_di */
+ COSTS_N_INSNS (2), /* int_shift */
+ COSTS_N_INSNS (2), /* int_cmov */
+ COSTS_N_INSNS (97), /* int_div */
+ },
+ { /* EV5 */
+ COSTS_N_INSNS (4), /* fp_add */
+ COSTS_N_INSNS (4), /* fp_mult */
+ COSTS_N_INSNS (15), /* fp_div_sf */
+ COSTS_N_INSNS (22), /* fp_div_df */
+ COSTS_N_INSNS (8), /* int_mult_si */
+ COSTS_N_INSNS (12), /* int_mult_di */
+ COSTS_N_INSNS (1) + 1, /* int_shift */
+ COSTS_N_INSNS (1), /* int_cmov */
+ COSTS_N_INSNS (83), /* int_div */
+ },
+ { /* EV6 */
+ COSTS_N_INSNS (4), /* fp_add */
+ COSTS_N_INSNS (4), /* fp_mult */
+ COSTS_N_INSNS (12), /* fp_div_sf */
+ COSTS_N_INSNS (15), /* fp_div_df */
+ COSTS_N_INSNS (7), /* int_mult_si */
+ COSTS_N_INSNS (7), /* int_mult_di */
+ COSTS_N_INSNS (1), /* int_shift */
+ COSTS_N_INSNS (2), /* int_cmov */
+ COSTS_N_INSNS (86), /* int_div */
+ },
+};
+
+/* Similar but tuned for code size instead of execution latency. The
+ extra +N is fractional cost tuning based on latency. It's used to
+ encourage use of cheaper insns like shift, but only if there's just
+ one of them. */
+
+static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
+{
+ COSTS_N_INSNS (1), /* fp_add */
+ COSTS_N_INSNS (1), /* fp_mult */
+ COSTS_N_INSNS (1), /* fp_div_sf */
+ COSTS_N_INSNS (1) + 1, /* fp_div_df */
+ COSTS_N_INSNS (1) + 1, /* int_mult_si */
+ COSTS_N_INSNS (1) + 2, /* int_mult_di */
+ COSTS_N_INSNS (1), /* int_shift */
+ COSTS_N_INSNS (1), /* int_cmov */
+ COSTS_N_INSNS (6), /* int_div */
+};
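+
+/* Illustrative note: COSTS_N_INSNS (N) expands to (N) * 4 in rtl.h, so
+   the "+ 1" and "+ 2" entries above charge a quarter and a half of an
+   insn respectively.  For example, int_mult_di at COSTS_N_INSNS (1) + 2
+   == 6 rates one multiply as cheaper than a two-insn shift/add
+   replacement (cost 8) but dearer than a single shift (cost 4) --
+   exactly the "only if there's just one of them" behavior described
+   above.  */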
+
+/* Get the number of args of a function in one of two ways. */
+#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
+#define NUM_ARGS current_function_args_info.num_args
+#else
+#define NUM_ARGS current_function_args_info
+#endif
+
+#define REG_PV 27
+#define REG_RA 26
+
+/* Declarations of static functions. */
+static struct machine_function *alpha_init_machine_status (void);
+static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
+
+#if TARGET_ABI_OPEN_VMS
+static void alpha_write_linkage (FILE *, const char *, tree);
+#endif
+
+static void unicosmk_output_deferred_case_vectors (FILE *);
+static void unicosmk_gen_dsib (unsigned long *);
+static void unicosmk_output_ssib (FILE *, const char *);
+static int unicosmk_need_dex (rtx);
+
+/* Implement TARGET_HANDLE_OPTION. */
+
+static bool
+alpha_handle_option (size_t code, const char *arg, int value)
+{
+ switch (code)
+ {
+ case OPT_mfp_regs:
+ if (value == 0)
+ target_flags |= MASK_SOFT_FP;
+ break;
+
+ case OPT_mieee:
+ case OPT_mieee_with_inexact:
+ target_flags |= MASK_IEEE_CONFORMANT;
+ break;
+
+ case OPT_mtls_size_:
+ if (value != 16 && value != 32 && value != 64)
+ error ("bad value %qs for -mtls-size switch", arg);
+ break;
+ }
+
+ return true;
+}
+
+#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
+/* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
+
+static const char *
+alpha_mangle_fundamental_type (tree type)
+{
+ if (TYPE_MAIN_VARIANT (type) == long_double_type_node
+ && TARGET_LONG_DOUBLE_128)
+ return "g";
+
+ /* For all other types, use normal C++ mangling. */
+ return NULL;
+}
+#endif
+
+/* Parse target option strings. */
+
+void
+override_options (void)
+{
+ static const struct cpu_table {
+ const char *const name;
+ const enum processor_type processor;
+ const int flags;
+ } cpu_table[] = {
+ { "ev4", PROCESSOR_EV4, 0 },
+ { "ev45", PROCESSOR_EV4, 0 },
+ { "21064", PROCESSOR_EV4, 0 },
+ { "ev5", PROCESSOR_EV5, 0 },
+ { "21164", PROCESSOR_EV5, 0 },
+ { "ev56", PROCESSOR_EV5, MASK_BWX },
+ { "21164a", PROCESSOR_EV5, MASK_BWX },
+ { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
+ { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
+ { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
+ { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
+ { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
+ { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
+ { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
+ { 0, 0, 0 }
+ };
+
+ int i;
+
+ /* Unicos/Mk doesn't have shared libraries. */
+ if (TARGET_ABI_UNICOSMK && flag_pic)
+ {
+ warning (0, "-f%s ignored for Unicos/Mk (not supported)",
+ (flag_pic > 1) ? "PIC" : "pic");
+ flag_pic = 0;
+ }
+
+  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
+ floating-point instructions. Make that the default for this target. */
+ if (TARGET_ABI_UNICOSMK)
+ alpha_fprm = ALPHA_FPRM_DYN;
+ else
+ alpha_fprm = ALPHA_FPRM_NORM;
+
+ alpha_tp = ALPHA_TP_PROG;
+ alpha_fptm = ALPHA_FPTM_N;
+
+ /* We cannot use su and sui qualifiers for conversion instructions on
+ Unicos/Mk. I'm not sure if this is due to assembler or hardware
+ limitations. Right now, we issue a warning if -mieee is specified
+ and then ignore it; eventually, we should either get it right or
+ disable the option altogether. */
+
+ if (TARGET_IEEE)
+ {
+ if (TARGET_ABI_UNICOSMK)
+ warning (0, "-mieee not supported on Unicos/Mk");
+ else
+ {
+ alpha_tp = ALPHA_TP_INSN;
+ alpha_fptm = ALPHA_FPTM_SU;
+ }
+ }
+
+ if (TARGET_IEEE_WITH_INEXACT)
+ {
+ if (TARGET_ABI_UNICOSMK)
+ warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
+ else
+ {
+ alpha_tp = ALPHA_TP_INSN;
+ alpha_fptm = ALPHA_FPTM_SUI;
+ }
+ }
+
+ if (alpha_tp_string)
+ {
+ if (! strcmp (alpha_tp_string, "p"))
+ alpha_tp = ALPHA_TP_PROG;
+ else if (! strcmp (alpha_tp_string, "f"))
+ alpha_tp = ALPHA_TP_FUNC;
+ else if (! strcmp (alpha_tp_string, "i"))
+ alpha_tp = ALPHA_TP_INSN;
+ else
+ error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
+ }
+
+ if (alpha_fprm_string)
+ {
+ if (! strcmp (alpha_fprm_string, "n"))
+ alpha_fprm = ALPHA_FPRM_NORM;
+ else if (! strcmp (alpha_fprm_string, "m"))
+ alpha_fprm = ALPHA_FPRM_MINF;
+ else if (! strcmp (alpha_fprm_string, "c"))
+ alpha_fprm = ALPHA_FPRM_CHOP;
+ else if (! strcmp (alpha_fprm_string,"d"))
+ alpha_fprm = ALPHA_FPRM_DYN;
+ else
+ error ("bad value %qs for -mfp-rounding-mode switch",
+ alpha_fprm_string);
+ }
+
+ if (alpha_fptm_string)
+ {
+ if (strcmp (alpha_fptm_string, "n") == 0)
+ alpha_fptm = ALPHA_FPTM_N;
+ else if (strcmp (alpha_fptm_string, "u") == 0)
+ alpha_fptm = ALPHA_FPTM_U;
+ else if (strcmp (alpha_fptm_string, "su") == 0)
+ alpha_fptm = ALPHA_FPTM_SU;
+ else if (strcmp (alpha_fptm_string, "sui") == 0)
+ alpha_fptm = ALPHA_FPTM_SUI;
+ else
+ error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
+ }
+
+ if (alpha_cpu_string)
+ {
+ for (i = 0; cpu_table [i].name; i++)
+ if (! strcmp (alpha_cpu_string, cpu_table [i].name))
+ {
+ alpha_tune = alpha_cpu = cpu_table [i].processor;
+ target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
+ target_flags |= cpu_table [i].flags;
+ break;
+ }
+ if (! cpu_table [i].name)
+ error ("bad value %qs for -mcpu switch", alpha_cpu_string);
+ }
+
+ if (alpha_tune_string)
+ {
+ for (i = 0; cpu_table [i].name; i++)
+ if (! strcmp (alpha_tune_string, cpu_table [i].name))
+ {
+ alpha_tune = cpu_table [i].processor;
+ break;
+ }
+ if (! cpu_table [i].name)
+ error ("bad value %qs for -mcpu switch", alpha_tune_string);
+ }
+
+ /* Do some sanity checks on the above options. */
+
+ if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
+ {
+ warning (0, "trap mode not supported on Unicos/Mk");
+ alpha_fptm = ALPHA_FPTM_N;
+ }
+
+ if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
+ && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
+ {
+ warning (0, "fp software completion requires -mtrap-precision=i");
+ alpha_tp = ALPHA_TP_INSN;
+ }
+
+ if (alpha_cpu == PROCESSOR_EV6)
+ {
+ /* Except for EV6 pass 1 (not released), we always have precise
+ arithmetic traps. Which means we can do software completion
+ without minding trap shadows. */
+ alpha_tp = ALPHA_TP_PROG;
+ }
+
+ if (TARGET_FLOAT_VAX)
+ {
+ if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
+ {
+ warning (0, "rounding mode not supported for VAX floats");
+ alpha_fprm = ALPHA_FPRM_NORM;
+ }
+ if (alpha_fptm == ALPHA_FPTM_SUI)
+ {
+ warning (0, "trap mode not supported for VAX floats");
+ alpha_fptm = ALPHA_FPTM_SU;
+ }
+ if (target_flags_explicit & MASK_LONG_DOUBLE_128)
+ warning (0, "128-bit long double not supported for VAX floats");
+ target_flags &= ~MASK_LONG_DOUBLE_128;
+ }
+
+ {
+ char *end;
+ int lat;
+
+ if (!alpha_mlat_string)
+ alpha_mlat_string = "L1";
+
+ if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
+ && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
+ ;
+ else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
+ && ISDIGIT ((unsigned char)alpha_mlat_string[1])
+ && alpha_mlat_string[2] == '\0')
+ {
+ static int const cache_latency[][4] =
+ {
+ { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
+ { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
+ { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
+ };
+
+ lat = alpha_mlat_string[1] - '0';
+ if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
+ {
+ warning (0, "L%d cache latency unknown for %s",
+ lat, alpha_cpu_name[alpha_tune]);
+ lat = 3;
+ }
+ else
+ lat = cache_latency[alpha_tune][lat-1];
+ }
+ else if (! strcmp (alpha_mlat_string, "main"))
+ {
+ /* Most current memories have about 370ns latency. This is
+ a reasonable guess for a fast cpu. */
+ lat = 150;
+ }
+ else
+ {
+ warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
+ lat = 3;
+ }
+
+ alpha_memory_latency = lat;
+ }
+
+ /* Default the definition of "small data" to 8 bytes. */
+ if (!g_switch_set)
+ g_switch_value = 8;
+
+ /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
+ if (flag_pic == 1)
+ target_flags |= MASK_SMALL_DATA;
+ else if (flag_pic == 2)
+ target_flags &= ~MASK_SMALL_DATA;
+
+ /* Align labels and loops for optimal branching. */
+ /* ??? Kludge these by not doing anything if we don't optimize and also if
+ we are writing ECOFF symbols to work around a bug in DEC's assembler. */
+ if (optimize > 0 && write_symbols != SDB_DEBUG)
+ {
+ if (align_loops <= 0)
+ align_loops = 16;
+ if (align_jumps <= 0)
+ align_jumps = 16;
+ }
+ if (align_functions <= 0)
+ align_functions = 16;
+
+ /* Acquire a unique set number for our register saves and restores. */
+ alpha_sr_alias_set = new_alias_set ();
+
+ /* Register variables and functions with the garbage collector. */
+
+ /* Set up function hooks. */
+ init_machine_status = alpha_init_machine_status;
+
+ /* Tell the compiler when we're using VAX floating point. */
+ if (TARGET_FLOAT_VAX)
+ {
+ REAL_MODE_FORMAT (SFmode) = &vax_f_format;
+ REAL_MODE_FORMAT (DFmode) = &vax_g_format;
+ REAL_MODE_FORMAT (TFmode) = NULL;
+ }
+
+#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
+ if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
+ target_flags |= MASK_LONG_DOUBLE_128;
+#endif
+}
+
+/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
+
+int
+zap_mask (HOST_WIDE_INT value)
+{
+ int i;
+
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
+ i++, value >>= 8)
+ if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
+ return 0;
+
+ return 1;
+}
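+
+/* Illustrative examples, following the byte loop above:
+     0x00000000ffffffff -> 1  (low four bytes all ones: zero-extension)
+     0x000000000000ff00 -> 1  (byte 1 is 0xff, every other byte 0x00)
+     0x0000000000000080 -> 0  (byte 0 is 0x80, neither 0x00 nor 0xff)
+   Such masks correspond to the 8-bit byte-select immediate of the ZAP
+   and ZAPNOT instructions, one mask bit per byte.  */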
+
+/* Return true if OP is valid for a particular TLS relocation.
+ We are already guaranteed that OP is a CONST. */
+
+int
+tls_symbolic_operand_1 (rtx op, int size, int unspec)
+{
+ op = XEXP (op, 0);
+
+ if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
+ return 0;
+ op = XVECEXP (op, 0, 0);
+
+ if (GET_CODE (op) != SYMBOL_REF)
+ return 0;
+
+ switch (SYMBOL_REF_TLS_MODEL (op))
+ {
+ case TLS_MODEL_LOCAL_DYNAMIC:
+ return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
+ case TLS_MODEL_INITIAL_EXEC:
+ return unspec == UNSPEC_TPREL && size == 64;
+ case TLS_MODEL_LOCAL_EXEC:
+ return unspec == UNSPEC_TPREL && size == alpha_tls_size;
+ default:
+ gcc_unreachable ();
+ }
+}
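+
+/* For reference, after the caller strips the CONST wrapper the operand
+   accepted above has the shape
+     (unspec [(symbol_ref "foo")] UNSPEC_TPREL)
+   which is what alpha_legitimize_address constructs below with
+   gen_rtx_UNSPEC and gen_rtx_CONST for the exec TLS models.  */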
+
+/* Used by aligned_memory_operand and unaligned_memory_operand to
+ resolve what reload is going to do with OP if it's a register. */
+
+rtx
+resolve_reload_operand (rtx op)
+{
+ if (reload_in_progress)
+ {
+ rtx tmp = op;
+ if (GET_CODE (tmp) == SUBREG)
+ tmp = SUBREG_REG (tmp);
+ if (GET_CODE (tmp) == REG
+ && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
+ {
+ op = reg_equiv_memory_loc[REGNO (tmp)];
+ if (op == 0)
+ return 0;
+ }
+ }
+ return op;
+}
+
+/* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
+ the range defined for C in [I-P]. */
+
+bool
+alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
+{
+ switch (c)
+ {
+ case 'I':
+ /* An unsigned 8 bit constant. */
+ return (unsigned HOST_WIDE_INT) value < 0x100;
+ case 'J':
+ /* The constant zero. */
+ return value == 0;
+ case 'K':
+ /* A signed 16 bit constant. */
+ return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
+ case 'L':
+ /* A shifted signed 16 bit constant appropriate for LDAH. */
+ return ((value & 0xffff) == 0
+ && ((value) >> 31 == -1 || value >> 31 == 0));
+ case 'M':
+ /* A constant that can be AND'ed with using a ZAP insn. */
+ return zap_mask (value);
+ case 'N':
+ /* A complemented unsigned 8 bit constant. */
+ return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
+ case 'O':
+ /* A negated unsigned 8 bit constant. */
+ return (unsigned HOST_WIDE_INT) (- value) < 0x100;
+ case 'P':
+ /* The constant 1, 2 or 3. */
+ return value == 1 || value == 2 || value == 3;
+
+ default:
+ return false;
+ }
+}
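+
+/* Worked examples for two of the trickier letters above, assuming a
+   64-bit HOST_WIDE_INT:
+     'K', value 0x7fff:      0x7fff + 0x8000 == 0xffff < 0x10000, true
+     'K', value 0x8000:      0x8000 + 0x8000 == 0x10000, false
+     'L', value 0x7fff0000:  low 16 bits zero and value >> 31 == 0, true
+                             (a single LDAH of 0x7fff)
+     'L', value 0x12345678:  low 16 bits nonzero, false  */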
+
+/* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
+ matches for C in [GH]. */
+
+bool
+alpha_const_double_ok_for_letter_p (rtx value, int c)
+{
+ switch (c)
+ {
+ case 'G':
+ /* The floating point zero constant. */
+ return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
+ && value == CONST0_RTX (GET_MODE (value)));
+
+ case 'H':
+ /* A valid operand of a ZAP insn. */
+ return (GET_MODE (value) == VOIDmode
+ && zap_mask (CONST_DOUBLE_LOW (value))
+ && zap_mask (CONST_DOUBLE_HIGH (value)));
+
+ default:
+ return false;
+ }
+}
+
+/* Implements EXTRA_CONSTRAINT.  Return true if VALUE matches for C
+   in [QRSTUW].  */
+
+bool
+alpha_extra_constraint (rtx value, int c)
+{
+ switch (c)
+ {
+ case 'Q':
+ return normal_memory_operand (value, VOIDmode);
+ case 'R':
+ return direct_call_operand (value, Pmode);
+ case 'S':
+ return (GET_CODE (value) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
+ case 'T':
+ return GET_CODE (value) == HIGH;
+ case 'U':
+ return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
+ case 'W':
+ return (GET_CODE (value) == CONST_VECTOR
+ && value == CONST0_RTX (GET_MODE (value)));
+ default:
+ return false;
+ }
+}
+
+/* The scalar modes we support differ from the default check-what-c-supports
+   version in that TFmode is sometimes available even when long double
+   indicates only DFmode.  On Unicos/Mk, HImode doesn't map to any C type,
+   but of course we still support it.  */
+
+static bool
+alpha_scalar_mode_supported_p (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case QImode:
+ case HImode:
+ case SImode:
+ case DImode:
+ case TImode: /* via optabs.c */
+ return true;
+
+ case SFmode:
+ case DFmode:
+ return true;
+
+ case TFmode:
+ return TARGET_HAS_XFLOATING_LIBS;
+
+ default:
+ return false;
+ }
+}
+
+/* Alpha implements a couple of integer vector mode operations when
+ TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
+ which allows the vectorizer to operate on e.g. move instructions,
+ or when expand_vector_operations can do something useful. */
+
+static bool
+alpha_vector_mode_supported_p (enum machine_mode mode)
+{
+ return mode == V8QImode || mode == V4HImode || mode == V2SImode;
+}
+
+/* Return 1 if this function can directly return via $26. */
+
+int
+direct_return (void)
+{
+ return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
+ && reload_completed
+ && alpha_sa_size () == 0
+ && get_frame_size () == 0
+ && current_function_outgoing_args_size == 0
+ && current_function_pretend_args_size == 0);
+}
+
+/* Return the ADDR_VEC associated with a tablejump insn. */
+
+rtx
+alpha_tablejump_addr_vec (rtx insn)
+{
+ rtx tmp;
+
+ tmp = JUMP_LABEL (insn);
+ if (!tmp)
+ return NULL_RTX;
+ tmp = NEXT_INSN (tmp);
+ if (!tmp)
+ return NULL_RTX;
+ if (GET_CODE (tmp) == JUMP_INSN
+ && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
+ return PATTERN (tmp);
+ return NULL_RTX;
+}
+
+/* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
+
+rtx
+alpha_tablejump_best_label (rtx insn)
+{
+ rtx jump_table = alpha_tablejump_addr_vec (insn);
+ rtx best_label = NULL_RTX;
+
+ /* ??? Once the CFG doesn't keep getting completely rebuilt, look
+ there for edge frequency counts from profile data. */
+
+ if (jump_table)
+ {
+ int n_labels = XVECLEN (jump_table, 1);
+ int best_count = -1;
+ int i, j;
+
+ for (i = 0; i < n_labels; i++)
+ {
+ int count = 1;
+
+ for (j = i + 1; j < n_labels; j++)
+ if (XEXP (XVECEXP (jump_table, 1, i), 0)
+ == XEXP (XVECEXP (jump_table, 1, j), 0))
+ count++;
+
+ if (count > best_count)
+ best_count = count, best_label = XVECEXP (jump_table, 1, i);
+ }
+ }
+
+ return best_label ? best_label : const0_rtx;
+}
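+
+/* Illustrative example: for a jump table whose entries reference labels
+   L1, L2, L1, L3, L1 in turn, the quadratic count above finds L1 three
+   times and returns it as the best label to predict.  */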
+
+/* Return the TLS model to use for SYMBOL. */
+
+static enum tls_model
+tls_symbolic_operand_type (rtx symbol)
+{
+ enum tls_model model;
+
+ if (GET_CODE (symbol) != SYMBOL_REF)
+ return 0;
+ model = SYMBOL_REF_TLS_MODEL (symbol);
+
+ /* Local-exec with a 64-bit size is the same code as initial-exec. */
+ if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
+ model = TLS_MODEL_INITIAL_EXEC;
+
+ return model;
+}
+
+/* Return true if the function DECL will share the same GP as any
+ function in the current unit of translation. */
+
+static bool
+decl_has_samegp (tree decl)
+{
+ /* Functions that are not local can be overridden, and thus may
+ not share the same gp. */
+ if (!(*targetm.binds_local_p) (decl))
+ return false;
+
+ /* If -msmall-data is in effect, assume that there is only one GP
+ for the module, and so any local symbol has this property. We
+ need explicit relocations to be able to enforce this for symbols
+ not defined in this unit of translation, however. */
+ if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
+ return true;
+
+ /* Functions that are not external are defined in this UoT. */
+ /* ??? Irritatingly, static functions not yet emitted are still
+ marked "external". Apply this to non-static functions only. */
+ return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
+}
+
+/* Return true if EXP should be placed in the small data section. */
+
+static bool
+alpha_in_small_data_p (tree exp)
+{
+ /* We want to merge strings, so we never consider them small data. */
+ if (TREE_CODE (exp) == STRING_CST)
+ return false;
+
+ /* Functions are never in the small data area. Duh. */
+ if (TREE_CODE (exp) == FUNCTION_DECL)
+ return false;
+
+ if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
+ {
+ const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
+ if (strcmp (section, ".sdata") == 0
+ || strcmp (section, ".sbss") == 0)
+ return true;
+ }
+ else
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
+
+ /* If this is an incomplete type with size 0, then we can't put it
+ in sdata because it might be too big when completed. */
+ if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
+ return true;
+ }
+
+ return false;
+}
+
+#if TARGET_ABI_OPEN_VMS
+static bool
+alpha_linkage_symbol_p (const char *symname)
+{
+ int symlen = strlen (symname);
+
+ if (symlen > 4)
+ return strcmp (&symname [symlen - 4], "..lk") == 0;
+
+ return false;
+}
+
+#define LINKAGE_SYMBOL_REF_P(X) \
+ ((GET_CODE (X) == SYMBOL_REF \
+ && alpha_linkage_symbol_p (XSTR (X, 0))) \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
+ && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
+#endif
+
+/* legitimate_address_p recognizes an RTL expression that is a valid
+ memory address for an instruction. The MODE argument is the
+ machine mode for the MEM expression that wants to use this address.
+
+ For Alpha, we have either a constant address or the sum of a
+ register and a constant address, or just a register. For DImode,
+   any of those forms can be surrounded with an AND that clears the
+ low-order three bits; this is an "unaligned" access. */
+
+bool
+alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
+{
+ /* If this is an ldq_u type address, discard the outer AND. */
+ if (mode == DImode
+ && GET_CODE (x) == AND
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == -8)
+ x = XEXP (x, 0);
+
+ /* Discard non-paradoxical subregs. */
+ if (GET_CODE (x) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (x))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
+ x = SUBREG_REG (x);
+
+ /* Unadorned general registers are valid. */
+ if (REG_P (x)
+ && (strict
+ ? STRICT_REG_OK_FOR_BASE_P (x)
+ : NONSTRICT_REG_OK_FOR_BASE_P (x)))
+ return true;
+
+ /* Constant addresses (i.e. +/- 32k) are valid. */
+ if (CONSTANT_ADDRESS_P (x))
+ return true;
+
+#if TARGET_ABI_OPEN_VMS
+ if (LINKAGE_SYMBOL_REF_P (x))
+ return true;
+#endif
+
+ /* Register plus a small constant offset is valid. */
+ if (GET_CODE (x) == PLUS)
+ {
+ rtx ofs = XEXP (x, 1);
+ x = XEXP (x, 0);
+
+ /* Discard non-paradoxical subregs. */
+ if (GET_CODE (x) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (x))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
+ x = SUBREG_REG (x);
+
+ if (REG_P (x))
+ {
+ if (! strict
+ && NONSTRICT_REG_OK_FP_BASE_P (x)
+ && GET_CODE (ofs) == CONST_INT)
+ return true;
+ if ((strict
+ ? STRICT_REG_OK_FOR_BASE_P (x)
+ : NONSTRICT_REG_OK_FOR_BASE_P (x))
+ && CONSTANT_ADDRESS_P (ofs))
+ return true;
+ }
+ }
+
+ /* If we're managing explicit relocations, LO_SUM is valid, as
+ are small data symbols. */
+ else if (TARGET_EXPLICIT_RELOCS)
+ {
+ if (small_symbolic_operand (x, Pmode))
+ return true;
+
+ if (GET_CODE (x) == LO_SUM)
+ {
+ rtx ofs = XEXP (x, 1);
+ x = XEXP (x, 0);
+
+ /* Discard non-paradoxical subregs. */
+ if (GET_CODE (x) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (x))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
+ x = SUBREG_REG (x);
+
+ /* Must have a valid base register. */
+ if (! (REG_P (x)
+ && (strict
+ ? STRICT_REG_OK_FOR_BASE_P (x)
+ : NONSTRICT_REG_OK_FOR_BASE_P (x))))
+ return false;
+
+ /* The symbol must be local. */
+ if (local_symbolic_operand (ofs, Pmode)
+ || dtp32_symbolic_operand (ofs, Pmode)
+ || tp32_symbolic_operand (ofs, Pmode))
+ return true;
+ }
+ }
+
+ return false;
+}
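+
+/* A minimal sketch of the accepted forms, built with the usual rtl
+   constructors; BASE stands for any valid base register rtx and the
+   names are hypothetical:
+
+     rtx base_plus_disp = gen_rtx_PLUS (DImode, base, GEN_INT (disp));
+     rtx ldq_u_addr = gen_rtx_AND (DImode, base_plus_disp, GEN_INT (-8));
+
+   The first form is valid whenever DISP satisfies CONSTANT_ADDRESS_P;
+   the second is the "unaligned" DImode form whose outer AND is stripped
+   at the top of alpha_legitimate_address_p above.  */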
+
+/* Build the SYMBOL_REF for __tls_get_addr. */
+
+static GTY(()) rtx tls_get_addr_libfunc;
+
+static rtx
+get_tls_get_addr (void)
+{
+ if (!tls_get_addr_libfunc)
+ tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
+ return tls_get_addr_libfunc;
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address. */
+
+rtx
+alpha_legitimize_address (rtx x, rtx scratch,
+ enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT addend;
+
+ /* If the address is (plus reg const_int) and the CONST_INT is not a
+ valid offset, compute the high part of the constant and add it to
+ the register. Then our address is (plus temp low-part-const). */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
+ {
+ addend = INTVAL (XEXP (x, 1));
+ x = XEXP (x, 0);
+ goto split_addend;
+ }
+
+ /* If the address is (const (plus FOO const_int)), find the low-order
+ part of the CONST_INT. Then load FOO plus any high-order part of the
+ CONST_INT into a register. Our address is (plus reg low-part-const).
+ This is done to reduce the number of GOT entries. */
+ if (!no_new_pseudos
+ && GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
+ {
+ addend = INTVAL (XEXP (XEXP (x, 0), 1));
+ x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
+ goto split_addend;
+ }
+
+ /* If we have a (plus reg const), emit the load as in (2), then add
+ the two registers, and finally generate (plus reg low-part-const) as
+ our address. */
+ if (!no_new_pseudos
+ && GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && GET_CODE (XEXP (x, 1)) == CONST
+ && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
+ {
+ addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
+ x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
+ XEXP (XEXP (XEXP (x, 1), 0), 0),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ goto split_addend;
+ }
+
+ /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
+ if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
+ {
+ rtx r0, r16, eqv, tga, tp, insn, dest, seq;
+
+ switch (tls_symbolic_operand_type (x))
+ {
+ case TLS_MODEL_NONE:
+ break;
+
+ case TLS_MODEL_GLOBAL_DYNAMIC:
+ start_sequence ();
+
+ r0 = gen_rtx_REG (Pmode, 0);
+ r16 = gen_rtx_REG (Pmode, 16);
+ tga = get_tls_get_addr ();
+ dest = gen_reg_rtx (Pmode);
+ seq = GEN_INT (alpha_next_sequence_number++);
+
+ emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
+ insn = gen_call_value_osf_tlsgd (r0, tga, seq);
+ insn = emit_call_insn (insn);
+ CONST_OR_PURE_CALL_P (insn) = 1;
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
+
+ insn = get_insns ();
+ end_sequence ();
+
+ emit_libcall_block (insn, dest, r0, x);
+ return dest;
+
+ case TLS_MODEL_LOCAL_DYNAMIC:
+ start_sequence ();
+
+ r0 = gen_rtx_REG (Pmode, 0);
+ r16 = gen_rtx_REG (Pmode, 16);
+ tga = get_tls_get_addr ();
+ scratch = gen_reg_rtx (Pmode);
+ seq = GEN_INT (alpha_next_sequence_number++);
+
+ emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
+ insn = gen_call_value_osf_tlsldm (r0, tga, seq);
+ insn = emit_call_insn (insn);
+ CONST_OR_PURE_CALL_P (insn) = 1;
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
+
+ insn = get_insns ();
+ end_sequence ();
+
+ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
+ UNSPEC_TLSLDM_CALL);
+ emit_libcall_block (insn, scratch, r0, eqv);
+
+ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
+ eqv = gen_rtx_CONST (Pmode, eqv);
+
+ if (alpha_tls_size == 64)
+ {
+ dest = gen_reg_rtx (Pmode);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
+ emit_insn (gen_adddi3 (dest, dest, scratch));
+ return dest;
+ }
+ if (alpha_tls_size == 32)
+ {
+ insn = gen_rtx_HIGH (Pmode, eqv);
+ insn = gen_rtx_PLUS (Pmode, scratch, insn);
+ scratch = gen_reg_rtx (Pmode);
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
+ }
+ return gen_rtx_LO_SUM (Pmode, scratch, eqv);
+
+ case TLS_MODEL_INITIAL_EXEC:
+ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
+ eqv = gen_rtx_CONST (Pmode, eqv);
+ tp = gen_reg_rtx (Pmode);
+ scratch = gen_reg_rtx (Pmode);
+ dest = gen_reg_rtx (Pmode);
+
+ emit_insn (gen_load_tp (tp));
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
+ emit_insn (gen_adddi3 (dest, tp, scratch));
+ return dest;
+
+ case TLS_MODEL_LOCAL_EXEC:
+ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
+ eqv = gen_rtx_CONST (Pmode, eqv);
+ tp = gen_reg_rtx (Pmode);
+
+ emit_insn (gen_load_tp (tp));
+ if (alpha_tls_size == 32)
+ {
+ insn = gen_rtx_HIGH (Pmode, eqv);
+ insn = gen_rtx_PLUS (Pmode, tp, insn);
+ tp = gen_reg_rtx (Pmode);
+ emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
+ }
+ return gen_rtx_LO_SUM (Pmode, tp, eqv);
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (local_symbolic_operand (x, Pmode))
+ {
+ if (small_symbolic_operand (x, Pmode))
+ return x;
+ else
+ {
+ if (!no_new_pseudos)
+ scratch = gen_reg_rtx (Pmode);
+ emit_insn (gen_rtx_SET (VOIDmode, scratch,
+ gen_rtx_HIGH (Pmode, x)));
+ return gen_rtx_LO_SUM (Pmode, scratch, x);
+ }
+ }
+ }
+
+ return NULL;
+
+ split_addend:
+ {
+ HOST_WIDE_INT low, high;
+
+ low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
+ addend -= low;
+ high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ addend -= high;
+
+ if (addend)
+ x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
+ (no_new_pseudos ? scratch : NULL_RTX),
+ 1, OPTAB_LIB_WIDEN);
+ if (high)
+ x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
+ (no_new_pseudos ? scratch : NULL_RTX),
+ 1, OPTAB_LIB_WIDEN);
+
+ return plus_constant (x, low);
+ }
+}
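+
+/* Worked example of the split_addend arithmetic above, for the
+   hypothetical addend 0x12348765:
+     low  = ((0x8765 ^ 0x8000) - 0x8000) = -0x789b
+     high = addend - low                 =  0x12350000
+   so the address is rebuilt as ((x + 0x12350000) + -0x789b), i.e. one
+   LDAH of 0x1235 followed by a 16-bit displacement of -0x789b.  */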
+
+/* Primarily this is required for TLS symbols, but given that our move
+ patterns *ought* to be able to handle any symbol at any time, we
+ should never be spilling symbolic operands to the constant pool, ever. */
+
+static bool
+alpha_cannot_force_const_mem (rtx x)
+{
+ enum rtx_code code = GET_CODE (x);
+ return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
+}
+
+/* We do not allow indirect calls to be optimized into sibling calls, nor
+ can we allow a call to a function with a different GP to be optimized
+ into a sibcall. */
+
+static bool
+alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+ /* Can't do indirect tail calls, since we don't know if the target
+ uses the same GP. */
+ if (!decl)
+ return false;
+
+ /* Otherwise, we can make a tail call if the target function shares
+ the same GP. */
+ return decl_has_samegp (decl);
+}
+
+int
+some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
+{
+ rtx x = *px;
+
+ /* Don't re-split. */
+ if (GET_CODE (x) == LO_SUM)
+ return -1;
+
+ return small_symbolic_operand (x, Pmode) != 0;
+}
+
+static int
+split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
+{
+ rtx x = *px;
+
+ /* Don't re-split. */
+ if (GET_CODE (x) == LO_SUM)
+ return -1;
+
+ if (small_symbolic_operand (x, Pmode))
+ {
+ x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
+ *px = x;
+ return -1;
+ }
+
+ return 0;
+}
+
+rtx
+split_small_symbolic_operand (rtx x)
+{
+ x = copy_insn (x);
+ for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
+ return x;
+}
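+
+/* Illustrative before/after for the rewrite above: a small-data
+   reference such as
+     (mem (symbol_ref "small_var"))
+   becomes
+     (mem (lo_sum (reg 29) (symbol_ref "small_var")))
+   i.e. a gp-relative access through pic_offset_table_rtx ($29).  */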
+
+/* Indicate that INSN cannot be duplicated. This is true for any insn
+ that we've marked with gpdisp relocs, since those have to stay in
+ 1-1 correspondence with one another.
+
+ Technically we could copy them if we could set up a mapping from one
+ sequence number to another, across the set of insns to be duplicated.
+ This seems overly complicated and error-prone since interblock motion
+ from sched-ebb could move one of the pair of insns to a different block.
+
+ Also cannot allow jsr insns to be duplicated. If they throw exceptions,
+ then they'll be in a different block from their ldgp. Which could lead
+ the bb reorder code to think that it would be ok to copy just the block
+ containing the call and branch to the block containing the ldgp. */
+
+static bool
+alpha_cannot_copy_insn_p (rtx insn)
+{
+ if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
+ return false;
+ if (recog_memoized (insn) >= 0)
+ return get_attr_cannot_copy (insn);
+ else
+ return false;
+}
+
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and return the new rtx. */
+
+rtx
+alpha_legitimize_reload_address (rtx x,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int opnum, int type,
+ int ind_levels ATTRIBUTE_UNUSED)
+{
+ /* We must recognize output that we have already generated ourselves. */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, type);
+ return x;
+ }
+
+ /* We wish to handle large displacements off a base register by
+ splitting the addend across an ldah and the mem insn. This
+     cuts the number of extra insns needed from 3 to 1.  */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
+ && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
+ HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT high
+ = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
+
+ /* Check for 32-bit overflow. */
+ if (high + low != val)
+ return NULL_RTX;
+
+ /* Reload the high part into a base reg; leave the low part
+ in the mem directly. */
+ x = gen_rtx_PLUS (GET_MODE (x),
+ gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
+ GEN_INT (high)),
+ GEN_INT (low));
+
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, type);
+ return x;
+ }
+
+ return NULL_RTX;
+}
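+
+/* Worked example of the displacement split above, for the hypothetical
+   VAL 0x9000:
+     low  = ((0x9000 ^ 0x8000) - 0x8000) = -0x7000
+     high = 0x9000 - (-0x7000)           =  0x10000
+   The high part becomes "ldah tmp,1(base)" via the pushed reload and
+   the mem keeps the in-range displacement -0x7000: one extra insn.  */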
+
+/* Compute a (partial) cost for rtx X. Return true if the complete
+ cost has been computed, and false if subexpressions should be
+ scanned. In either case, *TOTAL contains the cost result. */
+
+static bool
+alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
+{
+ enum machine_mode mode = GET_MODE (x);
+ bool float_mode_p = FLOAT_MODE_P (mode);
+ const struct alpha_rtx_cost_data *cost_data;
+
+ if (optimize_size)
+ cost_data = &alpha_rtx_cost_size;
+ else
+ cost_data = &alpha_rtx_cost_data[alpha_tune];
+
+ switch (code)
+ {
+ case CONST_INT:
+ /* If this is an 8-bit constant, return zero since it can be used
+ nearly anywhere with no cost. If it is a valid operand for an
+ ADD or AND, likewise return 0 if we know it will be used in that
+ context. Otherwise, return 2 since it might be used there later.
+ All other constants take at least two insns. */
+ if (INTVAL (x) >= 0 && INTVAL (x) < 256)
+ {
+ *total = 0;
+ return true;
+ }
+ /* FALLTHRU */
+
+ case CONST_DOUBLE:
+ if (x == CONST0_RTX (mode))
+ *total = 0;
+ else if ((outer_code == PLUS && add_operand (x, VOIDmode))
+ || (outer_code == AND && and_operand (x, VOIDmode)))
+ *total = 0;
+ else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
+ *total = 2;
+ else
+ *total = COSTS_N_INSNS (2);
+ return true;
+
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
+ *total = COSTS_N_INSNS (outer_code != MEM);
+ else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
+ *total = COSTS_N_INSNS (1 + (outer_code != MEM));
+ else if (tls_symbolic_operand_type (x))
+ /* Estimate of cost for call_pal rduniq. */
+ /* ??? How many insns do we emit here? More than one... */
+ *total = COSTS_N_INSNS (15);
+ else
+ /* Otherwise we do a load from the GOT. */
+ *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
+ return true;
+
+ case HIGH:
+ /* This is effectively an add_operand. */
+ *total = 2;
+ return true;
+
+ case PLUS:
+ case MINUS:
+ if (float_mode_p)
+ *total = cost_data->fp_add;
+ else if (GET_CODE (XEXP (x, 0)) == MULT
+ && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
+ {
+ *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
+ + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
+ return true;
+ }
+ return false;
+
+ case MULT:
+ if (float_mode_p)
+ *total = cost_data->fp_mult;
+ else if (mode == DImode)
+ *total = cost_data->int_mult_di;
+ else
+ *total = cost_data->int_mult_si;
+ return false;
+
+ case ASHIFT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) <= 3)
+ {
+ *total = COSTS_N_INSNS (1);
+ return false;
+ }
+ /* FALLTHRU */
+
+ case ASHIFTRT:
+ case LSHIFTRT:
+ *total = cost_data->int_shift;
+ return false;
+
+ case IF_THEN_ELSE:
+ if (float_mode_p)
+ *total = cost_data->fp_add;
+ else
+ *total = cost_data->int_cmov;
+ return false;
+
+ case DIV:
+ case UDIV:
+ case MOD:
+ case UMOD:
+ if (!float_mode_p)
+ *total = cost_data->int_div;
+ else if (mode == SFmode)
+ *total = cost_data->fp_div_sf;
+ else
+ *total = cost_data->fp_div_df;
+ return false;
+
+ case MEM:
+ *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
+ return true;
+
+ case NEG:
+ if (! float_mode_p)
+ {
+ *total = COSTS_N_INSNS (1);
+ return false;
+ }
+ /* FALLTHRU */
+
+ case ABS:
+ if (! float_mode_p)
+ {
+ *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
+ return false;
+ }
+ /* FALLTHRU */
+
+ case FLOAT:
+ case UNSIGNED_FLOAT:
+ case FIX:
+ case UNSIGNED_FIX:
+ case FLOAT_TRUNCATE:
+ *total = cost_data->fp_add;
+ return false;
+
+ case FLOAT_EXTEND:
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ *total = 0;
+ else
+ *total = cost_data->fp_add;
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+/* REF is an alignable memory location. Place an aligned SImode
+ reference into *PALIGNED_MEM and the number of bits to shift into
+ *PBITNUM. SCRATCH is a free register for use in reloading out
+ of range stack slots. */
+
+void
+get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
+{
+ rtx base;
+ HOST_WIDE_INT disp, offset;
+
+ gcc_assert (GET_CODE (ref) == MEM);
+
+ if (reload_in_progress
+ && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
+ {
+ base = find_replacement (&XEXP (ref, 0));
+ gcc_assert (memory_address_p (GET_MODE (ref), base));
+ }
+ else
+ base = XEXP (ref, 0);
+
+ if (GET_CODE (base) == PLUS)
+ disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
+ else
+ disp = 0;
+
+ /* Find the byte offset within an aligned word. If the memory itself is
+ claimed to be aligned, believe it. Otherwise, aligned_memory_operand
+ will have examined the base register and determined it is aligned, and
+ thus displacements from it are naturally alignable. */
+ if (MEM_ALIGN (ref) >= 32)
+ offset = 0;
+ else
+ offset = disp & 3;
+
+ /* Access the entire aligned word. */
+ *paligned_mem = widen_memory_access (ref, SImode, -offset);
+
+ /* Convert the byte offset within the word to a bit offset. */
+ if (WORDS_BIG_ENDIAN)
+ offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
+ else
+ offset *= 8;
+ *pbitnum = GEN_INT (offset);
+}
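+
+/* Illustrative example: for an HImode REF at BASE+6 with no alignment
+   info, OFFSET = 6 & 3 = 2, so *PALIGNED_MEM is the SImode word at
+   BASE+4 and, little-endian, *PBITNUM is 16: the halfword occupies
+   bits 16..31 of the aligned word.  */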
+
+/* Similar to get_aligned_mem, but just get the address.  Handle the
+   two reload cases.  */
+
+rtx
+get_unaligned_address (rtx ref)
+{
+ rtx base;
+ HOST_WIDE_INT offset = 0;
+
+ gcc_assert (GET_CODE (ref) == MEM);
+
+ if (reload_in_progress
+ && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
+ {
+ base = find_replacement (&XEXP (ref, 0));
+
+ gcc_assert (memory_address_p (GET_MODE (ref), base));
+ }
+ else
+ base = XEXP (ref, 0);
+
+ if (GET_CODE (base) == PLUS)
+ offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
+
+ return plus_constant (base, offset);
+}
+
+/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
+ X is always returned in a register. */
+
+rtx
+get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
+{
+ if (GET_CODE (addr) == PLUS)
+ {
+ ofs += INTVAL (XEXP (addr, 1));
+ addr = XEXP (addr, 0);
+ }
+
+ return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+}
+
+/* On the Alpha, all (non-symbolic) constants except zero go into
+ a floating-point register via memory. Note that we cannot
+ return anything that is not a subset of CLASS, and that some
+ symbolic constants cannot be dropped to memory. */
+
+enum reg_class
+alpha_preferred_reload_class(rtx x, enum reg_class class)
+{
+ /* Zero is present in any register class. */
+ if (x == CONST0_RTX (GET_MODE (x)))
+ return class;
+
+ /* These sorts of constants we can easily drop to memory. */
+ if (GET_CODE (x) == CONST_INT
+ || GET_CODE (x) == CONST_DOUBLE
+ || GET_CODE (x) == CONST_VECTOR)
+ {
+ if (class == FLOAT_REGS)
+ return NO_REGS;
+ if (class == ALL_REGS)
+ return GENERAL_REGS;
+ return class;
+ }
+
+ /* All other kinds of constants should not (and in the case of HIGH
+ cannot) be dropped to memory -- instead we use a GENERAL_REGS
+ secondary reload. */
+ if (CONSTANT_P (x))
+ return (class == ALL_REGS ? GENERAL_REGS : class);
+
+ return class;
+}
+
+/* Loading and storing HImode or QImode values to and from memory
+ usually requires a scratch register. The exceptions are loading
+ QImode and HImode from an aligned address to a general register
+ unless byte instructions are permitted.
+
+ We also cannot load an unaligned address or a paradoxical SUBREG
+ into an FP register.
+
+ We also cannot do integral arithmetic into FP regs, as might result
+ from register elimination into a DImode fp register. */
+
+enum reg_class
+alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
+ rtx x, int in)
+{
+ if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
+ {
+ if (GET_CODE (x) == MEM
+ || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
+ || (GET_CODE (x) == SUBREG
+ && (GET_CODE (SUBREG_REG (x)) == MEM
+ || (GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
+ {
+ if (!in || !aligned_memory_operand(x, mode))
+ return GENERAL_REGS;
+ }
+ }
+
+ if (class == FLOAT_REGS)
+ {
+ if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
+ return GENERAL_REGS;
+
+ if (GET_CODE (x) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
+ return GENERAL_REGS;
+
+ if (in && INTEGRAL_MODE_P (mode)
+ && ! (memory_operand (x, mode) || x == const0_rtx))
+ return GENERAL_REGS;
+ }
+
+ return NO_REGS;
+}
+
+/* Subfunction of the following function. Update the flags of any MEM
+ found in part of X. */
+
+static int
+alpha_set_memflags_1 (rtx *xp, void *data)
+{
+ rtx x = *xp, orig = (rtx) data;
+
+ if (GET_CODE (x) != MEM)
+ return 0;
+
+ MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
+ MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
+ MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
+ MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
+ MEM_READONLY_P (x) = MEM_READONLY_P (orig);
+
+ /* Sadly, we cannot use alias sets because the extra aliasing
+ produced by the AND interferes. Given that two-byte quantities
+ are the only thing we would be able to differentiate anyway,
+ there does not seem to be any point in convoluting the early
+ out of the alias check. */
+
+ return -1;
+}
+
+/* Given INSN, which is an INSN list or the PATTERN of a single insn
+ generated to perform a memory operation, look for any MEMs in either
+   a SET_DEST or a SET_SRC and copy the volatile, in-struct, scalar,
+   notrap, and readonly flags from REF into each of the MEMs found.  If REF is not
+ a MEM, don't do anything. */
+
+void
+alpha_set_memflags (rtx insn, rtx ref)
+{
+ rtx *base_ptr;
+
+ if (GET_CODE (ref) != MEM)
+ return;
+
+ /* This is only called from alpha.md, after having had something
+ generated from one of the insn patterns. So if everything is
+ zero, the pattern is already up-to-date. */
+ if (!MEM_VOLATILE_P (ref)
+ && !MEM_IN_STRUCT_P (ref)
+ && !MEM_SCALAR_P (ref)
+ && !MEM_NOTRAP_P (ref)
+ && !MEM_READONLY_P (ref))
+ return;
+
+ if (INSN_P (insn))
+ base_ptr = &PATTERN (insn);
+ else
+ base_ptr = &insn;
+ for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
+}
+
+static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
+ int, bool);
+
+/* Internal routine for alpha_emit_set_const to check for N or below insns.
+ If NO_OUTPUT is true, then we only check to see if N insns are possible,
+ and return pc_rtx if successful. */
+
+static rtx
+alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
+ HOST_WIDE_INT c, int n, bool no_output)
+{
+ HOST_WIDE_INT new;
+ int i, bits;
+ /* Use a pseudo if highly optimizing and still generating RTL. */
+ rtx subtarget
+ = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
+ rtx temp, insn;
+
+ /* If this is a sign-extended 32-bit constant, we can do this in at most
+ three insns, so do it if we have enough insns left. We always have
+ a sign-extended 32-bit constant when compiling on a narrow machine. */
+
+ if (HOST_BITS_PER_WIDE_INT != 64
+ || c >> 31 == -1 || c >> 31 == 0)
+ {
+ HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT tmp1 = c - low;
+ HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT extra = 0;
+
+ /* If HIGH will be interpreted as negative but the constant is
+	 positive, we must adjust it to do two ldah insns.  */
+
+ if ((high & 0x8000) != 0 && c >= 0)
+ {
+ extra = 0x4000;
+ tmp1 -= 0x40000000;
+ high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
+ }
+
+ if (c == low || (low == 0 && extra == 0))
+ {
+	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
+	     but that meant that we couldn't handle INT_MIN on 32-bit machines
+	     (like NT/Alpha), because it recursed indefinitely through
+	     emit_move_insn to gen_movdi.  So instead, since we know exactly
+	     what we want, create it explicitly.  */
+
+ if (no_output)
+ return pc_rtx;
+ if (target == NULL)
+ target = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
+ return target;
+ }
+ else if (n >= 2 + (extra != 0))
+ {
+ if (no_output)
+ return pc_rtx;
+ if (no_new_pseudos)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
+ temp = target;
+ }
+ else
+ temp = copy_to_suggested_reg (GEN_INT (high << 16),
+ subtarget, mode);
+
+ /* As of 2002-02-23, addsi3 is only available when not optimizing.
+ This means that if we go through expand_binop, we'll try to
+ generate extensions, etc, which will require new pseudos, which
+ will fail during some split phases. The SImode add patterns
+ still exist, but are not named. So build the insns by hand. */
+
+ if (extra != 0)
+ {
+ if (! subtarget)
+ subtarget = gen_reg_rtx (mode);
+ insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
+ insn = gen_rtx_SET (VOIDmode, subtarget, insn);
+ emit_insn (insn);
+ temp = subtarget;
+ }
+
+ if (target == NULL)
+ target = gen_reg_rtx (mode);
+ insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
+ insn = gen_rtx_SET (VOIDmode, target, insn);
+ emit_insn (insn);
+ return target;
+ }
+ }
+
+ /* If we couldn't do it that way, try some other methods. But if we have
+ no instructions left, don't bother. Likewise, if this is SImode and
+ we can't make pseudos, we can't do anything since the expand_binop
+ and expand_unop calls will widen and try to make pseudos. */
+
+ if (n == 1 || (mode == SImode && no_new_pseudos))
+ return 0;
+
+  /* Next, see if we can load a related constant and then shift and possibly
+     negate it to get the constant we want.  Try this once for each
+     increasing number of insns.  */
+
+ for (i = 1; i < n; i++)
+ {
+      /* First, see if we can subtract some low bits and be left with an
+	 easy load of the high bits.  */
+
+ new = ((c & 0xffff) ^ 0x8000) - 0x8000;
+ if (new != 0)
+ {
+ temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
+ if (temp)
+ {
+ if (no_output)
+ return temp;
+ return expand_binop (mode, add_optab, temp, GEN_INT (new),
+ target, 0, OPTAB_WIDEN);
+ }
+ }
+
+ /* Next try complementing. */
+ temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
+ if (temp)
+ {
+ if (no_output)
+ return temp;
+ return expand_unop (mode, one_cmpl_optab, temp, target, 0);
+ }
+
+ /* Next try to form a constant and do a left shift. We can do this
+ if some low-order bits are zero; the exact_log2 call below tells
+ us that information. The bits we are shifting out could be any
+ value, but here we'll just try the 0- and sign-extended forms of
+ the constant. To try to increase the chance of having the same
+ constant in more than one insn, start at the highest number of
+ bits to shift, but try all possibilities in case a ZAPNOT will
+ be useful. */
+
+ bits = exact_log2 (c & -c);
+ if (bits > 0)
+ for (; bits > 0; bits--)
+ {
+ new = c >> bits;
+ temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
+ if (!temp && c < 0)
+ {
+ new = (unsigned HOST_WIDE_INT)c >> bits;
+ temp = alpha_emit_set_const (subtarget, mode, new,
+ i, no_output);
+ }
+ if (temp)
+ {
+ if (no_output)
+ return temp;
+ return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
+ target, 0, OPTAB_WIDEN);
+ }
+ }
+
+ /* Now try high-order zero bits. Here we try the shifted-in bits as
+ all zero and all ones. Be careful to avoid shifting outside the
+ mode and to avoid shifting outside the host wide int size. */
+ /* On narrow hosts, don't shift a 1 into the high bit, since we'll
+ confuse the recursive call and set all of the high 32 bits. */
+
+ bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
+ - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
+ if (bits > 0)
+ for (; bits > 0; bits--)
+ {
+ new = c << bits;
+ temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
+ if (!temp)
+ {
+ new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
+ temp = alpha_emit_set_const (subtarget, mode, new,
+ i, no_output);
+ }
+ if (temp)
+ {
+ if (no_output)
+ return temp;
+ return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
+ target, 1, OPTAB_WIDEN);
+ }
+ }
+
+ /* Now try high-order 1 bits. We get that with a sign-extension.
+ But one bit isn't enough here. Be careful to avoid shifting outside
+ the mode and to avoid shifting outside the host wide int size. */
+
+ bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
+ - floor_log2 (~ c) - 2);
+ if (bits > 0)
+ for (; bits > 0; bits--)
+ {
+ new = c << bits;
+ temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
+ if (!temp)
+ {
+ new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
+ temp = alpha_emit_set_const (subtarget, mode, new,
+ i, no_output);
+ }
+ if (temp)
+ {
+ if (no_output)
+ return temp;
+ return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
+ target, 0, OPTAB_WIDEN);
+ }
+ }
+ }
+
+#if HOST_BITS_PER_WIDE_INT == 64
+  /* Finally, see if we can load a value into the target that is the same as
+ constant except that all bytes that are 0 are changed to be 0xff. If we
+ can, then we can do a ZAPNOT to obtain the desired constant. */
+
+ new = c;
+ for (i = 0; i < 64; i += 8)
+ if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
+ new |= (HOST_WIDE_INT) 0xff << i;
+
+ /* We are only called for SImode and DImode. If this is SImode, ensure that
+ we are sign extended to a full word. */
+
+ if (mode == SImode)
+ new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
+
+ if (new != c)
+ {
+ temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
+ if (temp)
+ {
+ if (no_output)
+ return temp;
+ return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
+ target, 0, OPTAB_WIDEN);
+ }
+ }
+#endif
+
+ return 0;
+}
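+
+/* For reference, the LOW/HIGH/EXTRA arithmetic above, pulled out as a
+   standalone sketch (illustrative only, not part of alpha.c; needs
+   <stdint.h> and <assert.h>).  Any sign-extended 32-bit constant C is
+   rebuilt as C == (HIGH << 16) + (EXTRA << 16) + LOW, where LOW and
+   HIGH are signed 16-bit lda/ldah displacements and EXTRA is either 0
+   or 0x4000 (a second ldah):
+
+     static void
+     split_sext32 (int64_t c, int64_t *low, int64_t *high, int64_t *extra)
+     {
+       int64_t tmp;
+
+       *low = ((c & 0xffff) ^ 0x8000) - 0x8000;
+       tmp = c - *low;
+       *high = (((tmp >> 16) & 0xffff) ^ 0x8000) - 0x8000;
+       *extra = 0;
+       if ((*high & 0x8000) != 0 && c >= 0)
+         {
+           *extra = 0x4000;
+           tmp -= 0x40000000;
+           *high = ((tmp >> 16) & 0xffff) - 2 * ((tmp >> 16) & 0x8000);
+         }
+       assert (c == (*high << 16) + (*extra << 16) + *low);
+     }
+
+   E.g. c = 0x7fff8000 gives low = -0x8000 and a provisional high of
+   -0x8000; the fixup then yields extra = 0x4000, high = 0x4000, and
+   indeed (0x4000 << 16) + (0x4000 << 16) - 0x8000 == 0x7fff8000.  */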
+
+/* Try to output insns to set TARGET equal to the constant C if it can be
+   done in N or fewer insns.  Do all computations in MODE.  Returns the
+   place where the output has been placed if it can be done and the insns
+   have been emitted.  If it would take more than N insns, zero is
+   returned and no insns are emitted.  */
+
+static rtx
+alpha_emit_set_const (rtx target, enum machine_mode mode,
+ HOST_WIDE_INT c, int n, bool no_output)
+{
+ enum machine_mode orig_mode = mode;
+ rtx orig_target = target;
+ rtx result = 0;
+ int i;
+
+  /* If we can't make any pseudos, TARGET is an SImode hard register, and
+     we can't load this constant in one insn, do this in DImode.  */
+ if (no_new_pseudos && mode == SImode
+ && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
+ {
+ result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
+ if (result)
+ return result;
+
+ target = no_output ? NULL : gen_lowpart (DImode, target);
+ mode = DImode;
+ }
+ else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
+ {
+ target = no_output ? NULL : gen_lowpart (DImode, target);
+ mode = DImode;
+ }
+
+ /* Try 1 insn, then 2, then up to N. */
+ for (i = 1; i <= n; i++)
+ {
+ result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
+ if (result)
+ {
+ rtx insn, set;
+
+ if (no_output)
+ return result;
+
+ insn = get_last_insn ();
+ set = single_set (insn);
+ if (! CONSTANT_P (SET_SRC (set)))
+ set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
+ break;
+ }
+ }
+
+ /* Allow for the case where we changed the mode of TARGET. */
+ if (result)
+ {
+ if (result == target)
+ result = orig_target;
+ else if (mode != orig_mode)
+ result = gen_lowpart (orig_mode, result);
+ }
+
+ return result;
+}
+
+/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
+   fall back to a straightforward decomposition.  We do this to avoid
+   exponential run times encountered when looking for longer sequences
+   with alpha_emit_set_const.  */
+
+static rtx
+alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
+{
+ HOST_WIDE_INT d1, d2, d3, d4;
+
+ /* Decompose the entire word */
+#if HOST_BITS_PER_WIDE_INT >= 64
+ gcc_assert (c2 == -(c1 < 0));
+ d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
+ c1 -= d1;
+ d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ c1 = (c1 - d2) >> 32;
+ d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
+ c1 -= d3;
+ d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ gcc_assert (c1 == d4);
+#else
+ d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
+ c1 -= d1;
+ d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ gcc_assert (c1 == d2);
+ c2 += (d2 < 0);
+ d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
+ c2 -= d3;
+ d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ gcc_assert (c2 == d4);
+#endif
+
+ /* Construct the high word */
+ if (d4)
+ {
+ emit_move_insn (target, GEN_INT (d4));
+ if (d3)
+ emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
+ }
+ else
+ emit_move_insn (target, GEN_INT (d3));
+
+ /* Shift it into place */
+ emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
+
+ /* Add in the low bits. */
+ if (d2)
+ emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
+ if (d1)
+ emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
+
+ return target;
+}
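+
+/* Worked example of the decomposition above (illustrative values, not
+   from the sources).  The pieces always satisfy
+
+     c == ((d4 + d3) << 32) + d2 + d1
+
+   where D1 and D3 are signed 16-bit lda displacements and D2 and D4
+   are sign-extended multiples of 0x10000, i.e. ldah displacements.
+   For c = 0x1234567890abcdef we get d1 = -0x3211, d2 = -0x6f540000,
+   d3 = 0x5679 and d4 = 0x12340000, so the worst case is the fixed
+   five-insn sequence ldah/lda/sll 32/ldah/lda.  */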
+
+/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
+ the low 64 bits. */
+
+static void
+alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
+{
+ HOST_WIDE_INT i0, i1;
+
+ if (GET_CODE (x) == CONST_VECTOR)
+ x = simplify_subreg (DImode, x, GET_MODE (x), 0);
+
+ if (GET_CODE (x) == CONST_INT)
+ {
+ i0 = INTVAL (x);
+ i1 = -(i0 < 0);
+ }
+ else if (HOST_BITS_PER_WIDE_INT >= 64)
+ {
+ i0 = CONST_DOUBLE_LOW (x);
+ i1 = -(i0 < 0);
+ }
+ else
+ {
+ i0 = CONST_DOUBLE_LOW (x);
+ i1 = CONST_DOUBLE_HIGH (x);
+ }
+
+ *p0 = i0;
+ *p1 = i1;
+}
+
+/* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
+ are willing to load the value into a register via a move pattern.
+ Normally this is all symbolic constants, integral constants that
+ take three or fewer instructions, and floating-point zero. */
+
+bool
+alpha_legitimate_constant_p (rtx x)
+{
+ enum machine_mode mode = GET_MODE (x);
+ HOST_WIDE_INT i0, i1;
+
+ switch (GET_CODE (x))
+ {
+ case CONST:
+ case LABEL_REF:
+ case HIGH:
+ return true;
+
+ case SYMBOL_REF:
+ /* TLS symbols are never valid. */
+ return SYMBOL_REF_TLS_MODEL (x) == 0;
+
+ case CONST_DOUBLE:
+ if (x == CONST0_RTX (mode))
+ return true;
+ if (FLOAT_MODE_P (mode))
+ return false;
+ goto do_integer;
+
+ case CONST_VECTOR:
+ if (x == CONST0_RTX (mode))
+ return true;
+ if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
+ return false;
+ if (GET_MODE_SIZE (mode) != 8)
+ return false;
+ goto do_integer;
+
+ case CONST_INT:
+ do_integer:
+ if (TARGET_BUILD_CONSTANTS)
+ return true;
+ alpha_extract_integer (x, &i0, &i1);
+      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
+ return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+/* Operand 1 is known to be a constant, and should require more than one
+ instruction to load. Emit that multi-part load. */
+
+bool
+alpha_split_const_mov (enum machine_mode mode, rtx *operands)
+{
+ HOST_WIDE_INT i0, i1;
+ rtx temp = NULL_RTX;
+
+ alpha_extract_integer (operands[1], &i0, &i1);
+
+ if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
+ temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
+
+ if (!temp && TARGET_BUILD_CONSTANTS)
+ temp = alpha_emit_set_long_const (operands[0], i0, i1);
+
+ if (temp)
+ {
+ if (!rtx_equal_p (operands[0], temp))
+ emit_move_insn (operands[0], temp);
+ return true;
+ }
+
+ return false;
+}
+
+/* Expand a move instruction; return true if all work is done.
+ We don't handle non-bwx subword loads here. */
+
+bool
+alpha_expand_mov (enum machine_mode mode, rtx *operands)
+{
+ /* If the output is not a register, the input must be. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! reg_or_0_operand (operands[1], mode))
+ operands[1] = force_reg (mode, operands[1]);
+
+ /* Allow legitimize_address to perform some simplifications. */
+ if (mode == Pmode && symbolic_operand (operands[1], mode))
+ {
+ rtx tmp;
+
+ tmp = alpha_legitimize_address (operands[1], operands[0], mode);
+ if (tmp)
+ {
+ if (tmp == operands[0])
+ return true;
+ operands[1] = tmp;
+ return false;
+ }
+ }
+
+ /* Early out for non-constants and valid constants. */
+ if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
+ return false;
+
+ /* Split large integers. */
+ if (GET_CODE (operands[1]) == CONST_INT
+ || GET_CODE (operands[1]) == CONST_DOUBLE
+ || GET_CODE (operands[1]) == CONST_VECTOR)
+ {
+ if (alpha_split_const_mov (mode, operands))
+ return true;
+ }
+
+ /* Otherwise we've nothing left but to drop the thing to memory. */
+ operands[1] = force_const_mem (mode, operands[1]);
+ if (reload_in_progress)
+ {
+ emit_move_insn (operands[0], XEXP (operands[1], 0));
+ operands[1] = copy_rtx (operands[1]);
+ XEXP (operands[1], 0) = operands[0];
+ }
+ else
+ operands[1] = validize_mem (operands[1]);
+ return false;
+}
+
+/* Expand a non-bwx QImode or HImode move instruction;
+ return true if all work is done. */
+
+bool
+alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
+{
+ /* If the output is not a register, the input must be. */
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (mode, operands[1]);
+
+ /* Handle four memory cases, unaligned and aligned for either the input
+ or the output. The only case where we can be called during reload is
+ for aligned loads; all other cases require temporaries. */
+
+ if (GET_CODE (operands[1]) == MEM
+ || (GET_CODE (operands[1]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[1])) == MEM)
+ || (reload_in_progress && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
+ || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[1])) == REG
+ && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
+ {
+ if (aligned_memory_operand (operands[1], mode))
+ {
+ if (reload_in_progress)
+ {
+ emit_insn ((mode == QImode
+ ? gen_reload_inqi_help
+ : gen_reload_inhi_help)
+ (operands[0], operands[1],
+ gen_rtx_REG (SImode, REGNO (operands[0]))));
+ }
+ else
+ {
+ rtx aligned_mem, bitnum;
+ rtx scratch = gen_reg_rtx (SImode);
+ rtx subtarget;
+ bool copyout;
+
+ get_aligned_mem (operands[1], &aligned_mem, &bitnum);
+
+ subtarget = operands[0];
+ if (GET_CODE (subtarget) == REG)
+ subtarget = gen_lowpart (DImode, subtarget), copyout = false;
+ else
+ subtarget = gen_reg_rtx (DImode), copyout = true;
+
+ emit_insn ((mode == QImode
+ ? gen_aligned_loadqi
+ : gen_aligned_loadhi)
+ (subtarget, aligned_mem, bitnum, scratch));
+
+ if (copyout)
+ emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
+ }
+ }
+ else
+ {
+	  /* Don't pass these as parameters since that makes the generated
+	     code depend on parameter evaluation order, which will cause
+	     bootstrap failures.  */
+
+ rtx temp1, temp2, seq, subtarget;
+ bool copyout;
+
+ temp1 = gen_reg_rtx (DImode);
+ temp2 = gen_reg_rtx (DImode);
+
+ subtarget = operands[0];
+ if (GET_CODE (subtarget) == REG)
+ subtarget = gen_lowpart (DImode, subtarget), copyout = false;
+ else
+ subtarget = gen_reg_rtx (DImode), copyout = true;
+
+ seq = ((mode == QImode
+ ? gen_unaligned_loadqi
+ : gen_unaligned_loadhi)
+ (subtarget, get_unaligned_address (operands[1]),
+ temp1, temp2));
+ alpha_set_memflags (seq, operands[1]);
+ emit_insn (seq);
+
+ if (copyout)
+ emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
+ }
+ return true;
+ }
+
+ if (GET_CODE (operands[0]) == MEM
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == MEM)
+ || (reload_in_progress && GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
+ || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+	  && REGNO (SUBREG_REG (operands[0])) >= FIRST_PSEUDO_REGISTER))
+ {
+ if (aligned_memory_operand (operands[0], mode))
+ {
+ rtx aligned_mem, bitnum;
+ rtx temp1 = gen_reg_rtx (SImode);
+ rtx temp2 = gen_reg_rtx (SImode);
+
+ get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+
+ emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
+ temp1, temp2));
+ }
+ else
+ {
+ rtx temp1 = gen_reg_rtx (DImode);
+ rtx temp2 = gen_reg_rtx (DImode);
+ rtx temp3 = gen_reg_rtx (DImode);
+ rtx seq = ((mode == QImode
+ ? gen_unaligned_storeqi
+ : gen_unaligned_storehi)
+ (get_unaligned_address (operands[0]),
+ operands[1], temp1, temp2, temp3));
+
+ alpha_set_memflags (seq, operands[0]);
+ emit_insn (seq);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+/* Implement the movmisalign patterns.  One of the operands is a memory
+   that is not naturally aligned.  Emit instructions to move it.  */
+
+void
+alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
+{
+  /* Honor misaligned loads, as we promised to do.  */
+ if (MEM_P (operands[1]))
+ {
+ rtx tmp;
+
+ if (register_operand (operands[0], mode))
+ tmp = operands[0];
+ else
+ tmp = gen_reg_rtx (mode);
+
+ alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
+ if (tmp != operands[0])
+ emit_move_insn (operands[0], tmp);
+ }
+ else if (MEM_P (operands[0]))
+ {
+ if (!reg_or_0_operand (operands[1], mode))
+ operands[1] = force_reg (mode, operands[1]);
+ alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
+ }
+ else
+ gcc_unreachable ();
+}
+
+/* Generate an unsigned DImode to FP conversion. This is the same code
+ optabs would emit if we didn't have TFmode patterns.
+
+ For SFmode, this is the only construction I've found that can pass
+ gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
+ intermediates will work, because you'll get intermediate rounding
+ that ruins the end result. Some of this could be fixed by turning
+ on round-to-positive-infinity, but that requires diddling the fpsr,
+ which kills performance. I tried turning this around and converting
+   to a negative number, so that I could turn on /m, but either I did
+   it wrong or there's something else, because I wound up with the exact
+ same single-bit error. There is a branch-less form of this same code:
+
+ srl $16,1,$1
+ and $16,1,$2
+ cmplt $16,0,$3
+ or $1,$2,$2
+ cmovge $16,$16,$2
+ itoft $3,$f10
+ itoft $2,$f11
+ cvtqs $f11,$f11
+ adds $f11,$f11,$f0
+ fcmoveq $f10,$f11,$f0
+
+ I'm not using it because it's the same number of instructions as
+ this branch-full form, and it has more serialized long latency
+ instructions on the critical path.
+
+ For DFmode, we can avoid rounding errors by breaking up the word
+ into two pieces, converting them separately, and adding them back:
+
+ LC0: .long 0,0x5f800000
+
+ itoft $16,$f11
+ lda $2,LC0
+ cmplt $16,0,$1
+ cpyse $f11,$f31,$f10
+ cpyse $f31,$f11,$f11
+ s4addq $1,$2,$1
+ lds $f12,0($1)
+ cvtqt $f10,$f10
+ cvtqt $f11,$f11
+ addt $f12,$f10,$f0
+ addt $f0,$f11,$f0
+
+ This doesn't seem to be a clear-cut win over the optabs form.
+ It probably all depends on the distribution of numbers being
+ converted -- in the optabs form, all but high-bit-set has a
+ much lower minimum execution time. */
+
+void
+alpha_emit_floatuns (rtx operands[2])
+{
+ rtx neglab, donelab, i0, i1, f0, in, out;
+ enum machine_mode mode;
+
+ out = operands[0];
+ in = force_reg (DImode, operands[1]);
+ mode = GET_MODE (out);
+ neglab = gen_label_rtx ();
+ donelab = gen_label_rtx ();
+ i0 = gen_reg_rtx (DImode);
+ i1 = gen_reg_rtx (DImode);
+ f0 = gen_reg_rtx (mode);
+
+ emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
+
+ emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
+ emit_jump_insn (gen_jump (donelab));
+ emit_barrier ();
+
+ emit_label (neglab);
+
+ emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
+ emit_insn (gen_anddi3 (i1, in, const1_rtx));
+ emit_insn (gen_iordi3 (i0, i0, i1));
+ emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
+ emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
+
+ emit_label (donelab);
+}
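+
+/* A plain-C model of the branch-full sequence emitted above (sketch
+   only; uint64_t and double stand in for DImode and the FP mode):
+
+     double
+     floatuns (uint64_t x)
+     {
+       if ((int64_t) x >= 0)
+         return (double) (int64_t) x;
+       uint64_t half = (x >> 1) | (x & 1);
+       double f = (double) (int64_t) half;
+       return f + f;
+     }
+
+   Non-negative inputs convert directly.  Negative (i.e. huge) inputs
+   are halved with the lost low bit folded back in, so that the final
+   doubling rounds exactly as the full value would have.  */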
+
+/* Generate the comparison for a conditional branch. */
+
+rtx
+alpha_emit_conditional_branch (enum rtx_code code)
+{
+ enum rtx_code cmp_code, branch_code;
+ enum machine_mode cmp_mode, branch_mode = VOIDmode;
+ rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
+ rtx tem;
+
+ if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
+ {
+ op0 = alpha_emit_xfloating_compare (&code, op0, op1);
+ op1 = const0_rtx;
+ alpha_compare.fp_p = 0;
+ }
+
+ /* The general case: fold the comparison code to the types of compares
+ that we have, choosing the branch as necessary. */
+ switch (code)
+ {
+ case EQ: case LE: case LT: case LEU: case LTU:
+ case UNORDERED:
+ /* We have these compares: */
+ cmp_code = code, branch_code = NE;
+ break;
+
+ case NE:
+ case ORDERED:
+ /* These must be reversed. */
+ cmp_code = reverse_condition (code), branch_code = EQ;
+ break;
+
+ case GE: case GT: case GEU: case GTU:
+ /* For FP, we swap them, for INT, we reverse them. */
+ if (alpha_compare.fp_p)
+ {
+ cmp_code = swap_condition (code);
+ branch_code = NE;
+ tem = op0, op0 = op1, op1 = tem;
+ }
+ else
+ {
+ cmp_code = reverse_condition (code);
+ branch_code = EQ;
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (alpha_compare.fp_p)
+ {
+ cmp_mode = DFmode;
+ if (flag_unsafe_math_optimizations)
+ {
+ /* When we are not as concerned about non-finite values, and we
+ are comparing against zero, we can branch directly. */
+ if (op1 == CONST0_RTX (DFmode))
+ cmp_code = UNKNOWN, branch_code = code;
+ else if (op0 == CONST0_RTX (DFmode))
+ {
+ /* Undo the swap we probably did just above. */
+ tem = op0, op0 = op1, op1 = tem;
+ branch_code = swap_condition (cmp_code);
+ cmp_code = UNKNOWN;
+ }
+ }
+ else
+ {
+ /* ??? We mark the branch mode to be CCmode to prevent the
+ compare and branch from being combined, since the compare
+ insn follows IEEE rules that the branch does not. */
+ branch_mode = CCmode;
+ }
+ }
+ else
+ {
+ cmp_mode = DImode;
+
+ /* The following optimizations are only for signed compares. */
+ if (code != LEU && code != LTU && code != GEU && code != GTU)
+ {
+ /* Whee. Compare and branch against 0 directly. */
+ if (op1 == const0_rtx)
+ cmp_code = UNKNOWN, branch_code = code;
+
+	  /* If the constant doesn't fit into an immediate, but can
+ be generated by lda/ldah, we adjust the argument and
+ compare against zero, so we can use beq/bne directly. */
+ /* ??? Don't do this when comparing against symbols, otherwise
+ we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
+ be declared false out of hand (at least for non-weak). */
+ else if (GET_CODE (op1) == CONST_INT
+ && (code == EQ || code == NE)
+ && !(symbolic_operand (op0, VOIDmode)
+ || (GET_CODE (op0) == REG && REG_POINTER (op0))))
+ {
+ HOST_WIDE_INT v = INTVAL (op1), n = -v;
+
+ if (! CONST_OK_FOR_LETTER_P (v, 'I')
+ && (CONST_OK_FOR_LETTER_P (n, 'K')
+ || CONST_OK_FOR_LETTER_P (n, 'L')))
+ {
+ cmp_code = PLUS, branch_code = code;
+ op1 = GEN_INT (n);
+ }
+ }
+ }
+
+ if (!reg_or_0_operand (op0, DImode))
+ op0 = force_reg (DImode, op0);
+ if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
+ op1 = force_reg (DImode, op1);
+ }
+
+ /* Emit an initial compare instruction, if necessary. */
+ tem = op0;
+ if (cmp_code != UNKNOWN)
+ {
+ tem = gen_reg_rtx (cmp_mode);
+ emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
+ }
+
+ /* Zero the operands. */
+ memset (&alpha_compare, 0, sizeof (alpha_compare));
+
+ /* Return the branch comparison. */
+ return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
+}
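+
+/* Example of the EQ/NE adjustment above (illustrative values): the
+   comparison X == 0x12340000 cannot use cmpeq's 8-bit immediate, but
+   -0x12340000 is a valid ldah displacement ('L'), so we emit
+
+     ldah  t,-0x1234(x)	# t = x - 0x12340000
+     beq   t,target
+
+   instead of building the constant and using cmpeq/bne.  */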
+
+/* Certain simplifications can be done to make invalid setcc operations
+   valid.  Return the final comparison, or NULL if we can't make it work.  */
+
+rtx
+alpha_emit_setcc (enum rtx_code code)
+{
+ enum rtx_code cmp_code;
+ rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
+ int fp_p = alpha_compare.fp_p;
+ rtx tmp;
+
+ /* Zero the operands. */
+ memset (&alpha_compare, 0, sizeof (alpha_compare));
+
+ if (fp_p && GET_MODE (op0) == TFmode)
+ {
+ op0 = alpha_emit_xfloating_compare (&code, op0, op1);
+ op1 = const0_rtx;
+ fp_p = 0;
+ }
+
+ if (fp_p && !TARGET_FIX)
+ return NULL_RTX;
+
+ /* The general case: fold the comparison code to the types of compares
+ that we have, choosing the branch as necessary. */
+
+ cmp_code = UNKNOWN;
+ switch (code)
+ {
+ case EQ: case LE: case LT: case LEU: case LTU:
+ case UNORDERED:
+ /* We have these compares. */
+ if (fp_p)
+ cmp_code = code, code = NE;
+ break;
+
+ case NE:
+ if (!fp_p && op1 == const0_rtx)
+ break;
+ /* FALLTHRU */
+
+ case ORDERED:
+ cmp_code = reverse_condition (code);
+ code = EQ;
+ break;
+
+ case GE: case GT: case GEU: case GTU:
+ /* These normally need swapping, but for integer zero we have
+ special patterns that recognize swapped operands. */
+ if (!fp_p && op1 == const0_rtx)
+ break;
+ code = swap_condition (code);
+ if (fp_p)
+ cmp_code = code, code = NE;
+ tmp = op0, op0 = op1, op1 = tmp;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (!fp_p)
+ {
+ if (!register_operand (op0, DImode))
+ op0 = force_reg (DImode, op0);
+ if (!reg_or_8bit_operand (op1, DImode))
+ op1 = force_reg (DImode, op1);
+ }
+
+ /* Emit an initial compare instruction, if necessary. */
+ if (cmp_code != UNKNOWN)
+ {
+ enum machine_mode mode = fp_p ? DFmode : DImode;
+
+ tmp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, tmp,
+ gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
+
+ op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
+ op1 = const0_rtx;
+ }
+
+ /* Return the setcc comparison. */
+ return gen_rtx_fmt_ee (code, DImode, op0, op1);
+}
+
+
+/* Rewrite a comparison against zero CMP of the form
+ (CODE (cc0) (const_int 0)) so it can be written validly in
+ a conditional move (if_then_else CMP ...).
+ If both of the operands that set cc0 are nonzero we must emit
+ an insn to perform the compare (it can't be done within
+ the conditional move). */
+
+rtx
+alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
+{
+ enum rtx_code code = GET_CODE (cmp);
+ enum rtx_code cmov_code = NE;
+ rtx op0 = alpha_compare.op0;
+ rtx op1 = alpha_compare.op1;
+ int fp_p = alpha_compare.fp_p;
+ enum machine_mode cmp_mode
+ = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
+ enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
+ enum machine_mode cmov_mode = VOIDmode;
+ int local_fast_math = flag_unsafe_math_optimizations;
+ rtx tem;
+
+ /* Zero the operands. */
+ memset (&alpha_compare, 0, sizeof (alpha_compare));
+
+ if (fp_p != FLOAT_MODE_P (mode))
+ {
+ enum rtx_code cmp_code;
+
+ if (! TARGET_FIX)
+ return 0;
+
+ /* If we have fp<->int register move instructions, do a cmov by
+ performing the comparison in fp registers, and move the
+ zero/nonzero value to integer registers, where we can then
+ use a normal cmov, or vice-versa. */
+
+ switch (code)
+ {
+ case EQ: case LE: case LT: case LEU: case LTU:
+ /* We have these compares. */
+ cmp_code = code, code = NE;
+ break;
+
+ case NE:
+ /* This must be reversed. */
+ cmp_code = EQ, code = EQ;
+ break;
+
+ case GE: case GT: case GEU: case GTU:
+ /* These normally need swapping, but for integer zero we have
+ special patterns that recognize swapped operands. */
+ if (!fp_p && op1 == const0_rtx)
+ cmp_code = code, code = NE;
+ else
+ {
+ cmp_code = swap_condition (code);
+ code = NE;
+ tem = op0, op0 = op1, op1 = tem;
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ tem = gen_reg_rtx (cmp_op_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, tem,
+ gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
+ op0, op1)));
+
+ cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
+ op0 = gen_lowpart (cmp_op_mode, tem);
+ op1 = CONST0_RTX (cmp_op_mode);
+ fp_p = !fp_p;
+ local_fast_math = 1;
+ }
+
+ /* We may be able to use a conditional move directly.
+ This avoids emitting spurious compares. */
+ if (signed_comparison_operator (cmp, VOIDmode)
+ && (!fp_p || local_fast_math)
+ && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
+ return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
+
+ /* We can't put the comparison inside the conditional move;
+ emit a compare instruction and put that inside the
+ conditional move. Make sure we emit only comparisons we have;
+ swap or reverse as necessary. */
+
+ if (no_new_pseudos)
+ return NULL_RTX;
+
+ switch (code)
+ {
+ case EQ: case LE: case LT: case LEU: case LTU:
+ /* We have these compares: */
+ break;
+
+ case NE:
+ /* This must be reversed. */
+ code = reverse_condition (code);
+ cmov_code = EQ;
+ break;
+
+ case GE: case GT: case GEU: case GTU:
+ /* These must be swapped. */
+ if (op1 != CONST0_RTX (cmp_mode))
+ {
+ code = swap_condition (code);
+ tem = op0, op0 = op1, op1 = tem;
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (!fp_p)
+ {
+ if (!reg_or_0_operand (op0, DImode))
+ op0 = force_reg (DImode, op0);
+ if (!reg_or_8bit_operand (op1, DImode))
+ op1 = force_reg (DImode, op1);
+ }
+
+ /* ??? We mark the branch mode to be CCmode to prevent the compare
+ and cmov from being combined, since the compare insn follows IEEE
+ rules that the cmov does not. */
+ if (fp_p && !local_fast_math)
+ cmov_mode = CCmode;
+
+ tem = gen_reg_rtx (cmp_op_mode);
+ emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
+ return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
+}
+
+/* Simplify a conditional move of two constants into a setcc with
+ arithmetic. This is done with a splitter since combine would
+ just undo the work if done during code generation. It also catches
+ cases we wouldn't have before cse. */
+
+int
+alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
+ rtx t_rtx, rtx f_rtx)
+{
+ HOST_WIDE_INT t, f, diff;
+ enum machine_mode mode;
+ rtx target, subtarget, tmp;
+
+ mode = GET_MODE (dest);
+ t = INTVAL (t_rtx);
+ f = INTVAL (f_rtx);
+ diff = t - f;
+
+ if (((code == NE || code == EQ) && diff < 0)
+ || (code == GE || code == GT))
+ {
+ code = reverse_condition (code);
+ diff = t, t = f, f = diff;
+ diff = t - f;
+ }
+
+ subtarget = target = dest;
+ if (mode != DImode)
+ {
+ target = gen_lowpart (DImode, dest);
+ if (! no_new_pseudos)
+ subtarget = gen_reg_rtx (DImode);
+ else
+ subtarget = target;
+ }
+ /* Below, we must be careful to use copy_rtx on target and subtarget
+ in intermediate insns, as they may be a subreg rtx, which may not
+ be shared. */
+
+ if (f == 0 && exact_log2 (diff) > 0
+ /* On EV6, we've got enough shifters to make non-arithmetic shifts
+ viable over a longer latency cmove. On EV5, the E0 slot is a
+ scarce resource, and on EV4 shift has the same latency as a cmove. */
+ && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
+ {
+ tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
+
+ tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
+ GEN_INT (exact_log2 (t)));
+ emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
+ }
+ else if (f == 0 && t == -1)
+ {
+ tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
+
+ emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
+ }
+ else if (diff == 1 || diff == 4 || diff == 8)
+ {
+ rtx add_op;
+
+ tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
+
+ if (diff == 1)
+ emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
+ else
+ {
+ add_op = GEN_INT (f);
+ if (sext_add_operand (add_op, mode))
+ {
+ tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
+ GEN_INT (diff));
+ tmp = gen_rtx_PLUS (DImode, tmp, add_op);
+ emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
+ }
+ else
+ return 0;
+ }
+ }
+ else
+ return 0;
+
+ return 1;
+}
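+
+/* Examples of the splits performed above (illustrative):
+
+     r = c ? 8 : 0    =>  t = (c CMP 0);  r = t << 3;
+     r = c ? -1 : 0   =>  t = (c CMP 0);  r = -t;
+     r = c ? 14 : 10  =>  t = (c CMP 0);  r = t * 4 + 10;	(s4addq)
+
+   where T is the 0/1 result of the setcc.  */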
+
+/* Look up the X_floating library function name for the given
+   operation.  */
+
+struct xfloating_op GTY(())
+{
+ const enum rtx_code code;
+ const char *const GTY((skip)) osf_func;
+ const char *const GTY((skip)) vms_func;
+ rtx libcall;
+};
+
+static GTY(()) struct xfloating_op xfloating_ops[] =
+{
+ { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
+ { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
+ { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
+ { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
+ { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
+ { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
+ { LT, "_OtsLssX", "OTS$LSS_X", 0 },
+ { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
+ { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
+ { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
+ { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
+ { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
+ { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
+ { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
+ { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
+};
+
+static GTY(()) struct xfloating_op vax_cvt_ops[] =
+{
+ { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
+ { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
+};
+
+static rtx
+alpha_lookup_xfloating_lib_func (enum rtx_code code)
+{
+ struct xfloating_op *ops = xfloating_ops;
+ long n = ARRAY_SIZE (xfloating_ops);
+ long i;
+
+ gcc_assert (TARGET_HAS_XFLOATING_LIBS);
+
+ /* How irritating. Nothing to key off for the main table. */
+ if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
+ {
+ ops = vax_cvt_ops;
+ n = ARRAY_SIZE (vax_cvt_ops);
+ }
+
+ for (i = 0; i < n; ++i, ++ops)
+ if (ops->code == code)
+ {
+ rtx func = ops->libcall;
+ if (!func)
+ {
+ func = init_one_libfunc (TARGET_ABI_OPEN_VMS
+ ? ops->vms_func : ops->osf_func);
+ ops->libcall = func;
+ }
+ return func;
+ }
+
+ gcc_unreachable ();
+}
+
+/* Most X_floating operations take the rounding mode as an argument.
+ Compute that here. */
+
+static int
+alpha_compute_xfloating_mode_arg (enum rtx_code code,
+ enum alpha_fp_rounding_mode round)
+{
+ int mode;
+
+ switch (round)
+ {
+ case ALPHA_FPRM_NORM:
+ mode = 2;
+ break;
+ case ALPHA_FPRM_MINF:
+ mode = 1;
+ break;
+ case ALPHA_FPRM_CHOP:
+ mode = 0;
+ break;
+ case ALPHA_FPRM_DYN:
+ mode = 4;
+ break;
+ default:
+ gcc_unreachable ();
+
+ /* XXX For reference, round to +inf is mode = 3. */
+ }
+
+ if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
+ mode |= 0x10000;
+
+ return mode;
+}
+
+/* Emit an X_floating library function call.
+
+ Note that these functions do not follow normal calling conventions:
+ TFmode arguments are passed in two integer registers (as opposed to
+ indirect); TFmode return values appear in R16+R17.
+
+ FUNC is the function to call.
+ TARGET is where the output belongs.
+ OPERANDS are the inputs.
+ NOPERANDS is the count of inputs.
+ EQUIV is the expression equivalent for the function.
+*/
+
+static void
+alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
+ int noperands, rtx equiv)
+{
+ rtx usage = NULL_RTX, tmp, reg;
+ int regno = 16, i;
+
+ start_sequence ();
+
+ for (i = 0; i < noperands; ++i)
+ {
+ switch (GET_MODE (operands[i]))
+ {
+ case TFmode:
+ reg = gen_rtx_REG (TFmode, regno);
+ regno += 2;
+ break;
+
+ case DFmode:
+ reg = gen_rtx_REG (DFmode, regno + 32);
+ regno += 1;
+ break;
+
+ case VOIDmode:
+ gcc_assert (GET_CODE (operands[i]) == CONST_INT);
+ /* FALLTHRU */
+ case DImode:
+ reg = gen_rtx_REG (DImode, regno);
+ regno += 1;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ emit_move_insn (reg, operands[i]);
+ usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
+ }
+
+ switch (GET_MODE (target))
+ {
+ case TFmode:
+ reg = gen_rtx_REG (TFmode, 16);
+ break;
+ case DFmode:
+ reg = gen_rtx_REG (DFmode, 32);
+ break;
+ case DImode:
+ reg = gen_rtx_REG (DImode, 0);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ tmp = gen_rtx_MEM (QImode, func);
+ tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
+ const0_rtx, const0_rtx));
+ CALL_INSN_FUNCTION_USAGE (tmp) = usage;
+ CONST_OR_PURE_CALL_P (tmp) = 1;
+
+ tmp = get_insns ();
+ end_sequence ();
+
+ emit_libcall_block (tmp, target, reg, equiv);
+}
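+
+/* For example, the call that alpha_emit_xfloating_arith generates
+   below for a TFmode add assigns registers as follows (per the
+   regno arithmetic above):
+
+     $16,$17  <-  operands[1]	(first TFmode input)
+     $18,$19  <-  operands[2]	(second TFmode input)
+     $20      <-  rounding-mode argument
+     jsr      _OtsAddX		(OTS$ADD_X on VMS)
+     $16,$17  ->  TFmode result
+  */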
+
+/* Emit an X_floating library function call for arithmetic (+,-,*,/). */
+
+void
+alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
+{
+ rtx func;
+ int mode;
+ rtx out_operands[3];
+
+ func = alpha_lookup_xfloating_lib_func (code);
+ mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
+
+ out_operands[0] = operands[1];
+ out_operands[1] = operands[2];
+ out_operands[2] = GEN_INT (mode);
+ alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
+ gen_rtx_fmt_ee (code, TFmode, operands[1],
+ operands[2]));
+}
+
+/* Emit an X_floating library function call for a comparison. */
+
+static rtx
+alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
+{
+ enum rtx_code cmp_code, res_code;
+ rtx func, out, operands[2];
+
+ /* X_floating library comparison functions return
+ -1 unordered
+ 0 false
+ 1 true
+ Convert the compare against the raw return value. */
+
+ cmp_code = *pcode;
+ switch (cmp_code)
+ {
+ case UNORDERED:
+ cmp_code = EQ;
+ res_code = LT;
+ break;
+ case ORDERED:
+ cmp_code = EQ;
+ res_code = GE;
+ break;
+ case NE:
+ res_code = NE;
+ break;
+ case EQ:
+ case LT:
+ case GT:
+ case LE:
+ case GE:
+ res_code = GT;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ *pcode = res_code;
+
+ func = alpha_lookup_xfloating_lib_func (cmp_code);
+
+ operands[0] = op0;
+ operands[1] = op1;
+ out = gen_reg_rtx (DImode);
+
+ /* ??? Strange mode for equiv because what's actually returned
+ is -1,0,1, not a proper boolean value. */
+ alpha_emit_xfloating_libcall (func, out, operands, 2,
+ gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
+
+ return out;
+}
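+
+/* Example of the remapping above: for UNORDERED (a, b) we call the EQ
+   routine and test its raw result with LT, since only the unordered
+   case returns a negative value:
+
+     _OtsEqlX (a, b)  ->  -1	unordered
+                           0	a != b
+                           1	a == b
+
+   so UNORDERED becomes (result < 0).  */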
+
+/* Emit an X_floating library function call for a conversion. */
+
+void
+alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
+{
+ int noperands = 1, mode;
+ rtx out_operands[2];
+ rtx func;
+ enum rtx_code code = orig_code;
+
+ if (code == UNSIGNED_FIX)
+ code = FIX;
+
+ func = alpha_lookup_xfloating_lib_func (code);
+
+ out_operands[0] = operands[1];
+
+ switch (code)
+ {
+ case FIX:
+ mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
+ out_operands[1] = GEN_INT (mode);
+ noperands = 2;
+ break;
+ case FLOAT_TRUNCATE:
+ mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
+ out_operands[1] = GEN_INT (mode);
+ noperands = 2;
+ break;
+ default:
+ break;
+ }
+
+ alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
+ gen_rtx_fmt_e (orig_code,
+ GET_MODE (operands[0]),
+ operands[1]));
+}
+
+/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
+ DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
+ guarantee that the sequence
+ set (OP[0] OP[2])
+ set (OP[1] OP[3])
+ is valid. Naturally, output operand ordering is little-endian.
+ This is used by *movtf_internal and *movti_internal. */
+
+void
+alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
+ bool fixup_overlap)
+{
+ switch (GET_CODE (operands[1]))
+ {
+ case REG:
+ operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
+ operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
+ break;
+
+ case MEM:
+ operands[3] = adjust_address (operands[1], DImode, 8);
+ operands[2] = adjust_address (operands[1], DImode, 0);
+ break;
+
+ case CONST_INT:
+ case CONST_DOUBLE:
+ gcc_assert (operands[1] == CONST0_RTX (mode));
+ operands[2] = operands[3] = const0_rtx;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ switch (GET_CODE (operands[0]))
+ {
+ case REG:
+ operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
+ operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
+ break;
+
+ case MEM:
+ operands[1] = adjust_address (operands[0], DImode, 8);
+ operands[0] = adjust_address (operands[0], DImode, 0);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
+ {
+ rtx tmp;
+ tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
+ tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
+ }
+}
+
+/* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
+ op2 is a register containing the sign bit, operation is the
+ logical operation to be performed. */
+
+void
+alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
+{
+ rtx high_bit = operands[2];
+ rtx scratch;
+ int move;
+
+ alpha_split_tmode_pair (operands, TFmode, false);
+
+ /* Detect three flavors of operand overlap. */
+ move = 1;
+ if (rtx_equal_p (operands[0], operands[2]))
+ move = 0;
+ else if (rtx_equal_p (operands[1], operands[2]))
+ {
+ if (rtx_equal_p (operands[0], high_bit))
+ move = 2;
+ else
+ move = -1;
+ }
+
+ if (move < 0)
+ emit_move_insn (operands[0], operands[2]);
+
+ /* ??? If the destination overlaps both source tf and high_bit, then
+ assume source tf is dead in its entirety and use the other half
+ for a scratch register. Otherwise "scratch" is just the proper
+ destination register. */
+ scratch = operands[move < 2 ? 1 : 3];
+
+ emit_insn ((*operation) (scratch, high_bit, operands[3]));
+
+ if (move > 0)
+ {
+ emit_move_insn (operands[0], operands[2]);
+ if (move > 1)
+ emit_move_insn (operands[1], scratch);
+ }
+}
+
+/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
+ unaligned data:
+
+ unsigned: signed:
+ word: ldq_u r1,X(r11) ldq_u r1,X(r11)
+ ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
+ lda r3,X(r11) lda r3,X+2(r11)
+ extwl r1,r3,r1 extql r1,r3,r1
+ extwh r2,r3,r2 extqh r2,r3,r2
+	 or r1,r2,r1		or r1,r2,r1
+ sra r1,48,r1
+
+ long: ldq_u r1,X(r11) ldq_u r1,X(r11)
+ ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
+ lda r3,X(r11) lda r3,X(r11)
+ extll r1,r3,r1 extll r1,r3,r1
+ extlh r2,r3,r2 extlh r2,r3,r2
+	 or r1,r2,r1		addl r1,r2,r1
+
+ quad: ldq_u r1,X(r11)
+ ldq_u r2,X+7(r11)
+ lda r3,X(r11)
+ extql r1,r3,r1
+ extqh r2,r3,r2
+	 or r1,r2,r1
+*/
+
+void
+alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
+ HOST_WIDE_INT ofs, int sign)
+{
+ rtx meml, memh, addr, extl, exth, tmp, mema;
+ enum machine_mode mode;
+
+ if (TARGET_BWX && size == 2)
+ {
+ meml = adjust_address (mem, QImode, ofs);
+ memh = adjust_address (mem, QImode, ofs+1);
+ if (BYTES_BIG_ENDIAN)
+ tmp = meml, meml = memh, memh = tmp;
+ extl = gen_reg_rtx (DImode);
+ exth = gen_reg_rtx (DImode);
+ emit_insn (gen_zero_extendqidi2 (extl, meml));
+ emit_insn (gen_zero_extendqidi2 (exth, memh));
+ exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
+ NULL, 1, OPTAB_LIB_WIDEN);
+ addr = expand_simple_binop (DImode, IOR, extl, exth,
+ NULL, 1, OPTAB_LIB_WIDEN);
+
+ if (sign && GET_MODE (tgt) != HImode)
+ {
+ addr = gen_lowpart (HImode, addr);
+ emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
+ }
+ else
+ {
+ if (GET_MODE (tgt) != DImode)
+ addr = gen_lowpart (GET_MODE (tgt), addr);
+ emit_move_insn (tgt, addr);
+ }
+ return;
+ }
+
+ meml = gen_reg_rtx (DImode);
+ memh = gen_reg_rtx (DImode);
+ addr = gen_reg_rtx (DImode);
+ extl = gen_reg_rtx (DImode);
+ exth = gen_reg_rtx (DImode);
+
+ mema = XEXP (mem, 0);
+ if (GET_CODE (mema) == LO_SUM)
+ mema = force_reg (Pmode, mema);
+
+ /* AND addresses cannot be in any alias set, since they may implicitly
+ alias surrounding code. Ideally we'd have some alias set that
+ covered all types except those with alignment 8 or higher. */
+
+ tmp = change_address (mem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (mema, ofs),
+ GEN_INT (-8)));
+ set_mem_alias_set (tmp, 0);
+ emit_move_insn (meml, tmp);
+
+ tmp = change_address (mem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (mema, ofs + size - 1),
+ GEN_INT (-8)));
+ set_mem_alias_set (tmp, 0);
+ emit_move_insn (memh, tmp);
+
+ if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
+ {
+ emit_move_insn (addr, plus_constant (mema, -1));
+
+ emit_insn (gen_extqh_be (extl, meml, addr));
+ emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
+
+ addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
+ addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
+ addr, 1, OPTAB_WIDEN);
+ }
+ else if (sign && size == 2)
+ {
+ emit_move_insn (addr, plus_constant (mema, ofs+2));
+
+ emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
+ emit_insn (gen_extqh_le (exth, memh, addr));
+
+      /* We must use tgt here for the target.  The alpha-vms port fails if
+	 we use addr for the target, because addr is marked as a pointer and
+	 combine knows that pointers are always sign-extended 32-bit
+	 values.  */
+ addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
+ addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
+ addr, 1, OPTAB_WIDEN);
+ }
+ else
+ {
+ if (WORDS_BIG_ENDIAN)
+ {
+ emit_move_insn (addr, plus_constant (mema, ofs+size-1));
+ switch ((int) size)
+ {
+ case 2:
+ emit_insn (gen_extwh_be (extl, meml, addr));
+ mode = HImode;
+ break;
+
+ case 4:
+ emit_insn (gen_extlh_be (extl, meml, addr));
+ mode = SImode;
+ break;
+
+ case 8:
+ emit_insn (gen_extqh_be (extl, meml, addr));
+ mode = DImode;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
+ }
+ else
+ {
+ emit_move_insn (addr, plus_constant (mema, ofs));
+ emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
+ switch ((int) size)
+ {
+ case 2:
+ emit_insn (gen_extwh_le (exth, memh, addr));
+ mode = HImode;
+ break;
+
+ case 4:
+ emit_insn (gen_extlh_le (exth, memh, addr));
+ mode = SImode;
+ break;
+
+ case 8:
+ emit_insn (gen_extqh_le (exth, memh, addr));
+ mode = DImode;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
+ gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
+ sign, OPTAB_WIDEN);
+ }
+
+ if (addr != tgt)
+ emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
+}
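+
+/* A plain-C model of the little-endian path above, for a SIZE-byte
+   load from unaligned address A (sketch only; load8 stands for the
+   ldq_u quadword access):
+
+     uint64_t lo = load8 (a & ~7);
+     uint64_t hi = load8 ((a + size - 1) & ~7);
+     unsigned s = (a & 7) * 8;
+     uint64_t v = (lo >> s) | (s ? hi << (64 - s) : 0);
+
+   after which V is masked or sign-extended down to SIZE bytes.  When
+   the access does not actually cross a quadword boundary, both loads
+   hit the same quadword and the OR merely brings in bits that the
+   final masking discards.  */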
+
+/* Similarly, use ins and msk instructions to perform unaligned stores. */
+
+void
+alpha_expand_unaligned_store (rtx dst, rtx src,
+ HOST_WIDE_INT size, HOST_WIDE_INT ofs)
+{
+ rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
+
+ if (TARGET_BWX && size == 2)
+ {
+ if (src != const0_rtx)
+ {
+ dstl = gen_lowpart (QImode, src);
+ dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
+ NULL, 1, OPTAB_LIB_WIDEN);
+ dsth = gen_lowpart (QImode, dsth);
+ }
+ else
+ dstl = dsth = const0_rtx;
+
+ meml = adjust_address (dst, QImode, ofs);
+ memh = adjust_address (dst, QImode, ofs+1);
+ if (BYTES_BIG_ENDIAN)
+ addr = meml, meml = memh, memh = addr;
+
+ emit_move_insn (meml, dstl);
+ emit_move_insn (memh, dsth);
+ return;
+ }
+
+ dstl = gen_reg_rtx (DImode);
+ dsth = gen_reg_rtx (DImode);
+ insl = gen_reg_rtx (DImode);
+ insh = gen_reg_rtx (DImode);
+
+ dsta = XEXP (dst, 0);
+ if (GET_CODE (dsta) == LO_SUM)
+ dsta = force_reg (Pmode, dsta);
+
+ /* AND addresses cannot be in any alias set, since they may implicitly
+ alias surrounding code. Ideally we'd have some alias set that
+ covered all types except those with alignment 8 or higher. */
+
+ meml = change_address (dst, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (dsta, ofs),
+ GEN_INT (-8)));
+ set_mem_alias_set (meml, 0);
+
+ memh = change_address (dst, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (dsta, ofs + size - 1),
+ GEN_INT (-8)));
+ set_mem_alias_set (memh, 0);
+
+ emit_move_insn (dsth, memh);
+ emit_move_insn (dstl, meml);
+ if (WORDS_BIG_ENDIAN)
+ {
+ addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
+
+ if (src != const0_rtx)
+ {
+ switch ((int) size)
+ {
+ case 2:
+ emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
+ break;
+ case 4:
+ emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
+ break;
+ case 8:
+ emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
+ break;
+ }
+ emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
+ GEN_INT (size*8), addr));
+ }
+
+ switch ((int) size)
+ {
+ case 2:
+ emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
+ break;
+ case 4:
+ {
+ rtx msk = immed_double_const (0xffffffff, 0, DImode);
+ emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
+ break;
+ }
+ case 8:
+ emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
+ break;
+ }
+
+ emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
+ }
+ else
+ {
+ addr = copy_addr_to_reg (plus_constant (dsta, ofs));
+
+ if (src != CONST0_RTX (GET_MODE (src)))
+ {
+ emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
+ GEN_INT (size*8), addr));
+
+ switch ((int) size)
+ {
+ case 2:
+ emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
+ break;
+ case 4:
+ emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
+ break;
+ case 8:
+ emit_insn (gen_insql_le (insl, src, addr));
+ break;
+ }
+ }
+
+ emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
+
+ switch ((int) size)
+ {
+ case 2:
+ emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
+ break;
+ case 4:
+ {
+ rtx msk = immed_double_const (0xffffffff, 0, DImode);
+ emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
+ break;
+ }
+ case 8:
+ emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
+ break;
+ }
+ }
+
+ if (src != CONST0_RTX (GET_MODE (src)))
+ {
+ dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
+ dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
+ }
+
+ if (WORDS_BIG_ENDIAN)
+ {
+ emit_move_insn (meml, dstl);
+ emit_move_insn (memh, dsth);
+ }
+ else
+ {
+      /* Must store high before low for the degenerate aligned case.  */
+ emit_move_insn (memh, dsth);
+ emit_move_insn (meml, dstl);
+ }
+}
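+
+/* The matching plain-C model of the little-endian store path above
+   (sketch only; load8/store8 stand for ldq_u/stq_u, and M is the mask
+   of the SIZE low bytes):
+
+     uint64_t lo = load8 (a & ~7), hi = load8 ((a + size - 1) & ~7);
+     unsigned s = (a & 7) * 8;
+     uint64_t m = size == 8 ? ~(uint64_t) 0 : ((uint64_t) 1 << size * 8) - 1;
+
+     if (s)
+       hi = (hi & ~(m >> (64 - s))) | ((src & m) >> (64 - s));
+     lo = (lo & ~(m << s)) | ((src & m) << s);
+
+     store8 ((a + size - 1) & ~7, hi);
+     store8 (a & ~7, lo);
+
+   The msk insns correspond to the "& ~..." terms and the ins insns to
+   the shifted SRC terms; the high quadword is stored first so that in
+   the aligned, non-crossing case, where both addresses coincide, the
+   final store leaves the correct merged value.  */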
+
+/* The block move code tries to maximize speed by separating loads and
+ stores at the expense of register pressure: we load all of the data
+   before we store it back out.  There are two secondary effects worth
+   mentioning: it speeds copying to and from both aligned and unaligned
+   buffers, and it makes the code significantly easier to write.  */
+
+#define MAX_MOVE_WORDS 8
+
+/* Load an integral number of consecutive unaligned quadwords. */
+
+static void
+alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
+ HOST_WIDE_INT words, HOST_WIDE_INT ofs)
+{
+ rtx const im8 = GEN_INT (-8);
+ rtx const i64 = GEN_INT (64);
+ rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
+ rtx sreg, areg, tmp, smema;
+ HOST_WIDE_INT i;
+
+ smema = XEXP (smem, 0);
+ if (GET_CODE (smema) == LO_SUM)
+ smema = force_reg (Pmode, smema);
+
+ /* Generate all the tmp registers we need. */
+ for (i = 0; i < words; ++i)
+ {
+ data_regs[i] = out_regs[i];
+ ext_tmps[i] = gen_reg_rtx (DImode);
+ }
+ data_regs[words] = gen_reg_rtx (DImode);
+
+ if (ofs != 0)
+ smem = adjust_address (smem, GET_MODE (smem), ofs);
+
+ /* Load up all of the source data. */
+ for (i = 0; i < words; ++i)
+ {
+ tmp = change_address (smem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (smema, 8*i),
+ im8));
+ set_mem_alias_set (tmp, 0);
+ emit_move_insn (data_regs[i], tmp);
+ }
+
+ tmp = change_address (smem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (smema, 8*words - 1),
+ im8));
+ set_mem_alias_set (tmp, 0);
+ emit_move_insn (data_regs[words], tmp);
+
+ /* Extract the half-word fragments. Unfortunately DEC decided to make
+ extxh with offset zero a noop instead of zeroing the register, so
+ we must take care of that edge condition ourselves with cmov. */
+
+ sreg = copy_addr_to_reg (smema);
+ areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
+ 1, OPTAB_WIDEN);
+ if (WORDS_BIG_ENDIAN)
+ emit_move_insn (sreg, plus_constant (sreg, 7));
+ for (i = 0; i < words; ++i)
+ {
+ if (WORDS_BIG_ENDIAN)
+ {
+ emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
+ emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
+ }
+ else
+ {
+ emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
+ emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
+ }
+ emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
+ gen_rtx_IF_THEN_ELSE (DImode,
+ gen_rtx_EQ (DImode, areg,
+ const0_rtx),
+ const0_rtx, ext_tmps[i])));
+ }
+
+ /* Merge the half-words into whole words. */
+ for (i = 0; i < words; ++i)
+ {
+ out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
+ ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
+ }
+}
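+
+/* I.e., in the little-endian case each output word becomes
+
+     out[i] = (quad[i] >> s) | (s ? quad[i+1] << (64 - s) : 0)
+
+   with s = (smema & 7) * 8; the cmov on AREG supplies the ": 0"
+   alternative that the hardware extqh lacks.  */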
+
+/* Store an integral number of consecutive unaligned quadwords. DATA_REGS
+ may be NULL to store zeros. */
+
+static void
+alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
+ HOST_WIDE_INT words, HOST_WIDE_INT ofs)
+{
+ rtx const im8 = GEN_INT (-8);
+ rtx const i64 = GEN_INT (64);
+ rtx ins_tmps[MAX_MOVE_WORDS];
+ rtx st_tmp_1, st_tmp_2, dreg;
+ rtx st_addr_1, st_addr_2, dmema;
+ HOST_WIDE_INT i;
+
+ dmema = XEXP (dmem, 0);
+ if (GET_CODE (dmema) == LO_SUM)
+ dmema = force_reg (Pmode, dmema);
+
+ /* Generate all the tmp registers we need. */
+ if (data_regs != NULL)
+ for (i = 0; i < words; ++i)
+ ins_tmps[i] = gen_reg_rtx(DImode);
+ st_tmp_1 = gen_reg_rtx(DImode);
+ st_tmp_2 = gen_reg_rtx(DImode);
+
+ if (ofs != 0)
+ dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
+
+ st_addr_2 = change_address (dmem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (dmema, words*8 - 1),
+ im8));
+ set_mem_alias_set (st_addr_2, 0);
+
+ st_addr_1 = change_address (dmem, DImode,
+ gen_rtx_AND (DImode, dmema, im8));
+ set_mem_alias_set (st_addr_1, 0);
+
+ /* Load up the destination end bits. */
+ emit_move_insn (st_tmp_2, st_addr_2);
+ emit_move_insn (st_tmp_1, st_addr_1);
+
+ /* Shift the input data into place. */
+ dreg = copy_addr_to_reg (dmema);
+ if (WORDS_BIG_ENDIAN)
+ emit_move_insn (dreg, plus_constant (dreg, 7));
+ if (data_regs != NULL)
+ {
+ for (i = words-1; i >= 0; --i)
+ {
+ if (WORDS_BIG_ENDIAN)
+ {
+ emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
+ emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
+ }
+ else
+ {
+ emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
+ emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
+ }
+ }
+ for (i = words-1; i > 0; --i)
+ {
+ ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
+ ins_tmps[i-1], ins_tmps[i-1], 1,
+ OPTAB_WIDEN);
+ }
+ }
+
+ /* Split and merge the ends with the destination data. */
+ if (WORDS_BIG_ENDIAN)
+ {
+ emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
+ emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
+ }
+ else
+ {
+ emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
+ emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
+ }
+
+ if (data_regs != NULL)
+ {
+ st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
+ st_tmp_2, 1, OPTAB_WIDEN);
+ st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
+ st_tmp_1, 1, OPTAB_WIDEN);
+ }
+
+ /* Store it all. */
+ if (WORDS_BIG_ENDIAN)
+ emit_move_insn (st_addr_1, st_tmp_1);
+ else
+ emit_move_insn (st_addr_2, st_tmp_2);
+ for (i = words-1; i > 0; --i)
+ {
+ rtx tmp = change_address (dmem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant(dmema,
+ WORDS_BIG_ENDIAN ? i*8-1 : i*8),
+ im8));
+ set_mem_alias_set (tmp, 0);
+ emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
+ }
+ if (WORDS_BIG_ENDIAN)
+ emit_move_insn (st_addr_2, st_tmp_2);
+ else
+ emit_move_insn (st_addr_1, st_tmp_1);
+}
+
+
+/* Expand string/block move operations.
+
+ operands[0] is the pointer to the destination.
+ operands[1] is the pointer to the source.
+ operands[2] is the number of bytes to move.
+ operands[3] is the alignment. */
+
+int
+alpha_expand_block_move (rtx operands[])
+{
+ rtx bytes_rtx = operands[2];
+ rtx align_rtx = operands[3];
+ HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
+ HOST_WIDE_INT bytes = orig_bytes;
+ HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
+ HOST_WIDE_INT dst_align = src_align;
+ rtx orig_src = operands[1];
+ rtx orig_dst = operands[0];
+ rtx data_regs[2 * MAX_MOVE_WORDS + 16];
+ rtx tmp;
+ unsigned int i, words, ofs, nregs = 0;
+
+ if (orig_bytes <= 0)
+ return 1;
+ else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
+ return 0;
+
+ /* Look for additional alignment information from recorded register info. */
+
+ tmp = XEXP (orig_src, 0);
+ if (GET_CODE (tmp) == REG)
+ src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
+ else if (GET_CODE (tmp) == PLUS
+ && GET_CODE (XEXP (tmp, 0)) == REG
+ && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ {
+ unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
+ unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
+
+ if (a > src_align)
+ {
+ if (a >= 64 && c % 8 == 0)
+ src_align = 64;
+ else if (a >= 32 && c % 4 == 0)
+ src_align = 32;
+ else if (a >= 16 && c % 2 == 0)
+ src_align = 16;
+ }
+ }
+
+ tmp = XEXP (orig_dst, 0);
+ if (GET_CODE (tmp) == REG)
+ dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
+ else if (GET_CODE (tmp) == PLUS
+ && GET_CODE (XEXP (tmp, 0)) == REG
+ && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ {
+ unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
+ unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
+
+ if (a > dst_align)
+ {
+ if (a >= 64 && c % 8 == 0)
+ dst_align = 64;
+ else if (a >= 32 && c % 4 == 0)
+ dst_align = 32;
+ else if (a >= 16 && c % 2 == 0)
+ dst_align = 16;
+ }
+ }
+
+ ofs = 0;
+ if (src_align >= 64 && bytes >= 8)
+ {
+ words = bytes / 8;
+
+ for (i = 0; i < words; ++i)
+ data_regs[nregs + i] = gen_reg_rtx (DImode);
+
+ for (i = 0; i < words; ++i)
+ emit_move_insn (data_regs[nregs + i],
+ adjust_address (orig_src, DImode, ofs + i * 8));
+
+ nregs += words;
+ bytes -= words * 8;
+ ofs += words * 8;
+ }
+
+ if (src_align >= 32 && bytes >= 4)
+ {
+ words = bytes / 4;
+
+ for (i = 0; i < words; ++i)
+ data_regs[nregs + i] = gen_reg_rtx (SImode);
+
+ for (i = 0; i < words; ++i)
+ emit_move_insn (data_regs[nregs + i],
+ adjust_address (orig_src, SImode, ofs + i * 4));
+
+ nregs += words;
+ bytes -= words * 4;
+ ofs += words * 4;
+ }
+
+ if (bytes >= 8)
+ {
+ words = bytes / 8;
+
+ for (i = 0; i < words+1; ++i)
+ data_regs[nregs + i] = gen_reg_rtx (DImode);
+
+ alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
+ words, ofs);
+
+ nregs += words;
+ bytes -= words * 8;
+ ofs += words * 8;
+ }
+
+ if (! TARGET_BWX && bytes >= 4)
+ {
+ data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
+ alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
+ bytes -= 4;
+ ofs += 4;
+ }
+
+ if (bytes >= 2)
+ {
+ if (src_align >= 16)
+ {
+ do {
+ data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
+ emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
+ bytes -= 2;
+ ofs += 2;
+ } while (bytes >= 2);
+ }
+ else if (! TARGET_BWX)
+ {
+ data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
+ alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
+ bytes -= 2;
+ ofs += 2;
+ }
+ }
+
+ while (bytes > 0)
+ {
+ data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
+ emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
+ bytes -= 1;
+ ofs += 1;
+ }
+
+ gcc_assert (nregs <= ARRAY_SIZE (data_regs));
+
+ /* Now save it back out again. */
+
+ i = 0, ofs = 0;
+
+ /* Write out the data in whatever chunks reading the source allowed. */
+ if (dst_align >= 64)
+ {
+ while (i < nregs && GET_MODE (data_regs[i]) == DImode)
+ {
+ emit_move_insn (adjust_address (orig_dst, DImode, ofs),
+ data_regs[i]);
+ ofs += 8;
+ i++;
+ }
+ }
+
+ if (dst_align >= 32)
+ {
+ /* If the source has remaining DImode regs, write them out in
+ two pieces. */
+ while (i < nregs && GET_MODE (data_regs[i]) == DImode)
+ {
+ tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
+ NULL_RTX, 1, OPTAB_WIDEN);
+
+ emit_move_insn (adjust_address (orig_dst, SImode, ofs),
+ gen_lowpart (SImode, data_regs[i]));
+ emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
+ gen_lowpart (SImode, tmp));
+ ofs += 8;
+ i++;
+ }
+
+ while (i < nregs && GET_MODE (data_regs[i]) == SImode)
+ {
+ emit_move_insn (adjust_address (orig_dst, SImode, ofs),
+ data_regs[i]);
+ ofs += 4;
+ i++;
+ }
+ }
+
+ if (i < nregs && GET_MODE (data_regs[i]) == DImode)
+ {
+ /* Write out a remaining block of words using unaligned methods. */
+
+ for (words = 1; i + words < nregs; words++)
+ if (GET_MODE (data_regs[i + words]) != DImode)
+ break;
+
+ if (words == 1)
+ alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
+ else
+ alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
+ words, ofs);
+
+ i += words;
+ ofs += words * 8;
+ }
+
+ /* Due to the above, this won't be aligned. */
+ /* ??? If we have more than one of these, consider constructing full
+ words in registers and using alpha_expand_unaligned_store_words. */
+ while (i < nregs && GET_MODE (data_regs[i]) == SImode)
+ {
+ alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
+ ofs += 4;
+ i++;
+ }
+
+ if (dst_align >= 16)
+ while (i < nregs && GET_MODE (data_regs[i]) == HImode)
+ {
+ emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
+ i++;
+ ofs += 2;
+ }
+ else
+ while (i < nregs && GET_MODE (data_regs[i]) == HImode)
+ {
+ alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
+ i++;
+ ofs += 2;
+ }
+
+ /* The remainder must be byte copies. */
+ while (i < nregs)
+ {
+ gcc_assert (GET_MODE (data_regs[i]) == QImode);
+ emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
+ i++;
+ ofs += 1;
+ }
+
+ return 1;
+}
+
+int
+alpha_expand_block_clear (rtx operands[])
+{
+ rtx bytes_rtx = operands[1];
+ rtx align_rtx = operands[3];
+ HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
+ HOST_WIDE_INT bytes = orig_bytes;
+ HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
+ HOST_WIDE_INT alignofs = 0;
+ rtx orig_dst = operands[0];
+ rtx tmp;
+ int i, words, ofs = 0;
+
+ if (orig_bytes <= 0)
+ return 1;
+ if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
+ return 0;
+
+ /* Look for stricter alignment. */
+ tmp = XEXP (orig_dst, 0);
+ if (GET_CODE (tmp) == REG)
+ align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
+ else if (GET_CODE (tmp) == PLUS
+ && GET_CODE (XEXP (tmp, 0)) == REG
+ && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
+ int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
+
+ if (a > align)
+ {
+ if (a >= 64)
+ align = a, alignofs = 8 - c % 8;
+ else if (a >= 32)
+ align = a, alignofs = 4 - c % 4;
+ else if (a >= 16)
+ align = a, alignofs = 2 - c % 2;
+ }
+ }
+
+ /* Handle an unaligned prefix first. */
+
+ if (alignofs > 0)
+ {
+#if HOST_BITS_PER_WIDE_INT >= 64
+ /* Given that alignofs is bounded by align, the only time BWX could
+ generate three stores is for a 7 byte fill. Prefer two individual
+ stores over a load/mask/store sequence. */
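+ /* Illustrative note (assuming the usual little-endian layout): with
+ align >= 64 and alignofs == 3, inv_alignofs is 5, so the code below
+ loads the aligned quadword starting 5 bytes before the fill region,
+ ANDs it with 0x000000ffffffffff to preserve those 5 low-order bytes,
+ and stores it back, clearing the 3 prefix bytes in a single
+ load/mask/store. */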
+ if ((!TARGET_BWX || alignofs == 7)
+ && align >= 32
+ && !(alignofs == 4 && bytes >= 4))
+ {
+ enum machine_mode mode = (align >= 64 ? DImode : SImode);
+ int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
+ rtx mem, tmp;
+ HOST_WIDE_INT mask;
+
+ mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
+ set_mem_alias_set (mem, 0);
+
+ mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
+ if (bytes < alignofs)
+ {
+ mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
+ ofs += bytes;
+ bytes = 0;
+ }
+ else
+ {
+ bytes -= alignofs;
+ ofs += alignofs;
+ }
+ alignofs = 0;
+
+ tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
+ NULL_RTX, 1, OPTAB_WIDEN);
+
+ emit_move_insn (mem, tmp);
+ }
+#endif
+
+ if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
+ {
+ emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
+ bytes -= 1;
+ ofs += 1;
+ alignofs -= 1;
+ }
+ if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
+ {
+ emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
+ bytes -= 2;
+ ofs += 2;
+ alignofs -= 2;
+ }
+ if (alignofs == 4 && bytes >= 4)
+ {
+ emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
+ bytes -= 4;
+ ofs += 4;
+ alignofs = 0;
+ }
+
+ /* If we've not used the extra lead alignment information by now,
+ we won't be able to. Downgrade align to match what's left over. */
+ if (alignofs > 0)
+ {
+ alignofs = alignofs & -alignofs;
+ align = MIN (align, alignofs * BITS_PER_UNIT);
+ }
+ }
+
+ /* Handle a block of contiguous long-words. */
+
+ if (align >= 64 && bytes >= 8)
+ {
+ words = bytes / 8;
+
+ for (i = 0; i < words; ++i)
+ emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
+ const0_rtx);
+
+ bytes -= words * 8;
+ ofs += words * 8;
+ }
+
+ /* If the block is large and appropriately aligned, emit a single
+ store followed by a sequence of stq_u insns. */
+
+ if (align >= 32 && bytes > 16)
+ {
+ rtx orig_dsta;
+
+ emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
+ bytes -= 4;
+ ofs += 4;
+
+ orig_dsta = XEXP (orig_dst, 0);
+ if (GET_CODE (orig_dsta) == LO_SUM)
+ orig_dsta = force_reg (Pmode, orig_dsta);
+
+ words = bytes / 8;
+ for (i = 0; i < words; ++i)
+ {
+ rtx mem
+ = change_address (orig_dst, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (orig_dsta, ofs + i*8),
+ GEN_INT (-8)));
+ set_mem_alias_set (mem, 0);
+ emit_move_insn (mem, const0_rtx);
+ }
+
+ /* Depending on the alignment, the first stq_u may have overlapped
+ with the initial stl, which means that the last stq_u didn't
+ write as much as it would appear. Leave those questionable bytes
+ unaccounted for. */
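+ /* Illustrative note: when the destination happens to be 8-byte
+ aligned, the first stq_u rounds back onto the quadword already
+ covered by the stl, so the WORDS stores only advance the fill by
+ WORDS*8 - 4 bytes; the accounting below credits exactly that. */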
+ bytes -= words * 8 - 4;
+ ofs += words * 8 - 4;
+ }
+
+ /* Handle a smaller block of aligned words. */
+
+ if ((align >= 64 && bytes == 4)
+ || (align == 32 && bytes >= 4))
+ {
+ words = bytes / 4;
+
+ for (i = 0; i < words; ++i)
+ emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
+ const0_rtx);
+
+ bytes -= words * 4;
+ ofs += words * 4;
+ }
+
+ /* An unaligned block uses stq_u stores for as many quadwords as possible. */
+
+ if (bytes >= 8)
+ {
+ words = bytes / 8;
+
+ alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
+
+ bytes -= words * 8;
+ ofs += words * 8;
+ }
+
+ /* Next clean up any trailing pieces. */
+
+#if HOST_BITS_PER_WIDE_INT >= 64
+ /* Count the number of bits in BYTES for which aligned stores could
+ be emitted. */
+ words = 0;
+ for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
+ if (bytes & i)
+ words += 1;
+
+ /* If we have appropriate alignment (and it wouldn't take too many
+ instructions otherwise), mask out the bytes we need. */
+ if (TARGET_BWX ? words > 2 : bytes > 0)
+ {
+ if (align >= 64)
+ {
+ rtx mem, tmp;
+ HOST_WIDE_INT mask;
+
+ mem = adjust_address (orig_dst, DImode, ofs);
+ set_mem_alias_set (mem, 0);
+
+ mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
+
+ tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
+ NULL_RTX, 1, OPTAB_WIDEN);
+
+ emit_move_insn (mem, tmp);
+ return 1;
+ }
+ else if (align >= 32 && bytes < 4)
+ {
+ rtx mem, tmp;
+ HOST_WIDE_INT mask;
+
+ mem = adjust_address (orig_dst, SImode, ofs);
+ set_mem_alias_set (mem, 0);
+
+ mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
+
+ tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
+ NULL_RTX, 1, OPTAB_WIDEN);
+
+ emit_move_insn (mem, tmp);
+ return 1;
+ }
+ }
+#endif
+
+ if (!TARGET_BWX && bytes >= 4)
+ {
+ alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
+ bytes -= 4;
+ ofs += 4;
+ }
+
+ if (bytes >= 2)
+ {
+ if (align >= 16)
+ {
+ do {
+ emit_move_insn (adjust_address (orig_dst, HImode, ofs),
+ const0_rtx);
+ bytes -= 2;
+ ofs += 2;
+ } while (bytes >= 2);
+ }
+ else if (! TARGET_BWX)
+ {
+ alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
+ bytes -= 2;
+ ofs += 2;
+ }
+ }
+
+ while (bytes > 0)
+ {
+ emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
+ bytes -= 1;
+ ofs += 1;
+ }
+
+ return 1;
+}
+
+/* Returns a mask so that zap(x, value) == x & mask. */
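+
+/* For example, alpha_expand_zap_mask (0x0f) yields
+ 0xffffffff00000000: each clear bit in VALUE contributes an 0xff byte,
+ so zap (x, 0x0f), which zeroes bytes 0-3 of X, equals
+ x & 0xffffffff00000000. */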
+
+rtx
+alpha_expand_zap_mask (HOST_WIDE_INT value)
+{
+ rtx result;
+ int i;
+
+ if (HOST_BITS_PER_WIDE_INT >= 64)
+ {
+ HOST_WIDE_INT mask = 0;
+
+ for (i = 7; i >= 0; --i)
+ {
+ mask <<= 8;
+ if (!((value >> i) & 1))
+ mask |= 0xff;
+ }
+
+ result = gen_int_mode (mask, DImode);
+ }
+ else
+ {
+ HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
+
+ gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
+
+ for (i = 7; i >= 4; --i)
+ {
+ mask_hi <<= 8;
+ if (!((value >> i) & 1))
+ mask_hi |= 0xff;
+ }
+
+ for (i = 3; i >= 0; --i)
+ {
+ mask_lo <<= 8;
+ if (!((value >> i) & 1))
+ mask_lo |= 0xff;
+ }
+
+ result = immed_double_const (mask_lo, mask_hi, DImode);
+ }
+
+ return result;
+}
+
+void
+alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
+ enum machine_mode mode,
+ rtx op0, rtx op1, rtx op2)
+{
+ op0 = gen_lowpart (mode, op0);
+
+ if (op1 == const0_rtx)
+ op1 = CONST0_RTX (mode);
+ else
+ op1 = gen_lowpart (mode, op1);
+
+ if (op2 == const0_rtx)
+ op2 = CONST0_RTX (mode);
+ else
+ op2 = gen_lowpart (mode, op2);
+
+ emit_insn ((*gen) (op0, op1, op2));
+}
+
+/* A subroutine of the atomic operation splitters. Jump to LABEL if
+ COND is true. Mark the jump as unlikely to be taken. */
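+
+/* REG_BR_PROB_BASE is 10000, so the probability note attached below
+ records roughly a 1% chance of the branch being taken (10000/100 - 1
+ = 99), letting the optimizers treat the fall-through path as the
+ common case. */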
+
+static void
+emit_unlikely_jump (rtx cond, rtx label)
+{
+ rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
+ rtx x;
+
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
+ x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
+ REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
+}
+
+/* A subroutine of the atomic operation splitters. Emit a load-locked
+ instruction in MODE. */
+
+static void
+emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
+{
+ rtx (*fn) (rtx, rtx) = NULL;
+ if (mode == SImode)
+ fn = gen_load_locked_si;
+ else if (mode == DImode)
+ fn = gen_load_locked_di;
+ emit_insn (fn (reg, mem));
+}
+
+/* A subroutine of the atomic operation splitters. Emit a store-conditional
+ instruction in MODE. */
+
+static void
+emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
+{
+ rtx (*fn) (rtx, rtx, rtx) = NULL;
+ if (mode == SImode)
+ fn = gen_store_conditional_si;
+ else if (mode == DImode)
+ fn = gen_store_conditional_di;
+ emit_insn (fn (res, mem, val));
+}
+
+/* A subroutine of the atomic operation splitters. Emit an insxl
+ instruction in MODE. */
+
+static rtx
+emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
+{
+ rtx ret = gen_reg_rtx (DImode);
+ rtx (*fn) (rtx, rtx, rtx);
+
+ if (WORDS_BIG_ENDIAN)
+ {
+ if (mode == QImode)
+ fn = gen_insbl_be;
+ else
+ fn = gen_inswl_be;
+ }
+ else
+ {
+ if (mode == QImode)
+ fn = gen_insbl_le;
+ else
+ fn = gen_inswl_le;
+ }
+ emit_insn (fn (ret, op1, op2));
+
+ return ret;
+}
+
+/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
+ to perform. MEM is the memory on which to operate. VAL is the second
+ operand of the binary operator. BEFORE and AFTER are optional locations to
+ return the value of MEM either before or after the operation. SCRATCH is
+ a scratch register. */
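+
+/* Sketch (not emitted verbatim): for CODE == PLUS on SImode the split
+ below corresponds to the canonical Alpha load-locked/store-conditional
+ retry loop
+
+	mb
+ 1:	ldl_l	before,0(mem)
+	addl	before,val,scratch
+	stl_c	scratch,0(mem)
+	beq	scratch,1b
+	mb
+
+ with BEFORE/AFTER, when requested, taken from the loaded and computed
+ values respectively. */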
+
+void
+alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
+ rtx before, rtx after, rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
+
+ emit_insn (gen_memory_barrier ());
+
+ label = gen_label_rtx ();
+ emit_label (label);
+ label = gen_rtx_LABEL_REF (DImode, label);
+
+ if (before == NULL)
+ before = scratch;
+ emit_load_locked (mode, before, mem);
+
+ if (code == NOT)
+ x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
+ else
+ x = gen_rtx_fmt_ee (code, mode, before, val);
+ if (after)
+ emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
+
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_EQ (DImode, cond, const0_rtx);
+ emit_unlikely_jump (x, label);
+
+ emit_insn (gen_memory_barrier ());
+}
+
+/* Expand a compare and swap operation. */
+
+void
+alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
+ rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
+
+ emit_insn (gen_memory_barrier ());
+
+ label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
+ label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
+ emit_label (XEXP (label1, 0));
+
+ emit_load_locked (mode, retval, mem);
+
+ x = gen_lowpart (DImode, retval);
+ if (oldval == const0_rtx)
+ x = gen_rtx_NE (DImode, x, const0_rtx);
+ else
+ {
+ x = gen_rtx_EQ (DImode, x, oldval);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+ x = gen_rtx_EQ (DImode, cond, const0_rtx);
+ }
+ emit_unlikely_jump (x, label2);
+
+ emit_move_insn (scratch, newval);
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_EQ (DImode, cond, const0_rtx);
+ emit_unlikely_jump (x, label1);
+
+ emit_insn (gen_memory_barrier ());
+ emit_label (XEXP (label2, 0));
+}
+
+void
+alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx addr, align, wdst;
+ rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
+
+ addr = force_reg (DImode, XEXP (mem, 0));
+ align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
+ NULL_RTX, 1, OPTAB_DIRECT);
+
+ oldval = convert_modes (DImode, mode, oldval, 1);
+ newval = emit_insxl (mode, newval, addr);
+
+ wdst = gen_reg_rtx (DImode);
+ if (mode == QImode)
+ fn5 = gen_sync_compare_and_swapqi_1;
+ else
+ fn5 = gen_sync_compare_and_swaphi_1;
+ emit_insn (fn5 (wdst, addr, oldval, newval, align));
+
+ emit_move_insn (dst, gen_lowpart (mode, wdst));
+}
+
+void
+alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
+ rtx oldval, rtx newval, rtx align,
+ rtx scratch, rtx cond)
+{
+ rtx label1, label2, mem, width, mask, x;
+
+ mem = gen_rtx_MEM (DImode, align);
+ MEM_VOLATILE_P (mem) = 1;
+
+ emit_insn (gen_memory_barrier ());
+ label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
+ label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
+ emit_label (XEXP (label1, 0));
+
+ emit_load_locked (DImode, scratch, mem);
+
+ width = GEN_INT (GET_MODE_BITSIZE (mode));
+ mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
+ if (WORDS_BIG_ENDIAN)
+ emit_insn (gen_extxl_be (dest, scratch, width, addr));
+ else
+ emit_insn (gen_extxl_le (dest, scratch, width, addr));
+
+ if (oldval == const0_rtx)
+ x = gen_rtx_NE (DImode, dest, const0_rtx);
+ else
+ {
+ x = gen_rtx_EQ (DImode, dest, oldval);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+ x = gen_rtx_EQ (DImode, cond, const0_rtx);
+ }
+ emit_unlikely_jump (x, label2);
+
+ if (WORDS_BIG_ENDIAN)
+ emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
+ else
+ emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
+ emit_insn (gen_iordi3 (scratch, scratch, newval));
+
+ emit_store_conditional (DImode, scratch, mem, scratch);
+
+ x = gen_rtx_EQ (DImode, scratch, const0_rtx);
+ emit_unlikely_jump (x, label1);
+
+ emit_insn (gen_memory_barrier ());
+ emit_label (XEXP (label2, 0));
+}
+
+/* Expand an atomic exchange operation. */
+
+void
+alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label, x, cond = gen_lowpart (DImode, scratch);
+
+ emit_insn (gen_memory_barrier ());
+
+ label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
+ emit_label (XEXP (label, 0));
+
+ emit_load_locked (mode, retval, mem);
+ emit_move_insn (scratch, val);
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_EQ (DImode, cond, const0_rtx);
+ emit_unlikely_jump (x, label);
+}
+
+void
+alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx addr, align, wdst;
+ rtx (*fn4) (rtx, rtx, rtx, rtx);
+
+ /* Force the address into a register. */
+ addr = force_reg (DImode, XEXP (mem, 0));
+
+ /* Align it to a multiple of 8. */
+ align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
+ NULL_RTX, 1, OPTAB_DIRECT);
+
+ /* Insert val into the correct byte location within the word. */
+ val = emit_insxl (mode, val, addr);
+
+ wdst = gen_reg_rtx (DImode);
+ if (mode == QImode)
+ fn4 = gen_sync_lock_test_and_setqi_1;
+ else
+ fn4 = gen_sync_lock_test_and_sethi_1;
+ emit_insn (fn4 (wdst, addr, val, align));
+
+ emit_move_insn (dst, gen_lowpart (mode, wdst));
+}
+
+void
+alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
+ rtx val, rtx align, rtx scratch)
+{
+ rtx label, mem, width, mask, x;
+
+ mem = gen_rtx_MEM (DImode, align);
+ MEM_VOLATILE_P (mem) = 1;
+
+ emit_insn (gen_memory_barrier ());
+ label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
+ emit_label (XEXP (label, 0));
+
+ emit_load_locked (DImode, scratch, mem);
+
+ width = GEN_INT (GET_MODE_BITSIZE (mode));
+ mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
+ if (WORDS_BIG_ENDIAN)
+ {
+ emit_insn (gen_extxl_be (dest, scratch, width, addr));
+ emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
+ }
+ else
+ {
+ emit_insn (gen_extxl_le (dest, scratch, width, addr));
+ emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
+ }
+ emit_insn (gen_iordi3 (scratch, scratch, val));
+
+ emit_store_conditional (DImode, scratch, mem, scratch);
+
+ x = gen_rtx_EQ (DImode, scratch, const0_rtx);
+ emit_unlikely_jump (x, label);
+}
+
+/* Adjust the cost of a scheduling dependency. Return the new cost of
+ a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
+
+static int
+alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+{
+ enum attr_type insn_type, dep_insn_type;
+
+ /* If the dependence is an anti-dependence, there is no cost. For an
+ output dependence, there is sometimes a cost, but it doesn't seem
+ worth handling those few cases. */
+ if (REG_NOTE_KIND (link) != 0)
+ return cost;
+
+ /* If we can't recognize the insns, we can't really do anything. */
+ if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
+ return cost;
+
+ insn_type = get_attr_type (insn);
+ dep_insn_type = get_attr_type (dep_insn);
+
+ /* Bring in the user-defined memory latency. */
+ if (dep_insn_type == TYPE_ILD
+ || dep_insn_type == TYPE_FLD
+ || dep_insn_type == TYPE_LDSYM)
+ cost += alpha_memory_latency-1;
+
+ /* Everything else handled in DFA bypasses now. */
+
+ return cost;
+}
+
+/* The number of instructions that can be issued per cycle. */
+
+static int
+alpha_issue_rate (void)
+{
+ return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
+}
+
+/* How many alternative schedules to try. This should be as wide as the
+ scheduling freedom in the DFA, but no wider. Making this value too
+ large results in extra work for the scheduler.
+
+ For EV4, loads can be issued to either IB0 or IB1, thus we have 2
+ alternative schedules. For EV5, we can choose between E0/E1 and
+ FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
+
+static int
+alpha_multipass_dfa_lookahead (void)
+{
+ return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
+}
+
+/* Machine-specific function data. */
+
+struct machine_function GTY(())
+{
+ /* For unicosmk. */
+ /* List of call information words for calls from this function. */
+ struct rtx_def *first_ciw;
+ struct rtx_def *last_ciw;
+ int ciw_count;
+
+ /* List of deferred case vectors. */
+ struct rtx_def *addr_list;
+
+ /* For OSF. */
+ const char *some_ld_name;
+
+ /* For TARGET_LD_BUGGY_LDGP. */
+ struct rtx_def *gp_save_rtx;
+};
+
+/* How to allocate a 'struct machine_function'. */
+
+static struct machine_function *
+alpha_init_machine_status (void)
+{
+ return ((struct machine_function *)
+ ggc_alloc_cleared (sizeof (struct machine_function)));
+}
+
+/* Functions to save and restore alpha_return_addr_rtx. */
+
+/* Start the ball rolling with RETURN_ADDR_RTX. */
+
+rtx
+alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
+{
+ if (count != 0)
+ return const0_rtx;
+
+ return get_hard_reg_initial_val (Pmode, REG_RA);
+}
+
+/* Return or create a memory slot containing the gp value for the current
+ function. Needed only if TARGET_LD_BUGGY_LDGP. */
+
+rtx
+alpha_gp_save_rtx (void)
+{
+ rtx seq, m = cfun->machine->gp_save_rtx;
+
+ if (m == NULL)
+ {
+ start_sequence ();
+
+ m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
+ m = validize_mem (m);
+ emit_move_insn (m, pic_offset_table_rtx);
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insn_at_entry (seq);
+
+ cfun->machine->gp_save_rtx = m;
+ }
+
+ return m;
+}
+
+static int
+alpha_ra_ever_killed (void)
+{
+ rtx top;
+
+ if (!has_hard_reg_initial_val (Pmode, REG_RA))
+ return regs_ever_live[REG_RA];
+
+ push_topmost_sequence ();
+ top = get_insns ();
+ pop_topmost_sequence ();
+
+ return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
+}
+
+
+/* Return the trap mode suffix applicable to the current
+ instruction, or NULL. */
+
+static const char *
+get_trap_mode_suffix (void)
+{
+ enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
+
+ switch (s)
+ {
+ case TRAP_SUFFIX_NONE:
+ return NULL;
+
+ case TRAP_SUFFIX_SU:
+ if (alpha_fptm >= ALPHA_FPTM_SU)
+ return "su";
+ return NULL;
+
+ case TRAP_SUFFIX_SUI:
+ if (alpha_fptm >= ALPHA_FPTM_SUI)
+ return "sui";
+ return NULL;
+
+ case TRAP_SUFFIX_V_SV:
+ switch (alpha_fptm)
+ {
+ case ALPHA_FPTM_N:
+ return NULL;
+ case ALPHA_FPTM_U:
+ return "v";
+ case ALPHA_FPTM_SU:
+ case ALPHA_FPTM_SUI:
+ return "sv";
+ default:
+ gcc_unreachable ();
+ }
+
+ case TRAP_SUFFIX_V_SV_SVI:
+ switch (alpha_fptm)
+ {
+ case ALPHA_FPTM_N:
+ return NULL;
+ case ALPHA_FPTM_U:
+ return "v";
+ case ALPHA_FPTM_SU:
+ return "sv";
+ case ALPHA_FPTM_SUI:
+ return "svi";
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case TRAP_SUFFIX_U_SU_SUI:
+ switch (alpha_fptm)
+ {
+ case ALPHA_FPTM_N:
+ return NULL;
+ case ALPHA_FPTM_U:
+ return "u";
+ case ALPHA_FPTM_SU:
+ return "su";
+ case ALPHA_FPTM_SUI:
+ return "sui";
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ gcc_unreachable ();
+}
+
+/* Return the rounding mode suffix applicable to the current
+ instruction, or NULL. */
+
+static const char *
+get_round_mode_suffix (void)
+{
+ enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
+
+ switch (s)
+ {
+ case ROUND_SUFFIX_NONE:
+ return NULL;
+ case ROUND_SUFFIX_NORMAL:
+ switch (alpha_fprm)
+ {
+ case ALPHA_FPRM_NORM:
+ return NULL;
+ case ALPHA_FPRM_MINF:
+ return "m";
+ case ALPHA_FPRM_CHOP:
+ return "c";
+ case ALPHA_FPRM_DYN:
+ return "d";
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case ROUND_SUFFIX_C:
+ return "c";
+
+ default:
+ gcc_unreachable ();
+ }
+ gcc_unreachable ();
+}
+
+/* Locate some local-dynamic symbol still in use by this function
+ so that we can print its name in some movdi_er_tlsldm pattern. */
+
+static int
+get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
+{
+ rtx x = *px;
+
+ if (GET_CODE (x) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
+ {
+ cfun->machine->some_ld_name = XSTR (x, 0);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const char *
+get_some_local_dynamic_name (void)
+{
+ rtx insn;
+
+ if (cfun->machine->some_ld_name)
+ return cfun->machine->some_ld_name;
+
+ for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
+ if (INSN_P (insn)
+ && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
+ return cfun->machine->some_ld_name;
+
+ gcc_unreachable ();
+}
+
+/* Print an operand. Recognize special options, documented below. */
+
+void
+print_operand (FILE *file, rtx x, int code)
+{
+ int i;
+
+ switch (code)
+ {
+ case '~':
+ /* Print the assembler name of the current function. */
+ assemble_name (file, alpha_fnname);
+ break;
+
+ case '&':
+ assemble_name (file, get_some_local_dynamic_name ());
+ break;
+
+ case '/':
+ {
+ const char *trap = get_trap_mode_suffix ();
+ const char *round = get_round_mode_suffix ();
+
+ if (trap || round)
+ fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
+ (trap ? trap : ""), (round ? round : ""));
+ break;
+ }
+
+ case ',':
+ /* Generates single precision instruction suffix. */
+ fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
+ break;
+
+ case '-':
+ /* Generates double precision instruction suffix. */
+ fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
+ break;
+
+ case '+':
+ /* Generates a nop after a noreturn call at the very end of the
+ function. */
+ if (next_real_insn (current_output_insn) == 0)
+ fprintf (file, "\n\tnop");
+ break;
+
+ case '#':
+ if (alpha_this_literal_sequence_number == 0)
+ alpha_this_literal_sequence_number = alpha_next_sequence_number++;
+ fprintf (file, "%d", alpha_this_literal_sequence_number);
+ break;
+
+ case '*':
+ if (alpha_this_gpdisp_sequence_number == 0)
+ alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
+ fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
+ break;
+
+ case 'H':
+ if (GET_CODE (x) == HIGH)
+ output_addr_const (file, XEXP (x, 0));
+ else
+ output_operand_lossage ("invalid %%H value");
+ break;
+
+ case 'J':
+ {
+ const char *lituse;
+
+ if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
+ {
+ x = XVECEXP (x, 0, 0);
+ lituse = "lituse_tlsgd";
+ }
+ else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
+ {
+ x = XVECEXP (x, 0, 0);
+ lituse = "lituse_tlsldm";
+ }
+ else if (GET_CODE (x) == CONST_INT)
+ lituse = "lituse_jsr";
+ else
+ {
+ output_operand_lossage ("invalid %%J value");
+ break;
+ }
+
+ if (x != const0_rtx)
+ fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
+ }
+ break;
+
+ case 'j':
+ {
+ const char *lituse;
+
+#ifdef HAVE_AS_JSRDIRECT_RELOCS
+ lituse = "lituse_jsrdirect";
+#else
+ lituse = "lituse_jsr";
+#endif
+
+ gcc_assert (INTVAL (x) != 0);
+ fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
+ }
+ break;
+ case 'r':
+ /* If this operand is the constant zero, write it as "$31". */
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ else if (x == CONST0_RTX (GET_MODE (x)))
+ fprintf (file, "$31");
+ else
+ output_operand_lossage ("invalid %%r value");
+ break;
+
+ case 'R':
+ /* Similar, but for floating-point. */
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ else if (x == CONST0_RTX (GET_MODE (x)))
+ fprintf (file, "$f31");
+ else
+ output_operand_lossage ("invalid %%R value");
+ break;
+
+ case 'N':
+ /* Write the 1's complement of a constant. */
+ if (GET_CODE (x) != CONST_INT)
+ output_operand_lossage ("invalid %%N value");
+
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
+ break;
+
+ case 'P':
+ /* Write 1 << C, for a constant C. */
+ if (GET_CODE (x) != CONST_INT)
+ output_operand_lossage ("invalid %%P value");
+
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
+ break;
+
+ case 'h':
+ /* Write the high-order 16 bits of a constant, sign-extended. */
+ if (GET_CODE (x) != CONST_INT)
+ output_operand_lossage ("invalid %%h value");
+
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
+ break;
+
+ case 'L':
+ /* Write the low-order 16 bits of a constant, sign-extended. */
+ if (GET_CODE (x) != CONST_INT)
+ output_operand_lossage ("invalid %%L value");
+
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
+ break;
+
+ case 'm':
+ /* Write mask for ZAP insn. */
+ if (GET_CODE (x) == CONST_DOUBLE)
+ {
+ HOST_WIDE_INT mask = 0;
+ HOST_WIDE_INT value;
+
+ value = CONST_DOUBLE_LOW (x);
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
+ i++, value >>= 8)
+ if (value & 0xff)
+ mask |= (1 << i);
+
+ value = CONST_DOUBLE_HIGH (x);
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
+ i++, value >>= 8)
+ if (value & 0xff)
+ mask |= (1 << (i + sizeof (int)));
+
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
+ }
+
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ HOST_WIDE_INT mask = 0, value = INTVAL (x);
+
+ for (i = 0; i < 8; i++, value >>= 8)
+ if (value & 0xff)
+ mask |= (1 << i);
+
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
+ }
+ else
+ output_operand_lossage ("invalid %%m value");
+ break;
+
+ case 'M':
+ /* 'b', 'w', 'l', or 'q' as the value of the constant. */
+ if (GET_CODE (x) != CONST_INT
+ || (INTVAL (x) != 8 && INTVAL (x) != 16
+ && INTVAL (x) != 32 && INTVAL (x) != 64))
+ output_operand_lossage ("invalid %%M value");
+
+ fprintf (file, "%s",
+ (INTVAL (x) == 8 ? "b"
+ : INTVAL (x) == 16 ? "w"
+ : INTVAL (x) == 32 ? "l"
+ : "q"));
+ break;
+
+ case 'U':
+ /* Similar, except do it from the mask. */
+ if (GET_CODE (x) == CONST_INT)
+ {
+ HOST_WIDE_INT value = INTVAL (x);
+
+ if (value == 0xff)
+ {
+ fputc ('b', file);
+ break;
+ }
+ if (value == 0xffff)
+ {
+ fputc ('w', file);
+ break;
+ }
+ if (value == 0xffffffff)
+ {
+ fputc ('l', file);
+ break;
+ }
+ if (value == -1)
+ {
+ fputc ('q', file);
+ break;
+ }
+ }
+ else if (HOST_BITS_PER_WIDE_INT == 32
+ && GET_CODE (x) == CONST_DOUBLE
+ && CONST_DOUBLE_LOW (x) == 0xffffffff
+ && CONST_DOUBLE_HIGH (x) == 0)
+ {
+ fputc ('l', file);
+ break;
+ }
+ output_operand_lossage ("invalid %%U value");
+ break;
+
+ case 's':
+ /* Write the constant value divided by 8 for little-endian mode or
+ (56 - value) / 8 for big-endian mode. */
+
+ if (GET_CODE (x) != CONST_INT
+ || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
+ ? 56
+ : 64)
+ || (INTVAL (x) & 7) != 0)
+ output_operand_lossage ("invalid %%s value");
+
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ WORDS_BIG_ENDIAN
+ ? (56 - INTVAL (x)) / 8
+ : INTVAL (x) / 8);
+ break;
+
+ case 'S':
+ /* Same, except compute (64 - c) / 8. */
+
+ if (GET_CODE (x) != CONST_INT
+ || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
+ || (INTVAL (x) & 7) != 0)
+ output_operand_lossage ("invalid %%S value");
+
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
+ break;
+
+ case 't':
+ {
+ /* On Unicos/Mk systems: use a DEX expression if the symbol
+ clashes with a register name. */
+ int dex = unicosmk_need_dex (x);
+ if (dex)
+ fprintf (file, "DEX(%d)", dex);
+ else
+ output_addr_const (file, x);
+ }
+ break;
+
+ case 'C': case 'D': case 'c': case 'd':
+ /* Write out comparison name. */
+ {
+ enum rtx_code c = GET_CODE (x);
+
+ if (!COMPARISON_P (x))
+ output_operand_lossage ("invalid %%C value");
+
+ else if (code == 'D')
+ c = reverse_condition (c);
+ else if (code == 'c')
+ c = swap_condition (c);
+ else if (code == 'd')
+ c = swap_condition (reverse_condition (c));
+
+ if (c == LEU)
+ fprintf (file, "ule");
+ else if (c == LTU)
+ fprintf (file, "ult");
+ else if (c == UNORDERED)
+ fprintf (file, "un");
+ else
+ fprintf (file, "%s", GET_RTX_NAME (c));
+ }
+ break;
+
+ case 'E':
+ /* Write the divide or modulus operator. */
+ switch (GET_CODE (x))
+ {
+ case DIV:
+ fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
+ break;
+ case UDIV:
+ fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
+ break;
+ case MOD:
+ fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
+ break;
+ case UMOD:
+ fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
+ break;
+ default:
+ output_operand_lossage ("invalid %%E value");
+ break;
+ }
+ break;
+
+ case 'A':
+ /* Write "_u" for unaligned access. */
+ if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
+ fprintf (file, "_u");
+ break;
+
+ case 0:
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
+ {
+ switch (XINT (XEXP (x, 0), 1))
+ {
+ case UNSPEC_DTPREL:
+ case UNSPEC_TPREL:
+ output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
+ break;
+ default:
+ output_operand_lossage ("unknown relocation unspec");
+ break;
+ }
+ }
+ else
+ output_addr_const (file, x);
+ break;
+
+ default:
+ output_operand_lossage ("invalid %%xn code");
+ }
+}
+
+void
+print_operand_address (FILE *file, rtx addr)
+{
+ int basereg = 31;
+ HOST_WIDE_INT offset = 0;
+
+ if (GET_CODE (addr) == AND)
+ addr = XEXP (addr, 0);
+
+ if (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ {
+ offset = INTVAL (XEXP (addr, 1));
+ addr = XEXP (addr, 0);
+ }
+
+ if (GET_CODE (addr) == LO_SUM)
+ {
+ const char *reloc16, *reloclo;
+ rtx op1 = XEXP (addr, 1);
+
+ if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
+ {
+ op1 = XEXP (op1, 0);
+ switch (XINT (op1, 1))
+ {
+ case UNSPEC_DTPREL:
+ reloc16 = NULL;
+ reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
+ break;
+ case UNSPEC_TPREL:
+ reloc16 = NULL;
+ reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
+ break;
+ default:
+ output_operand_lossage ("unknown relocation unspec");
+ return;
+ }
+
+ output_addr_const (file, XVECEXP (op1, 0, 0));
+ }
+ else
+ {
+ reloc16 = "gprel";
+ reloclo = "gprellow";
+ output_addr_const (file, op1);
+ }
+
+ if (offset)
+ fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
+
+ addr = XEXP (addr, 0);
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ basereg = REGNO (addr);
+ break;
+
+ case SUBREG:
+ basereg = subreg_regno (addr);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ fprintf (file, "($%d)\t\t!%s", basereg,
+ (basereg == 29 ? reloc16 : reloclo));
+ return;
+ }
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ basereg = REGNO (addr);
+ break;
+
+ case SUBREG:
+ basereg = subreg_regno (addr);
+ break;
+
+ case CONST_INT:
+ offset = INTVAL (addr);
+ break;
+
+#if TARGET_ABI_OPEN_VMS
+ case SYMBOL_REF:
+ fprintf (file, "%s", XSTR (addr, 0));
+ return;
+
+ case CONST:
+ gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
+ fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
+ XSTR (XEXP (XEXP (addr, 0), 0), 0),
+ INTVAL (XEXP (XEXP (addr, 0), 1)));
+ return;
+
+#endif
+ default:
+ gcc_unreachable ();
+ }
+
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
+}
+
+/* Emit RTL insns to initialize the variable parts of a trampoline at
+ TRAMP. FNADDR is an RTX for the address of the function's pure
+ code. CXT is an RTX for the static chain value for the function.
+
+ The three offset parameters are for the individual template's
+ layout. A JMPOFS < 0 indicates that the trampoline does not
+ contain instructions at all.
+
+ We assume here that a function will be called many more times than
+ its address is taken (e.g., it might be passed to qsort), so we
+ take the trouble to initialize the "hint" field in the JMP insn.
+ Note that the hint field is PC (new) + 4 * bits 13:0. */
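+
+ Concretely, the (currently disabled) hint computed below is
+ ((fnaddr - (tramp + jmpofs + 4)) >> 2) & 0x3fff, i.e. the
+ instruction-word displacement from the insn following the JMP,
+ truncated to the 14 hint bits. */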
+
+void
+alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
+ int fnofs, int cxtofs, int jmpofs)
+{
+ rtx temp, temp1, addr;
+ /* VMS really uses DImode pointers in memory at this point. */
+ enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ fnaddr = convert_memory_address (mode, fnaddr);
+ cxt = convert_memory_address (mode, cxt);
+#endif
+
+ /* Store function address and CXT. */
+ addr = memory_address (mode, plus_constant (tramp, fnofs));
+ emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
+ addr = memory_address (mode, plus_constant (tramp, cxtofs));
+ emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
+
+ /* This has been disabled since the hint only has a 32k range, and in
+ no existing OS is the stack within 32k of the text segment. */
+ if (0 && jmpofs >= 0)
+ {
+ /* Compute hint value. */
+ temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
+ temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
+ OPTAB_WIDEN);
+ temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
+ build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
+ temp = expand_and (SImode, gen_lowpart (SImode, temp),
+ GEN_INT (0x3fff), 0);
+
+ /* Merge in the hint. */
+ addr = memory_address (SImode, plus_constant (tramp, jmpofs));
+ temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
+ temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
+ temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
+ OPTAB_WIDEN);
+ emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
+ }
+
+#ifdef ENABLE_EXECUTE_STACK
+ emit_library_call (init_one_libfunc ("__enable_execute_stack"),
+ 0, VOIDmode, 1, tramp, Pmode);
+#endif
+
+ if (jmpofs >= 0)
+ emit_insn (gen_imb ());
+}
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On Alpha the first 6 words of args are normally in registers
+ and the rest are pushed. */
+
+rtx
+function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
+ int named ATTRIBUTE_UNUSED)
+{
+ int basereg;
+ int num_args;
+
+ /* Don't get confused and pass small structures in FP registers. */
+ if (type && AGGREGATE_TYPE_P (type))
+ basereg = 16;
+ else
+ {
+#ifdef ENABLE_CHECKING
+ /* With alpha_split_complex_arg, we shouldn't see any raw complex
+ values here. */
+ gcc_assert (!COMPLEX_MODE_P (mode));
+#endif
+
+ /* Set up defaults for FP operands passed in FP registers, and
+ integral operands passed in integer registers. */
+ if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ basereg = 32 + 16;
+ else
+ basereg = 16;
+ }
+
+ /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
+ the three platforms, so we can't avoid conditional compilation. */
+#if TARGET_ABI_OPEN_VMS
+ {
+ if (mode == VOIDmode)
+ return alpha_arg_info_reg_val (cum);
+
+ num_args = cum.num_args;
+ if (num_args >= 6
+ || targetm.calls.must_pass_in_stack (mode, type))
+ return NULL_RTX;
+ }
+#elif TARGET_ABI_UNICOSMK
+ {
+ int size;
+
+ /* If this is the last argument, generate the call info word (CIW). */
+ /* ??? We don't include the caller's line number in the CIW because
+ I don't know how to determine it if debug infos are turned off. */
+ if (mode == VOIDmode)
+ {
+ int i;
+ HOST_WIDE_INT lo;
+ HOST_WIDE_INT hi;
+ rtx ciw;
+
+ lo = 0;
+
+ for (i = 0; i < cum.num_reg_words && i < 5; i++)
+ if (cum.reg_args_type[i])
+ lo |= (1 << (7 - i));
+
+ if (cum.num_reg_words == 6 && cum.reg_args_type[5])
+ lo |= 7;
+ else
+ lo |= cum.num_reg_words;
+
+#if HOST_BITS_PER_WIDE_INT == 32
+ hi = (cum.num_args << 20) | cum.num_arg_words;
+#else
+ lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
+ | ((HOST_WIDE_INT) cum.num_arg_words << 32);
+ hi = 0;
+#endif
+ ciw = immed_double_const (lo, hi, DImode);
+
+ return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
+ UNSPEC_UMK_LOAD_CIW);
+ }
+
+ size = ALPHA_ARG_SIZE (mode, type, named);
+ num_args = cum.num_reg_words;
+ if (cum.force_stack
+ || cum.num_reg_words + size > 6
+ || targetm.calls.must_pass_in_stack (mode, type))
+ return NULL_RTX;
+ else if (type && TYPE_MODE (type) == BLKmode)
+ {
+ rtx reg1, reg2;
+
+ reg1 = gen_rtx_REG (DImode, num_args + 16);
+ reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
+
+ /* The argument fits in two registers. Note that we still need to
+ reserve a register for empty structures. */
+ if (size == 0)
+ return NULL_RTX;
+ else if (size == 1)
+ return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
+ else
+ {
+ reg2 = gen_rtx_REG (DImode, num_args + 17);
+ reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
+ }
+ }
+ }
+#elif TARGET_ABI_OSF
+ {
+ if (cum >= 6)
+ return NULL_RTX;
+ num_args = cum;
+
+ /* VOID is passed as a special flag for "last argument". */
+ if (type == void_type_node)
+ basereg = 16;
+ else if (targetm.calls.must_pass_in_stack (mode, type))
+ return NULL_RTX;
+ }
+#else
+#error Unhandled ABI
+#endif
+
+ return gen_rtx_REG (mode, num_args + basereg);
+}
+
+static int
+alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ tree type ATTRIBUTE_UNUSED,
+ bool named ATTRIBUTE_UNUSED)
+{
+ int words = 0;
+
+#if TARGET_ABI_OPEN_VMS
+ if (cum->num_args < 6
+ && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
+ words = 6 - cum->num_args;
+#elif TARGET_ABI_UNICOSMK
+ /* Never any split arguments. */
+#elif TARGET_ABI_OSF
+ if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
+ words = 6 - *cum;
+#else
+#error Unhandled ABI
+#endif
+
+ return words * UNITS_PER_WORD;
+}
+
+
+/* Return true if TYPE must be returned in memory, instead of in registers. */
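+
+/* For instance, a double _Complex (DCmode) is judged on its 8-byte
+ element and is returned in registers, while a 16-byte TImode integer
+ or any aggregate, however small, is forced into memory. */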
+
+static bool
+alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode = VOIDmode;
+ int size;
+
+ if (type)
+ {
+ mode = TYPE_MODE (type);
+
+ /* All aggregates are returned in memory. */
+ if (AGGREGATE_TYPE_P (type))
+ return true;
+ }
+
+ size = GET_MODE_SIZE (mode);
+ switch (GET_MODE_CLASS (mode))
+ {
+ case MODE_VECTOR_FLOAT:
+ /* Pass all float vectors in memory, like an aggregate. */
+ return true;
+
+ case MODE_COMPLEX_FLOAT:
+ /* We judge complex floats on the size of their element,
+ not the size of the whole type. */
+ size = GET_MODE_UNIT_SIZE (mode);
+ break;
+
+ case MODE_INT:
+ case MODE_FLOAT:
+ case MODE_COMPLEX_INT:
+ case MODE_VECTOR_INT:
+ break;
+
+ default:
+ /* ??? We get called on all sorts of random stuff from
+ aggregate_value_p. We must return something, but it's not
+ clear what's safe to return. Pretend it's a struct I
+ guess. */
+ return true;
+ }
+
+ /* Otherwise types must fit in one register. */
+ return size > UNITS_PER_WORD;
+}
+
+/* Return true if TYPE should be passed by invisible reference. */
+
+static bool
+alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
+ enum machine_mode mode,
+ tree type ATTRIBUTE_UNUSED,
+ bool named ATTRIBUTE_UNUSED)
+{
+ return mode == TFmode || mode == TCmode;
+}
+
+/* Define how to find the value returned by a function. VALTYPE is the
+ data type of the value (as a tree). If the precise function being
+ called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
+ MODE is set instead of VALTYPE for libcalls.
+
+ On Alpha the value is found in $0 for integer functions and
+ $f0 for floating-point functions. */
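+
+ The one non-scalar case below: a DCmode (double _Complex) value is
+ returned as a PARALLEL pairing $f0 (real part, offset 0) with $f1
+ (imaginary part, offset 8). */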
+
+rtx
+function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
+ enum machine_mode mode)
+{
+ unsigned int regnum, dummy;
+ enum mode_class class;
+
+ gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
+
+ if (valtype)
+ mode = TYPE_MODE (valtype);
+
+ class = GET_MODE_CLASS (mode);
+ switch (class)
+ {
+ case MODE_INT:
+ PROMOTE_MODE (mode, dummy, valtype);
+ /* FALLTHRU */
+
+ case MODE_COMPLEX_INT:
+ case MODE_VECTOR_INT:
+ regnum = 0;
+ break;
+
+ case MODE_FLOAT:
+ regnum = 32;
+ break;
+
+ case MODE_COMPLEX_FLOAT:
+ {
+ enum machine_mode cmode = GET_MODE_INNER (mode);
+
+ return gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
+ GEN_INT (GET_MODE_SIZE (cmode)))));
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return gen_rtx_REG (mode, regnum);
+}
+
+/* TCmode complex values are passed by invisible reference. We
+ should not split these values. */
+
+static bool
+alpha_split_complex_arg (tree type)
+{
+ return TYPE_MODE (type) != TCmode;
+}
+
+static tree
+alpha_build_builtin_va_list (void)
+{
+ tree base, ofs, space, record, type_decl;
+
+ if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
+ return ptr_type_node;
+
+ record = (*lang_hooks.types.make_type) (RECORD_TYPE);
+ type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
+ TREE_CHAIN (record) = type_decl;
+ TYPE_NAME (record) = type_decl;
+
+ /* C++? SET_IS_AGGR_TYPE (record, 1); */
+
+ /* Dummy field to prevent alignment warnings. */
+ space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
+ DECL_FIELD_CONTEXT (space) = record;
+ DECL_ARTIFICIAL (space) = 1;
+ DECL_IGNORED_P (space) = 1;
+
+ ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
+ integer_type_node);
+ DECL_FIELD_CONTEXT (ofs) = record;
+ TREE_CHAIN (ofs) = space;
+
+ base = build_decl (FIELD_DECL, get_identifier ("__base"),
+ ptr_type_node);
+ DECL_FIELD_CONTEXT (base) = record;
+ TREE_CHAIN (base) = ofs;
+
+ TYPE_FIELDS (record) = base;
+ layout_type (record);
+
+ va_list_gpr_counter_field = ofs;
+ return record;
+}
+
+#if TARGET_ABI_OSF
+/* Helper function for alpha_stdarg_optimize_hook. Skip over casts
+ and constant additions. */
+
+static tree
+va_list_skip_additions (tree lhs)
+{
+ tree rhs, stmt;
+
+ if (TREE_CODE (lhs) != SSA_NAME)
+ return lhs;
+
+ for (;;)
+ {
+ stmt = SSA_NAME_DEF_STMT (lhs);
+
+ if (TREE_CODE (stmt) == PHI_NODE)
+ return stmt;
+
+ if (TREE_CODE (stmt) != MODIFY_EXPR
+ || TREE_OPERAND (stmt, 0) != lhs)
+ return lhs;
+
+ rhs = TREE_OPERAND (stmt, 1);
+ if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
+ rhs = TREE_OPERAND (rhs, 0);
+
+ if ((TREE_CODE (rhs) != NOP_EXPR
+ && TREE_CODE (rhs) != CONVERT_EXPR
+ && (TREE_CODE (rhs) != PLUS_EXPR
+ || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
+ || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
+ || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
+ return rhs;
+
+ lhs = TREE_OPERAND (rhs, 0);
+ }
+}
+
+/* Check if LHS = RHS statement is
+ LHS = *(ap.__base + ap.__offset + cst)
+ or
+ LHS = *(ap.__base
+ + ((ap.__offset + cst <= 47)
+ ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
+ If the former, indicate that GPR registers are needed,
+ if the latter, indicate that FPR registers are needed.
+ On alpha, cfun->va_list_gpr_size is used as size of the needed
+ regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if
+ GPR registers are needed and bit 1 set if FPR registers are needed.
+ Return true if va_list references should not be scanned for the current
+ statement. */
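+
+/* Illustrative note: "x = va_arg (ap, int)" gimplifies to the first
+ form and sets bit 0 of cfun->va_list_fpr_size, while
+ "d = va_arg (ap, double)" produces the PHI-of-offsets form and sets
+ bit 1, so the prologue saves only the register file(s) actually
+ read. */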
+
+static bool
+alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
+{
+ tree base, offset, arg1, arg2;
+ int offset_arg = 1;
+
+ if (TREE_CODE (rhs) != INDIRECT_REF
+ || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
+ return false;
+
+ lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
+ if (lhs == NULL_TREE
+ || TREE_CODE (lhs) != PLUS_EXPR)
+ return false;
+
+ base = TREE_OPERAND (lhs, 0);
+ if (TREE_CODE (base) == SSA_NAME)
+ base = va_list_skip_additions (base);
+
+ if (TREE_CODE (base) != COMPONENT_REF
+ || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
+ {
+ base = TREE_OPERAND (lhs, 0);
+ if (TREE_CODE (base) == SSA_NAME)
+ base = va_list_skip_additions (base);
+
+ if (TREE_CODE (base) != COMPONENT_REF
+ || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
+ return false;
+
+ offset_arg = 0;
+ }
+
+ base = get_base_address (base);
+ if (TREE_CODE (base) != VAR_DECL
+ || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
+ return false;
+
+ offset = TREE_OPERAND (lhs, offset_arg);
+ if (TREE_CODE (offset) == SSA_NAME)
+ offset = va_list_skip_additions (offset);
+
+ if (TREE_CODE (offset) == PHI_NODE)
+ {
+ HOST_WIDE_INT sub;
+
+ if (PHI_NUM_ARGS (offset) != 2)
+ goto escapes;
+
+ arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
+ arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
+ if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
+ {
+ tree tem = arg1;
+ arg1 = arg2;
+ arg2 = tem;
+
+ if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
+ goto escapes;
+ }
+ if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
+ goto escapes;
+
+ sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
+ if (TREE_CODE (arg2) == MINUS_EXPR)
+ sub = -sub;
+ if (sub < -48 || sub > -32)
+ goto escapes;
+
+ arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
+ if (arg1 != arg2)
+ goto escapes;
+
+ if (TREE_CODE (arg1) == SSA_NAME)
+ arg1 = va_list_skip_additions (arg1);
+
+ if (TREE_CODE (arg1) != COMPONENT_REF
+ || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
+ || get_base_address (arg1) != base)
+ goto escapes;
+
+ /* Need floating point regs. */
+ cfun->va_list_fpr_size |= 2;
+ }
+ else if (TREE_CODE (offset) != COMPONENT_REF
+ || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
+ || get_base_address (offset) != base)
+ goto escapes;
+ else
+ /* Need general regs. */
+ cfun->va_list_fpr_size |= 1;
+ return false;
+
+escapes:
+ si->va_list_escapes = true;
+ return false;
+}
+#endif
+
+/* Perform any actions needed for a function that is receiving a
+ variable number of arguments. */
+
+static void
+alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
+ tree type, int *pretend_size, int no_rtl)
+{
+ CUMULATIVE_ARGS cum = *pcum;
+
+ /* Skip the current argument. */
+ FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
+
+#if TARGET_ABI_UNICOSMK
+ /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
+ arguments on the stack. Unfortunately, it doesn't always store the first
+ one (i.e. the one that arrives in $16 or $f16). This is not a problem
+ with stdargs as we always have at least one named argument there. */
+ if (cum.num_reg_words < 6)
+ {
+ if (!no_rtl)
+ {
+ emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
+ emit_insn (gen_arg_home_umk ());
+ }
+ *pretend_size = 0;
+ }
+#elif TARGET_ABI_OPEN_VMS
+ /* For VMS, we allocate space for all 6 arg registers plus a count.
+
+ However, if NO registers need to be saved, don't allocate any space.
+ This is not only because we won't need the space, but because AP
+ includes the current_pretend_args_size and we don't want to mess up
+ any ap-relative addresses already made. */
+ if (cum.num_args < 6)
+ {
+ if (!no_rtl)
+ {
+ emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
+ emit_insn (gen_arg_home ());
+ }
+ *pretend_size = 7 * UNITS_PER_WORD;
+ }
+#else
+ /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
+ only push those that are remaining. However, if NO registers need to
+ be saved, don't allocate any space. This is not only because we won't
+ need the space, but because AP includes the current_pretend_args_size
+ and we don't want to mess up any ap-relative addresses already made.
+
+ If we are not to use the floating-point registers, save the integer
+ registers where we would put the floating-point registers. This is
+ not the most efficient way to implement varargs with just one register
+ class, but it isn't worth doing anything more efficient in this rare
+ case. */
+ if (cum >= 6)
+ return;
+
+ if (!no_rtl)
+ {
+ int count, set = get_varargs_alias_set ();
+ rtx tmp;
+
+ count = cfun->va_list_gpr_size / UNITS_PER_WORD;
+ if (count > 6 - cum)
+ count = 6 - cum;
+
+ /* Detect whether integer registers or floating-point registers
+ are needed by the detected va_arg statements. See above for
+ how these values are computed. Note that the "escape" value
+ is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
+ these bits set. */
+ gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
+
+ if (cfun->va_list_fpr_size & 1)
+ {
+ tmp = gen_rtx_MEM (BLKmode,
+ plus_constant (virtual_incoming_args_rtx,
+ (cum + 6) * UNITS_PER_WORD));
+ MEM_NOTRAP_P (tmp) = 1;
+ set_mem_alias_set (tmp, set);
+ move_block_from_reg (16 + cum, tmp, count);
+ }
+
+ if (cfun->va_list_fpr_size & 2)
+ {
+ tmp = gen_rtx_MEM (BLKmode,
+ plus_constant (virtual_incoming_args_rtx,
+ cum * UNITS_PER_WORD));
+ MEM_NOTRAP_P (tmp) = 1;
+ set_mem_alias_set (tmp, set);
+ move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
+ }
+ }
+ *pretend_size = 12 * UNITS_PER_WORD;
+#endif
+}
+
+void
+alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT offset;
+ tree t, offset_field, base_field;
+
+ if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
+ return;
+
+ if (TARGET_ABI_UNICOSMK)
+ std_expand_builtin_va_start (valist, nextarg);
+
+ /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
+ up by 48, storing fp arg registers in the first 48 bytes, and the
+ integer arg registers in the next 48 bytes. This is only done,
+ however, if any integer registers need to be stored.
+
+ If no integer registers need be stored, then we must subtract 48
+ in order to account for the integer arg registers which are counted
+ in argsize above, but which are not actually stored on the stack.
+ Must further be careful here about structures straddling the last
+ integer argument register; that futzes with pretend_args_size,
+ which changes the meaning of AP. */
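+
+ /* Worked case: with two named arguments on OSF/1, NUM_ARGS is 2, so
+ __base is set 48 bytes past the incoming argument pointer and
+ __offset to 16; alpha_gimplify_va_arg_1 then reads the first variadic
+ integer argument from __base + 16 and a variadic double from
+ __base + 16 - 48, inside the FP register save area. */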
+
+ if (NUM_ARGS < 6)
+ offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
+ else
+ offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
+
+ if (TARGET_ABI_OPEN_VMS)
+ {
+ nextarg = plus_constant (nextarg, offset);
+ nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
+ make_tree (ptr_type_node, nextarg));
+ TREE_SIDE_EFFECTS (t) = 1;
+
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+ else
+ {
+ base_field = TYPE_FIELDS (TREE_TYPE (valist));
+ offset_field = TREE_CHAIN (base_field);
+
+ base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
+ valist, base_field, NULL_TREE);
+ offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
+ valist, offset_field, NULL_TREE);
+
+ t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
+ t = build2 (PLUS_EXPR, ptr_type_node, t,
+ build_int_cst (NULL_TREE, offset));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+}
+
+static tree
+alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
+{
+ tree type_size, ptr_type, addend, t, addr, internal_post;
+
+ /* If the type could not be passed in registers, skip the block
+ reserved for the registers. */
+ if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
+ {
+ t = build_int_cst (TREE_TYPE (offset), 6*8);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (offset), offset,
+ build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
+ gimplify_and_add (t, pre_p);
+ }
+
+ addend = offset;
+ ptr_type = build_pointer_type (type);
+
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ {
+ tree real_part, imag_part, real_temp;
+
+ real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
+ offset, pre_p);
+
+ /* Copy the value into a new temporary, lest the formal temporary
+ be reused out from under us. */
+ real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
+
+ imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
+ offset, pre_p);
+
+ return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
+ }
+ else if (TREE_CODE (type) == REAL_TYPE)
+ {
+ tree fpaddend, cond, fourtyeight;
+
+ fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
+ fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
+ addend, fourtyeight);
+ cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
+ addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
+ fpaddend, addend);
+ }
+
+ /* Build the final address and force that value into a temporary. */
+ addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
+ fold_convert (ptr_type, addend));
+ internal_post = NULL;
+ gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
+ append_to_statement_list (internal_post, pre_p);
+
+ /* Update the offset field. */
+ type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
+ if (type_size == NULL || TREE_OVERFLOW (type_size))
+ t = size_zero_node;
+ else
+ {
+ t = size_binop (PLUS_EXPR, type_size, size_int (7));
+ t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
+ t = size_binop (MULT_EXPR, t, size_int (8));
+ }
+ t = fold_convert (TREE_TYPE (offset), t);
+ t = build2 (MODIFY_EXPR, void_type_node, offset,
+ build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
+ gimplify_and_add (t, pre_p);
+
+ return build_va_arg_indirect_ref (addr);
+}
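+
+/* Continuing the example from alpha_va_start: with BASE = AP + 48 and
+   an initial offset of 32, an integer va_arg reads from BASE + 32 =
+   AP + 80 (in the integer save area), while a REAL_TYPE va_arg uses
+   the biased addend 32 - 48 = -16 and reads from BASE - 16 = AP + 32
+   (in the FP save area, 48 bytes below the integer one).  */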
+
+static tree
+alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
+{
+ tree offset_field, base_field, offset, base, t, r;
+ bool indirect;
+
+ if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
+ return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
+
+ base_field = TYPE_FIELDS (va_list_type_node);
+ offset_field = TREE_CHAIN (base_field);
+ base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
+ valist, base_field, NULL_TREE);
+ offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
+ valist, offset_field, NULL_TREE);
+
+ /* Pull the fields of the structure out into temporaries. Since we never
+ modify the base field, we can use a formal temporary. Sign-extend the
+ offset field so that it's the proper width for pointer arithmetic. */
+ base = get_formal_tmp_var (base_field, pre_p);
+
+ t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
+ offset = get_initialized_tmp_var (t, pre_p, NULL);
+
+ indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
+ if (indirect)
+ type = build_pointer_type (type);
+
+ /* Find the value. Note that this will be a stable indirection, or
+ a composite of stable indirections in the case of complex. */
+ r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
+
+ /* Stuff the offset temporary back into its field. */
+ t = build2 (MODIFY_EXPR, void_type_node, offset_field,
+ fold_convert (TREE_TYPE (offset_field), offset));
+ gimplify_and_add (t, pre_p);
+
+ if (indirect)
+ r = build_va_arg_indirect_ref (r);
+
+ return r;
+}
+
+/* Builtins. */
+
+enum alpha_builtin
+{
+ ALPHA_BUILTIN_CMPBGE,
+ ALPHA_BUILTIN_EXTBL,
+ ALPHA_BUILTIN_EXTWL,
+ ALPHA_BUILTIN_EXTLL,
+ ALPHA_BUILTIN_EXTQL,
+ ALPHA_BUILTIN_EXTWH,
+ ALPHA_BUILTIN_EXTLH,
+ ALPHA_BUILTIN_EXTQH,
+ ALPHA_BUILTIN_INSBL,
+ ALPHA_BUILTIN_INSWL,
+ ALPHA_BUILTIN_INSLL,
+ ALPHA_BUILTIN_INSQL,
+ ALPHA_BUILTIN_INSWH,
+ ALPHA_BUILTIN_INSLH,
+ ALPHA_BUILTIN_INSQH,
+ ALPHA_BUILTIN_MSKBL,
+ ALPHA_BUILTIN_MSKWL,
+ ALPHA_BUILTIN_MSKLL,
+ ALPHA_BUILTIN_MSKQL,
+ ALPHA_BUILTIN_MSKWH,
+ ALPHA_BUILTIN_MSKLH,
+ ALPHA_BUILTIN_MSKQH,
+ ALPHA_BUILTIN_UMULH,
+ ALPHA_BUILTIN_ZAP,
+ ALPHA_BUILTIN_ZAPNOT,
+ ALPHA_BUILTIN_AMASK,
+ ALPHA_BUILTIN_IMPLVER,
+ ALPHA_BUILTIN_RPCC,
+ ALPHA_BUILTIN_THREAD_POINTER,
+ ALPHA_BUILTIN_SET_THREAD_POINTER,
+
+ /* TARGET_MAX */
+ ALPHA_BUILTIN_MINUB8,
+ ALPHA_BUILTIN_MINSB8,
+ ALPHA_BUILTIN_MINUW4,
+ ALPHA_BUILTIN_MINSW4,
+ ALPHA_BUILTIN_MAXUB8,
+ ALPHA_BUILTIN_MAXSB8,
+ ALPHA_BUILTIN_MAXUW4,
+ ALPHA_BUILTIN_MAXSW4,
+ ALPHA_BUILTIN_PERR,
+ ALPHA_BUILTIN_PKLB,
+ ALPHA_BUILTIN_PKWB,
+ ALPHA_BUILTIN_UNPKBL,
+ ALPHA_BUILTIN_UNPKBW,
+
+ /* TARGET_CIX */
+ ALPHA_BUILTIN_CTTZ,
+ ALPHA_BUILTIN_CTLZ,
+ ALPHA_BUILTIN_CTPOP,
+
+ ALPHA_BUILTIN_max
+};
+
+static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
+ CODE_FOR_builtin_cmpbge,
+ CODE_FOR_builtin_extbl,
+ CODE_FOR_builtin_extwl,
+ CODE_FOR_builtin_extll,
+ CODE_FOR_builtin_extql,
+ CODE_FOR_builtin_extwh,
+ CODE_FOR_builtin_extlh,
+ CODE_FOR_builtin_extqh,
+ CODE_FOR_builtin_insbl,
+ CODE_FOR_builtin_inswl,
+ CODE_FOR_builtin_insll,
+ CODE_FOR_builtin_insql,
+ CODE_FOR_builtin_inswh,
+ CODE_FOR_builtin_inslh,
+ CODE_FOR_builtin_insqh,
+ CODE_FOR_builtin_mskbl,
+ CODE_FOR_builtin_mskwl,
+ CODE_FOR_builtin_mskll,
+ CODE_FOR_builtin_mskql,
+ CODE_FOR_builtin_mskwh,
+ CODE_FOR_builtin_msklh,
+ CODE_FOR_builtin_mskqh,
+ CODE_FOR_umuldi3_highpart,
+ CODE_FOR_builtin_zap,
+ CODE_FOR_builtin_zapnot,
+ CODE_FOR_builtin_amask,
+ CODE_FOR_builtin_implver,
+ CODE_FOR_builtin_rpcc,
+ CODE_FOR_load_tp,
+ CODE_FOR_set_tp,
+
+ /* TARGET_MAX */
+ CODE_FOR_builtin_minub8,
+ CODE_FOR_builtin_minsb8,
+ CODE_FOR_builtin_minuw4,
+ CODE_FOR_builtin_minsw4,
+ CODE_FOR_builtin_maxub8,
+ CODE_FOR_builtin_maxsb8,
+ CODE_FOR_builtin_maxuw4,
+ CODE_FOR_builtin_maxsw4,
+ CODE_FOR_builtin_perr,
+ CODE_FOR_builtin_pklb,
+ CODE_FOR_builtin_pkwb,
+ CODE_FOR_builtin_unpkbl,
+ CODE_FOR_builtin_unpkbw,
+
+ /* TARGET_CIX */
+ CODE_FOR_ctzdi2,
+ CODE_FOR_clzdi2,
+ CODE_FOR_popcountdi2
+};
+
+struct alpha_builtin_def
+{
+ const char *name;
+ enum alpha_builtin code;
+ unsigned int target_mask;
+ bool is_const;
+};
+
+static struct alpha_builtin_def const zero_arg_builtins[] = {
+ { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
+ { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
+};
+
+static struct alpha_builtin_def const one_arg_builtins[] = {
+ { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
+ { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
+ { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
+ { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
+ { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
+ { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
+ { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
+ { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
+};
+
+static struct alpha_builtin_def const two_arg_builtins[] = {
+ { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
+ { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
+ { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
+ { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
+ { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
+ { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
+ { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
+ { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
+ { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
+ { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
+ { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
+ { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
+ { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
+ { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
+ { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
+ { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
+ { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
+ { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
+ { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
+ { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
+ { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
+ { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
+ { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
+ { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
+ { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
+ { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
+ { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
+ { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
+ { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
+ { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
+ { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
+ { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
+ { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
+ { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
+};
+
+static GTY(()) tree alpha_v8qi_u;
+static GTY(()) tree alpha_v8qi_s;
+static GTY(()) tree alpha_v4hi_u;
+static GTY(()) tree alpha_v4hi_s;
+
+static void
+alpha_init_builtins (void)
+{
+ const struct alpha_builtin_def *p;
+ tree dimode_integer_type_node;
+ tree ftype, attrs[2];
+ size_t i;
+
+ dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
+
+ attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
+ attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
+
+ ftype = build_function_type (dimode_integer_type_node, void_list_node);
+
+ p = zero_arg_builtins;
+ for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
+ if ((target_flags & p->target_mask) == p->target_mask)
+ lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
+ NULL, attrs[p->is_const]);
+
+ ftype = build_function_type_list (dimode_integer_type_node,
+ dimode_integer_type_node, NULL_TREE);
+
+ p = one_arg_builtins;
+ for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
+ if ((target_flags & p->target_mask) == p->target_mask)
+ lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
+ NULL, attrs[p->is_const]);
+
+ ftype = build_function_type_list (dimode_integer_type_node,
+ dimode_integer_type_node,
+ dimode_integer_type_node, NULL_TREE);
+
+ p = two_arg_builtins;
+ for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
+ if ((target_flags & p->target_mask) == p->target_mask)
+ lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
+ NULL, attrs[p->is_const]);
+
+ ftype = build_function_type (ptr_type_node, void_list_node);
+ lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
+ ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
+ NULL, attrs[0]);
+
+ ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
+ lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
+ ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
+ NULL, attrs[0]);
+
+ alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
+ alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
+ alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
+ alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
+}
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
+static rtx
+alpha_expand_builtin (tree exp, rtx target,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+#define MAX_ARGS 2
+
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree arglist = TREE_OPERAND (exp, 1);
+ enum insn_code icode;
+ rtx op[MAX_ARGS], pat;
+ int arity;
+ bool nonvoid;
+
+ if (fcode >= ALPHA_BUILTIN_max)
+ internal_error ("bad builtin fcode");
+ icode = code_for_builtin[fcode];
+ if (icode == 0)
+ internal_error ("bad builtin fcode");
+
+ nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
+
+ for (arglist = TREE_OPERAND (exp, 1), arity = 0;
+ arglist;
+ arglist = TREE_CHAIN (arglist), arity++)
+ {
+ const struct insn_operand_data *insn_op;
+
+ tree arg = TREE_VALUE (arglist);
+ if (arg == error_mark_node)
+ return NULL_RTX;
+      if (arity >= MAX_ARGS)
+ return NULL_RTX;
+
+ insn_op = &insn_data[icode].operand[arity + nonvoid];
+
+ op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
+
+ if (!(*insn_op->predicate) (op[arity], insn_op->mode))
+ op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
+ }
+
+ if (nonvoid)
+ {
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ if (!target
+ || GET_MODE (target) != tmode
+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ }
+
+ switch (arity)
+ {
+ case 0:
+ pat = GEN_FCN (icode) (target);
+ break;
+ case 1:
+ if (nonvoid)
+ pat = GEN_FCN (icode) (target, op[0]);
+ else
+ pat = GEN_FCN (icode) (op[0]);
+ break;
+ case 2:
+ pat = GEN_FCN (icode) (target, op[0], op[1]);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ if (!pat)
+ return NULL_RTX;
+ emit_insn (pat);
+
+ if (nonvoid)
+ return target;
+ else
+ return const0_rtx;
+}
+
+
+/* Several bits below assume HWI >= 64 bits. This should be enforced
+ by config.gcc. */
+#if HOST_BITS_PER_WIDE_INT < 64
+# error "HOST_WIDE_INT too small"
+#endif
+
+/* Fold the builtin for the CMPBGE instruction. This is a vector comparison
+   with an 8-bit output vector.  OPINT contains the integer operands; bit N
+ of OP_CONST is set if OPINT[N] is valid. */
+
+static tree
+alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
+{
+ if (op_const == 3)
+ {
+ int i, val;
+ for (i = 0, val = 0; i < 8; ++i)
+ {
+ unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
+ unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
+ if (c0 >= c1)
+ val |= 1 << i;
+ }
+ return build_int_cst (long_integer_type_node, val);
+ }
+ else if (op_const == 2 && opint[1] == 0)
+ return build_int_cst (long_integer_type_node, 0xff);
+ return NULL;
+}
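+
+/* For example, the all-constant call
+     __builtin_alpha_cmpbge (0x0102030405060708, 0x0807060504030201)
+   folds to 0x0f: bytes 0..3 of the first operand (0x08, 0x07, 0x06,
+   0x05) are >= the corresponding bytes of the second, while bytes
+   4..7 are not.  */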
+
+/* Fold the builtin for the ZAPNOT instruction. This is essentially a
+ specialized form of an AND operation. Other byte manipulation instructions
+ are defined in terms of this instruction, so this is also used as a
+ subroutine for other builtins.
+
+ OP contains the tree operands; OPINT contains the extracted integer values.
+   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
+ OPINT may be considered. */
+
+static tree
+alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
+ long op_const)
+{
+ if (op_const & 2)
+ {
+ unsigned HOST_WIDE_INT mask = 0;
+ int i;
+
+ for (i = 0; i < 8; ++i)
+ if ((opint[1] >> i) & 1)
+ mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
+
+ if (op_const & 1)
+ return build_int_cst (long_integer_type_node, opint[0] & mask);
+
+ if (op)
+ return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
+ build_int_cst (long_integer_type_node, mask)));
+ }
+ else if ((op_const & 1) && opint[0] == 0)
+ return build_int_cst (long_integer_type_node, 0);
+ return NULL;
+}
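+
+/* For example, a constant byte selector of 0x0f expands to the mask
+   0x00000000ffffffff, so __builtin_alpha_zapnot (x, 0x0f) folds to
+   x & 0xffffffff even when X itself is not constant.  */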
+
+/* Fold the builtins for the EXT family of instructions. */
+
+static tree
+alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
+ long op_const, unsigned HOST_WIDE_INT bytemask,
+ bool is_high)
+{
+ long zap_const = 2;
+ tree *zap_op = NULL;
+
+ if (op_const & 2)
+ {
+ unsigned HOST_WIDE_INT loc;
+
+ loc = opint[1] & 7;
+ if (BYTES_BIG_ENDIAN)
+ loc ^= 7;
+ loc *= 8;
+
+ if (loc != 0)
+ {
+ if (op_const & 1)
+ {
+ unsigned HOST_WIDE_INT temp = opint[0];
+ if (is_high)
+ temp <<= loc;
+ else
+ temp >>= loc;
+ opint[0] = temp;
+ zap_const = 3;
+ }
+ }
+ else
+ zap_op = op;
+ }
+
+ opint[1] = bytemask;
+ return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
+}
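+
+/* For example (in the normal little-endian case),
+   __builtin_alpha_extbl (x, 2) with X constant folds to
+   (x >> 16) & 0xff, and __builtin_alpha_extbl (x, 0) folds to
+   x & 0xff for any X, since a zero shift lets the ZAPNOT fold above
+   operate on the tree operand directly.  */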
+
+/* Fold the builtins for the INS family of instructions. */
+
+static tree
+alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
+ long op_const, unsigned HOST_WIDE_INT bytemask,
+ bool is_high)
+{
+ if ((op_const & 1) && opint[0] == 0)
+ return build_int_cst (long_integer_type_node, 0);
+
+ if (op_const & 2)
+ {
+ unsigned HOST_WIDE_INT temp, loc, byteloc;
+ tree *zap_op = NULL;
+
+ loc = opint[1] & 7;
+ if (BYTES_BIG_ENDIAN)
+ loc ^= 7;
+ bytemask <<= loc;
+
+ temp = opint[0];
+ if (is_high)
+ {
+ byteloc = (64 - (loc * 8)) & 0x3f;
+ if (byteloc == 0)
+ zap_op = op;
+ else
+ temp >>= byteloc;
+ bytemask >>= 8;
+ }
+ else
+ {
+ byteloc = loc * 8;
+ if (byteloc == 0)
+ zap_op = op;
+ else
+ temp <<= byteloc;
+ }
+
+ opint[0] = temp;
+ opint[1] = bytemask;
+ return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
+ }
+
+ return NULL;
+}
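+
+/* For example, with both operands constant (little-endian case),
+   __builtin_alpha_insbl (x, 2) folds to (x & 0xff) << 16: the byte
+   mask 0x01 is shifted up to 0x04 and the value left by 16 bits
+   before the ZAPNOT fold selects byte 2 of the result.  */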
+
+static tree
+alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
+ long op_const, unsigned HOST_WIDE_INT bytemask,
+ bool is_high)
+{
+ if (op_const & 2)
+ {
+ unsigned HOST_WIDE_INT loc;
+
+ loc = opint[1] & 7;
+ if (BYTES_BIG_ENDIAN)
+ loc ^= 7;
+ bytemask <<= loc;
+
+ if (is_high)
+ bytemask >>= 8;
+
+ opint[1] = bytemask ^ 0xff;
+ }
+
+ return alpha_fold_builtin_zapnot (op, opint, op_const);
+}
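+
+/* For example, __builtin_alpha_mskbl (x, 2) folds, even for
+   non-constant X, to x & ~(unsigned HOST_WIDE_INT) 0xff0000: the
+   byte mask 0x01 shifted to 0x04 and complemented selects every
+   byte except byte 2.  */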
+
+static tree
+alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
+{
+ switch (op_const)
+ {
+ case 3:
+ {
+ unsigned HOST_WIDE_INT l;
+ HOST_WIDE_INT h;
+
+ mul_double (opint[0], 0, opint[1], 0, &l, &h);
+
+#if HOST_BITS_PER_WIDE_INT > 64
+# error fixme
+#endif
+
+ return build_int_cst (long_integer_type_node, h);
+ }
+
+ case 1:
+ opint[1] = opint[0];
+ /* FALLTHRU */
+ case 2:
+ /* Note that (X*1) >> 64 == 0. */
+ if (opint[1] == 0 || opint[1] == 1)
+ return build_int_cst (long_integer_type_node, 0);
+ break;
+ }
+ return NULL;
+}
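+
+/* For example, __builtin_alpha_umulh (1UL << 32, 1UL << 32) folds to
+   1, the high 64 bits of the 128-bit product 2^64.  */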
+
+static tree
+alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
+{
+ tree op0 = fold_convert (vtype, op[0]);
+ tree op1 = fold_convert (vtype, op[1]);
+ tree val = fold (build2 (code, vtype, op0, op1));
+ return fold_convert (long_integer_type_node, val);
+}
+
+static tree
+alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
+{
+ unsigned HOST_WIDE_INT temp = 0;
+ int i;
+
+ if (op_const != 3)
+ return NULL;
+
+ for (i = 0; i < 8; ++i)
+ {
+ unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
+ unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
+ if (a >= b)
+ temp += a - b;
+ else
+ temp += b - a;
+ }
+
+ return build_int_cst (long_integer_type_node, temp);
+}
+
+static tree
+alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
+{
+ unsigned HOST_WIDE_INT temp;
+
+ if (op_const == 0)
+ return NULL;
+
+ temp = opint[0] & 0xff;
+ temp |= (opint[0] >> 24) & 0xff00;
+
+ return build_int_cst (long_integer_type_node, temp);
+}
+
+static tree
+alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
+{
+ unsigned HOST_WIDE_INT temp;
+
+ if (op_const == 0)
+ return NULL;
+
+ temp = opint[0] & 0xff;
+ temp |= (opint[0] >> 8) & 0xff00;
+ temp |= (opint[0] >> 16) & 0xff0000;
+ temp |= (opint[0] >> 24) & 0xff000000;
+
+ return build_int_cst (long_integer_type_node, temp);
+}
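+
+/* For example, __builtin_alpha_pkwb (0x0706050403020100) folds to
+   0x06040200: PKLB packs the low bytes of the two longwords (source
+   bytes 0 and 4) and PKWB the low bytes of the four words (source
+   bytes 0, 2, 4 and 6) into consecutive result bytes.  */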
+
+static tree
+alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
+{
+ unsigned HOST_WIDE_INT temp;
+
+ if (op_const == 0)
+ return NULL;
+
+ temp = opint[0] & 0xff;
+ temp |= (opint[0] & 0xff00) << 24;
+
+ return build_int_cst (long_integer_type_node, temp);
+}
+
+static tree
+alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
+{
+ unsigned HOST_WIDE_INT temp;
+
+ if (op_const == 0)
+ return NULL;
+
+ temp = opint[0] & 0xff;
+ temp |= (opint[0] & 0x0000ff00) << 8;
+ temp |= (opint[0] & 0x00ff0000) << 16;
+ temp |= (opint[0] & 0xff000000) << 24;
+
+ return build_int_cst (long_integer_type_node, temp);
+}
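+
+/* These are the inverses of PKLB/PKWB above; e.g.
+   __builtin_alpha_unpkbw (0x06040200) folds to 0x0006000400020000,
+   zero-extending bytes 0..3 of the operand into four 16-bit lanes.  */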
+
+static tree
+alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
+{
+ unsigned HOST_WIDE_INT temp;
+
+ if (op_const == 0)
+ return NULL;
+
+ if (opint[0] == 0)
+ temp = 64;
+ else
+ temp = exact_log2 (opint[0] & -opint[0]);
+
+ return build_int_cst (long_integer_type_node, temp);
+}
+
+static tree
+alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
+{
+ unsigned HOST_WIDE_INT temp;
+
+ if (op_const == 0)
+ return NULL;
+
+ if (opint[0] == 0)
+ temp = 64;
+ else
+ temp = 64 - floor_log2 (opint[0]) - 1;
+
+ return build_int_cst (long_integer_type_node, temp);
+}
+
+static tree
+alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
+{
+ unsigned HOST_WIDE_INT temp, op;
+
+ if (op_const == 0)
+ return NULL;
+
+ op = opint[0];
+ temp = 0;
+ while (op)
+ temp++, op &= op - 1;
+
+ return build_int_cst (long_integer_type_node, temp);
+}
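+
+/* For example, __builtin_alpha_cttz (0x48) folds to 3 (0x48 & -0x48
+   isolates the low set bit, 0x08), __builtin_alpha_ctlz (1) folds to
+   63, and __builtin_alpha_ctpop (0xff) folds to 8; CTTZ and CTLZ both
+   fold to 64 for a zero operand.  */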
+
+/* Fold one of our builtin functions. */
+
+static tree
+alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
+{
+ tree op[MAX_ARGS], t;
+ unsigned HOST_WIDE_INT opint[MAX_ARGS];
+ long op_const = 0, arity = 0;
+
+ for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
+ {
+ tree arg = TREE_VALUE (t);
+ if (arg == error_mark_node)
+ return NULL;
+ if (arity >= MAX_ARGS)
+ return NULL;
+
+ op[arity] = arg;
+ opint[arity] = 0;
+ if (TREE_CODE (arg) == INTEGER_CST)
+ {
+ op_const |= 1L << arity;
+ opint[arity] = int_cst_value (arg);
+ }
+ }
+
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case ALPHA_BUILTIN_CMPBGE:
+ return alpha_fold_builtin_cmpbge (opint, op_const);
+
+ case ALPHA_BUILTIN_EXTBL:
+ return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
+ case ALPHA_BUILTIN_EXTWL:
+ return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
+ case ALPHA_BUILTIN_EXTLL:
+ return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
+ case ALPHA_BUILTIN_EXTQL:
+ return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
+ case ALPHA_BUILTIN_EXTWH:
+ return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
+ case ALPHA_BUILTIN_EXTLH:
+ return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
+ case ALPHA_BUILTIN_EXTQH:
+ return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
+
+ case ALPHA_BUILTIN_INSBL:
+ return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
+ case ALPHA_BUILTIN_INSWL:
+ return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
+ case ALPHA_BUILTIN_INSLL:
+ return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
+ case ALPHA_BUILTIN_INSQL:
+ return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
+ case ALPHA_BUILTIN_INSWH:
+ return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
+ case ALPHA_BUILTIN_INSLH:
+ return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
+ case ALPHA_BUILTIN_INSQH:
+ return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
+
+ case ALPHA_BUILTIN_MSKBL:
+ return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
+ case ALPHA_BUILTIN_MSKWL:
+ return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
+ case ALPHA_BUILTIN_MSKLL:
+ return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
+ case ALPHA_BUILTIN_MSKQL:
+ return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
+ case ALPHA_BUILTIN_MSKWH:
+ return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
+ case ALPHA_BUILTIN_MSKLH:
+ return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
+ case ALPHA_BUILTIN_MSKQH:
+ return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
+
+ case ALPHA_BUILTIN_UMULH:
+ return alpha_fold_builtin_umulh (opint, op_const);
+
+ case ALPHA_BUILTIN_ZAP:
+ opint[1] ^= 0xff;
+ /* FALLTHRU */
+ case ALPHA_BUILTIN_ZAPNOT:
+ return alpha_fold_builtin_zapnot (op, opint, op_const);
+
+ case ALPHA_BUILTIN_MINUB8:
+ return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
+ case ALPHA_BUILTIN_MINSB8:
+ return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
+ case ALPHA_BUILTIN_MINUW4:
+ return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
+ case ALPHA_BUILTIN_MINSW4:
+ return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
+ case ALPHA_BUILTIN_MAXUB8:
+ return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
+ case ALPHA_BUILTIN_MAXSB8:
+ return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
+ case ALPHA_BUILTIN_MAXUW4:
+ return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
+ case ALPHA_BUILTIN_MAXSW4:
+ return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
+
+ case ALPHA_BUILTIN_PERR:
+ return alpha_fold_builtin_perr (opint, op_const);
+ case ALPHA_BUILTIN_PKLB:
+ return alpha_fold_builtin_pklb (opint, op_const);
+ case ALPHA_BUILTIN_PKWB:
+ return alpha_fold_builtin_pkwb (opint, op_const);
+ case ALPHA_BUILTIN_UNPKBL:
+ return alpha_fold_builtin_unpkbl (opint, op_const);
+ case ALPHA_BUILTIN_UNPKBW:
+ return alpha_fold_builtin_unpkbw (opint, op_const);
+
+ case ALPHA_BUILTIN_CTTZ:
+ return alpha_fold_builtin_cttz (opint, op_const);
+ case ALPHA_BUILTIN_CTLZ:
+ return alpha_fold_builtin_ctlz (opint, op_const);
+ case ALPHA_BUILTIN_CTPOP:
+ return alpha_fold_builtin_ctpop (opint, op_const);
+
+ case ALPHA_BUILTIN_AMASK:
+ case ALPHA_BUILTIN_IMPLVER:
+ case ALPHA_BUILTIN_RPCC:
+ case ALPHA_BUILTIN_THREAD_POINTER:
+ case ALPHA_BUILTIN_SET_THREAD_POINTER:
+ /* None of these are foldable at compile-time. */
+ default:
+ return NULL;
+ }
+}
+
+/* This page contains routines that are used to determine what the function
+ prologue and epilogue code will do and write them out. */
+
+/* Compute the size of the save area in the stack. */
+
+/* These variables are used for communication between the following functions.
+ They indicate various things about the current function being compiled
+ that are used to tell what kind of prologue, epilogue and procedure
+ descriptor to generate. */
+
+/* Nonzero if we need a stack procedure. */
+enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
+static enum alpha_procedure_types alpha_procedure_type;
+
+/* Register number (either FP or SP) that is used to unwind the frame. */
+static int vms_unwind_regno;
+
+/* Register number used to save FP. We need not have one for RA since
+ we don't modify it for register procedures. This is only defined
+ for register frame procedures. */
+static int vms_save_fp_regno;
+
+/* Register number used to reference objects off our PV. */
+static int vms_base_regno;
+
+/* Compute register masks for saved registers. */
+
+static void
+alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
+{
+ unsigned long imask = 0;
+ unsigned long fmask = 0;
+ unsigned int i;
+
+ /* When outputting a thunk, we don't have valid register life info,
+ but assemble_start_function wants to output .frame and .mask
+ directives. */
+ if (current_function_is_thunk)
+ {
+ *imaskP = 0;
+ *fmaskP = 0;
+ return;
+ }
+
+ if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
+ imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
+
+ /* One for every register we have to save. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (! fixed_regs[i] && ! call_used_regs[i]
+ && regs_ever_live[i] && i != REG_RA
+ && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
+ {
+ if (i < 32)
+ imask |= (1UL << i);
+ else
+ fmask |= (1UL << (i - 32));
+ }
+
+ /* We need to restore these for the handler. */
+ if (current_function_calls_eh_return)
+ {
+ for (i = 0; ; ++i)
+ {
+ unsigned regno = EH_RETURN_DATA_REGNO (i);
+ if (regno == INVALID_REGNUM)
+ break;
+ imask |= 1UL << regno;
+ }
+ }
+
+ /* If any register spilled, then spill the return address also. */
+ /* ??? This is required by the Digital stack unwind specification
+ and isn't needed if we're doing Dwarf2 unwinding. */
+ if (imask || fmask || alpha_ra_ever_killed ())
+ imask |= (1UL << REG_RA);
+
+ *imaskP = imask;
+ *fmaskP = fmask;
+}
+
+int
+alpha_sa_size (void)
+{
+ unsigned long mask[2];
+ int sa_size = 0;
+ int i, j;
+
+ alpha_sa_mask (&mask[0], &mask[1]);
+
+ if (TARGET_ABI_UNICOSMK)
+ {
+ if (mask[0] || mask[1])
+ sa_size = 14;
+ }
+ else
+ {
+ for (j = 0; j < 2; ++j)
+ for (i = 0; i < 32; ++i)
+ if ((mask[j] >> i) & 1)
+ sa_size++;
+ }
+
+ if (TARGET_ABI_UNICOSMK)
+ {
+ /* We might not need to generate a frame if we don't make any calls
+ (including calls to __T3E_MISMATCH if this is a vararg function),
+ don't have any local variables which require stack slots, don't
+ use alloca and have not determined that we need a frame for other
+ reasons. */
+
+ alpha_procedure_type
+ = (sa_size || get_frame_size() != 0
+ || current_function_outgoing_args_size
+ || current_function_stdarg || current_function_calls_alloca
+ || frame_pointer_needed)
+ ? PT_STACK : PT_REGISTER;
+
+ /* Always reserve space for saving callee-saved registers if we
+ need a frame as required by the calling convention. */
+ if (alpha_procedure_type == PT_STACK)
+ sa_size = 14;
+ }
+ else if (TARGET_ABI_OPEN_VMS)
+ {
+ /* Start by assuming we can use a register procedure if we don't
+ make any calls (REG_RA not used) or need to save any
+ registers and a stack procedure if we do. */
+ if ((mask[0] >> REG_RA) & 1)
+ alpha_procedure_type = PT_STACK;
+ else if (get_frame_size() != 0)
+ alpha_procedure_type = PT_REGISTER;
+ else
+ alpha_procedure_type = PT_NULL;
+
+ /* Don't reserve space for saving FP & RA yet. Do that later after we've
+ made the final decision on stack procedure vs register procedure. */
+ if (alpha_procedure_type == PT_STACK)
+ sa_size -= 2;
+
+ /* Decide whether to refer to objects off our PV via FP or PV.
+ If we need FP for something else or if we receive a nonlocal
+ goto (which expects PV to contain the value), we must use PV.
+ Otherwise, start by assuming we can use FP. */
+
+ vms_base_regno
+ = (frame_pointer_needed
+ || current_function_has_nonlocal_label
+ || alpha_procedure_type == PT_STACK
+ || current_function_outgoing_args_size)
+ ? REG_PV : HARD_FRAME_POINTER_REGNUM;
+
+ /* If we want to copy PV into FP, we need to find some register
+ in which to save FP. */
+
+ vms_save_fp_regno = -1;
+ if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
+ for (i = 0; i < 32; i++)
+ if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
+ vms_save_fp_regno = i;
+
+ if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
+ vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
+ else if (alpha_procedure_type == PT_NULL)
+ vms_base_regno = REG_PV;
+
+ /* Stack unwinding should be done via FP unless we use it for PV. */
+ vms_unwind_regno = (vms_base_regno == REG_PV
+ ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
+
+ /* If this is a stack procedure, allow space for saving FP and RA. */
+ if (alpha_procedure_type == PT_STACK)
+ sa_size += 2;
+ }
+ else
+ {
+ /* Our size must be even (multiple of 16 bytes). */
+ if (sa_size & 1)
+ sa_size++;
+ }
+
+ return sa_size * 8;
+}
+
+/* Define the offset between two registers, one to be eliminated,
+ and the other its replacement, at the start of a routine. */
+
+HOST_WIDE_INT
+alpha_initial_elimination_offset (unsigned int from,
+ unsigned int to ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT ret;
+
+ ret = alpha_sa_size ();
+ ret += ALPHA_ROUND (current_function_outgoing_args_size);
+
+ switch (from)
+ {
+ case FRAME_POINTER_REGNUM:
+ break;
+
+ case ARG_POINTER_REGNUM:
+ ret += (ALPHA_ROUND (get_frame_size ()
+ + current_function_pretend_args_size)
+ - current_function_pretend_args_size);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return ret;
+}
+
+int
+alpha_pv_save_size (void)
+{
+ alpha_sa_size ();
+ return alpha_procedure_type == PT_STACK ? 8 : 0;
+}
+
+int
+alpha_using_fp (void)
+{
+ alpha_sa_size ();
+ return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
+}
+
+#if TARGET_ABI_OPEN_VMS
+
+const struct attribute_spec vms_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "overlaid", 0, 0, true, false, false, NULL },
+ { "global", 0, 0, true, false, false, NULL },
+ { "initialize", 0, 0, true, false, false, NULL },
+ { NULL, 0, 0, false, false, false, NULL }
+};
+
+#endif
+
+static int
+find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
+{
+ return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
+}
+
+int
+alpha_find_lo_sum_using_gp (rtx insn)
+{
+ return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
+}
+
+static int
+alpha_does_function_need_gp (void)
+{
+ rtx insn;
+
+ /* The GP being variable is an OSF abi thing. */
+ if (! TARGET_ABI_OSF)
+ return 0;
+
+ /* We need the gp to load the address of __mcount. */
+ if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
+ return 1;
+
+ /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
+ if (current_function_is_thunk)
+ return 1;
+
+ /* The nonlocal receiver pattern assumes that the gp is valid for
+ the nested function. Reasonable because it's almost always set
+ correctly already. For the cases where that's wrong, make sure
+ the nested function loads its gp on entry. */
+ if (current_function_has_nonlocal_goto)
+ return 1;
+
+ /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
+ Even if we are a static function, we still need to do this in case
+ our address is taken and passed to something like qsort. */
+
+ push_topmost_sequence ();
+ insn = get_insns ();
+ pop_topmost_sequence ();
+
+ for (; insn; insn = NEXT_INSN (insn))
+ if (INSN_P (insn)
+ && ! JUMP_TABLE_DATA_P (insn)
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER
+ && get_attr_usegp (insn))
+ return 1;
+
+ return 0;
+}
+
+
+/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
+ sequences. */
+
+static rtx
+set_frame_related_p (void)
+{
+ rtx seq = get_insns ();
+ rtx insn;
+
+ end_sequence ();
+
+ if (!seq)
+ return NULL_RTX;
+
+ if (INSN_P (seq))
+ {
+ insn = seq;
+ while (insn != NULL_RTX)
+ {
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = NEXT_INSN (insn);
+ }
+ seq = emit_insn (seq);
+ }
+ else
+ {
+ seq = emit_insn (seq);
+ RTX_FRAME_RELATED_P (seq) = 1;
+ }
+ return seq;
+}
+
+#define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
+
+/* Generates a store with the proper unwind info attached. VALUE is
+ stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
+ contains SP+FRAME_BIAS, and that is the unwind info that should be
+ generated. If FRAME_REG != VALUE, then VALUE is being stored on
+ behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
+
+static void
+emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
+ HOST_WIDE_INT base_ofs, rtx frame_reg)
+{
+ rtx addr, mem, insn;
+
+ addr = plus_constant (base_reg, base_ofs);
+ mem = gen_rtx_MEM (DImode, addr);
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+
+ insn = emit_move_insn (mem, value);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ if (frame_bias || value != frame_reg)
+ {
+ if (frame_bias)
+ {
+ addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
+ mem = gen_rtx_MEM (DImode, addr);
+ }
+
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, mem, frame_reg),
+ REG_NOTES (insn));
+ }
+}
+
+static void
+emit_frame_store (unsigned int regno, rtx base_reg,
+ HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
+{
+ rtx reg = gen_rtx_REG (DImode, regno);
+ emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
+}
+
+/* Write function prologue. */
+
+/* On VMS we have two kinds of functions:
+
+   - stack frame (PROC_STACK)
+	these are 'normal' functions with local variables which
+	call other functions
+   - register frame (PROC_REGISTER)
+	keeps all data in registers, needs no stack
+
+   We must pass this to the assembler so it can generate the
+   proper pdsc (procedure descriptor).  This is done with the
+   '.pdesc' directive.
+
+   On non-VMS targets, we don't really differentiate between the two,
+   as we can simply allocate stack without saving registers.  */
+
+void
+alpha_expand_prologue (void)
+{
+ /* Registers to save. */
+ unsigned long imask = 0;
+ unsigned long fmask = 0;
+ /* Stack space needed for pushing registers clobbered by us. */
+ HOST_WIDE_INT sa_size;
+ /* Complete stack size needed. */
+ HOST_WIDE_INT frame_size;
+ /* Offset from base reg to register save area. */
+ HOST_WIDE_INT reg_offset;
+ rtx sa_reg;
+ int i;
+
+ sa_size = alpha_sa_size ();
+
+ frame_size = get_frame_size ();
+ if (TARGET_ABI_OPEN_VMS)
+ frame_size = ALPHA_ROUND (sa_size
+ + (alpha_procedure_type == PT_STACK ? 8 : 0)
+ + frame_size
+ + current_function_pretend_args_size);
+ else if (TARGET_ABI_UNICOSMK)
+ /* We have to allocate space for the DSIB if we generate a frame. */
+ frame_size = ALPHA_ROUND (sa_size
+ + (alpha_procedure_type == PT_STACK ? 48 : 0))
+ + ALPHA_ROUND (frame_size
+ + current_function_outgoing_args_size);
+ else
+ frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
+ + sa_size
+ + ALPHA_ROUND (frame_size
+ + current_function_pretend_args_size));
+
+ if (TARGET_ABI_OPEN_VMS)
+ reg_offset = 8;
+ else
+ reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
+
+ alpha_sa_mask (&imask, &fmask);
+
+ /* Emit an insn to reload GP, if needed. */
+ if (TARGET_ABI_OSF)
+ {
+ alpha_function_needs_gp = alpha_does_function_need_gp ();
+ if (alpha_function_needs_gp)
+ emit_insn (gen_prologue_ldgp ());
+ }
+
+ /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
+ the call to mcount ourselves, rather than having the linker do it
+ magically in response to -pg. Since _mcount has special linkage,
+ don't represent the call as a call. */
+ if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
+ emit_insn (gen_prologue_mcount ());
+
+ if (TARGET_ABI_UNICOSMK)
+ unicosmk_gen_dsib (&imask);
+
+ /* Adjust the stack by the frame size. If the frame size is > 4096
+ bytes, we need to be sure we probe somewhere in the first and last
+ 4096 bytes (we can probably get away without the latter test) and
+ every 8192 bytes in between. If the frame size is > 32768, we
+ do this in a loop. Otherwise, we generate the explicit probe
+ instructions.
+
+ Note that we are only allowed to adjust sp once in the prologue. */
+
+ if (frame_size <= 32768)
+ {
+ if (frame_size > 4096)
+ {
+ int probed;
+
+ for (probed = 4096; probed < frame_size; probed += 8192)
+ emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
+ ? -probed + 64
+ : -probed)));
+
+ /* We only have to do this probe if we aren't saving registers. */
+ if (sa_size == 0 && frame_size > probed - 4096)
+ emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
+ }
+
+ if (frame_size != 0)
+ FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (TARGET_ABI_UNICOSMK
+ ? -frame_size + 64
+ : -frame_size))));
+ }
+ else
+ {
+ /* Here we generate code to set R22 to SP + 4096 and set R23 to the
+ number of 8192 byte blocks to probe. We then probe each block
+ in the loop and then set SP to the proper location. If the
+ amount remaining is > 4096, we have to do one more probe if we
+ are not saving any registers. */
+
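+      /* For example, frame_size == 100000 gives blocks == 12 and
+	 leftover == 5792: twelve 8192-byte probe steps plus the final
+	 adjustment of -5792 leave SP exactly 100000 bytes lower.  Since
+	 leftover > 4096, one extra probe is emitted below when no
+	 registers are being saved.  */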
+ HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
+ HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
+ rtx ptr = gen_rtx_REG (DImode, 22);
+ rtx count = gen_rtx_REG (DImode, 23);
+ rtx seq;
+
+ emit_move_insn (count, GEN_INT (blocks));
+ emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
+ GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
+
+ /* Because of the difficulty in emitting a new basic block this
+ late in the compilation, generate the loop as a single insn. */
+ emit_insn (gen_prologue_stack_probe_loop (count, ptr));
+
+ if (leftover > 4096 && sa_size == 0)
+ {
+ rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
+ MEM_VOLATILE_P (last) = 1;
+ emit_move_insn (last, const0_rtx);
+ }
+
+ if (TARGET_ABI_WINDOWS_NT)
+ {
+ /* For NT stack unwind (done by 'reverse execution'), it's
+ not OK to take the result of a loop, even though the value
+ is already in ptr, so we reload it via a single operation
+	     and subtract it from sp.
+
+ Yes, that's correct -- we have to reload the whole constant
+ into a temporary via ldah+lda then subtract from sp. */
+
+ HOST_WIDE_INT lo, hi;
+ lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
+ hi = frame_size - lo;
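+	  /* The xor/subtract sequence sign-extends the low 16 bits, so
+	     HI is a multiple of 65536 (loadable with one ldah) and LO
+	     fits in lda's signed 16-bit displacement; e.g. frame_size
+	     == 0x18000 splits into hi == 0x20000, lo == -0x8000.  */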
+
+ emit_move_insn (ptr, GEN_INT (hi));
+ emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
+ seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
+ ptr));
+ }
+ else
+ {
+ seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
+ GEN_INT (-leftover)));
+ }
+
+ /* This alternative is special, because the DWARF code cannot
+ possibly intuit through the loop above. So we invent this
+	 note for it to look at instead.  */
+ RTX_FRAME_RELATED_P (seq) = 1;
+ REG_NOTES (seq)
+ = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ GEN_INT (TARGET_ABI_UNICOSMK
+ ? -frame_size + 64
+ : -frame_size))),
+ REG_NOTES (seq));
+ }
+
+ if (!TARGET_ABI_UNICOSMK)
+ {
+ HOST_WIDE_INT sa_bias = 0;
+
+ /* Cope with very large offsets to the register save area. */
+ sa_reg = stack_pointer_rtx;
+ if (reg_offset + sa_size > 0x8000)
+ {
+ int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
+ rtx sa_bias_rtx;
+
+ if (low + sa_size <= 0x8000)
+ sa_bias = reg_offset - low, reg_offset = low;
+ else
+ sa_bias = reg_offset, reg_offset = 0;
+
+ sa_reg = gen_rtx_REG (DImode, 24);
+ sa_bias_rtx = GEN_INT (sa_bias);
+
+ if (add_operand (sa_bias_rtx, DImode))
+ emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
+ else
+ {
+ emit_move_insn (sa_reg, sa_bias_rtx);
+ emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
+ }
+ }
+
+ /* Save regs in stack order. Beginning with VMS PV. */
+ if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
+ emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
+
+ /* Save register RA next. */
+ if (imask & (1UL << REG_RA))
+ {
+ emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
+ imask &= ~(1UL << REG_RA);
+ reg_offset += 8;
+ }
+
+ /* Now save any other registers required to be saved. */
+ for (i = 0; i < 31; i++)
+ if (imask & (1UL << i))
+ {
+ emit_frame_store (i, sa_reg, sa_bias, reg_offset);
+ reg_offset += 8;
+ }
+
+ for (i = 0; i < 31; i++)
+ if (fmask & (1UL << i))
+ {
+ emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
+ reg_offset += 8;
+ }
+ }
+ else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
+ {
+ /* The standard frame on the T3E includes space for saving registers.
+ We just have to use it. We don't have to save the return address and
+ the old frame pointer here - they are saved in the DSIB. */
+
+ reg_offset = -56;
+ for (i = 9; i < 15; i++)
+ if (imask & (1UL << i))
+ {
+ emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
+ reg_offset -= 8;
+ }
+ for (i = 2; i < 10; i++)
+ if (fmask & (1UL << i))
+ {
+ emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
+ reg_offset -= 8;
+ }
+ }
+
+ if (TARGET_ABI_OPEN_VMS)
+ {
+ if (alpha_procedure_type == PT_REGISTER)
+ /* Register frame procedures save the fp.
+ ?? Ought to have a dwarf2 save for this. */
+ emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
+ hard_frame_pointer_rtx);
+
+ if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
+ emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
+ gen_rtx_REG (DImode, REG_PV)));
+
+ if (alpha_procedure_type != PT_NULL
+ && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
+ FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
+
+ /* If we have to allocate space for outgoing args, do it now. */
+ if (current_function_outgoing_args_size != 0)
+ {
+ rtx seq
+ = emit_move_insn (stack_pointer_rtx,
+ plus_constant
+ (hard_frame_pointer_rtx,
+ - (ALPHA_ROUND
+ (current_function_outgoing_args_size))));
+
+ /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
+ if ! frame_pointer_needed. Setting the bit will change the CFA
+ computation rule to use sp again, which would be wrong if we had
+ frame_pointer_needed, as this means sp might move unpredictably
+ later on.
+
+ Also, note that
+ frame_pointer_needed
+ => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
+ and
+ current_function_outgoing_args_size != 0
+ => alpha_procedure_type != PT_NULL,
+
+ so when we are not setting the bit here, we are guaranteed to
+ have emitted an FRP frame pointer update just before. */
+ RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
+ }
+ }
+ else if (!TARGET_ABI_UNICOSMK)
+ {
+ /* If we need a frame pointer, set it from the stack pointer. */
+ if (frame_pointer_needed)
+ {
+ if (TARGET_CAN_FAULT_IN_PROLOGUE)
+ FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
+ else
+ /* This must always be the last instruction in the
+ prologue, thus we emit a special move + clobber. */
+ FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
+ stack_pointer_rtx, sa_reg)));
+ }
+ }
+
+ /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
+ the prologue, for exception handling reasons, we cannot do this for
+ any insn that might fault. We could prevent this for mems with a
+ (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
+ have to prevent all such scheduling with a blockage.
+
+ Linux, on the other hand, never bothered to implement OSF/1's
+ exception handling, and so doesn't care about such things. Anyone
+ planning to use dwarf2 frame-unwind info can also omit the blockage. */
+
+ if (! TARGET_CAN_FAULT_IN_PROLOGUE)
+ emit_insn (gen_blockage ());
+}
+
+/* Count the number of .file directives, so that .loc is up to date. */
+int num_source_filenames = 0;
+
+/* Output the textual info surrounding the prologue. */
+
+void
+alpha_start_function (FILE *file, const char *fnname,
+ tree decl ATTRIBUTE_UNUSED)
+{
+ unsigned long imask = 0;
+ unsigned long fmask = 0;
+ /* Stack space needed for pushing registers clobbered by us. */
+ HOST_WIDE_INT sa_size;
+ /* Complete stack size needed. */
+ unsigned HOST_WIDE_INT frame_size;
+ /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
+ unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
+ ? 524288
+ : 1UL << 31;
+ /* Offset from base reg to register save area. */
+ HOST_WIDE_INT reg_offset;
+ char *entry_label = (char *) alloca (strlen (fnname) + 6);
+ int i;
+
+ /* Don't emit an extern directive for functions defined in the same file. */
+ if (TARGET_ABI_UNICOSMK)
+ {
+ tree name_tree;
+ name_tree = get_identifier (fnname);
+ TREE_ASM_WRITTEN (name_tree) = 1;
+ }
+
+ alpha_fnname = fnname;
+ sa_size = alpha_sa_size ();
+
+ frame_size = get_frame_size ();
+ if (TARGET_ABI_OPEN_VMS)
+ frame_size = ALPHA_ROUND (sa_size
+ + (alpha_procedure_type == PT_STACK ? 8 : 0)
+ + frame_size
+ + current_function_pretend_args_size);
+ else if (TARGET_ABI_UNICOSMK)
+ frame_size = ALPHA_ROUND (sa_size
+ + (alpha_procedure_type == PT_STACK ? 48 : 0))
+ + ALPHA_ROUND (frame_size
+ + current_function_outgoing_args_size);
+ else
+ frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
+ + sa_size
+ + ALPHA_ROUND (frame_size
+ + current_function_pretend_args_size));
+
+ if (TARGET_ABI_OPEN_VMS)
+ reg_offset = 8;
+ else
+ reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
+
+ alpha_sa_mask (&imask, &fmask);
+
+ /* Ecoff can handle multiple .file directives, so put out file and lineno.
+ We have to do that before the .ent directive as we cannot switch
+ files within procedures with native ecoff because line numbers are
+ linked to procedure descriptors.
+ Outputting the lineno helps debugging of one line functions as they
+ would otherwise get no line number at all. Please note that we would
+ like to put out last_linenum from final.c, but it is not accessible. */
+
+ if (write_symbols == SDB_DEBUG)
+ {
+#ifdef ASM_OUTPUT_SOURCE_FILENAME
+ ASM_OUTPUT_SOURCE_FILENAME (file,
+ DECL_SOURCE_FILE (current_function_decl));
+#endif
+#ifdef SDB_OUTPUT_SOURCE_LINE
+ if (debug_info_level != DINFO_LEVEL_TERSE)
+ SDB_OUTPUT_SOURCE_LINE (file,
+ DECL_SOURCE_LINE (current_function_decl));
+#endif
+ }
+
+ /* Issue function start and label. */
+ if (TARGET_ABI_OPEN_VMS
+ || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
+ {
+ fputs ("\t.ent ", file);
+ assemble_name (file, fnname);
+ putc ('\n', file);
+
+ /* If the function needs GP, we'll write the "..ng" label there.
+ Otherwise, do it here. */
+ if (TARGET_ABI_OSF
+ && ! alpha_function_needs_gp
+ && ! current_function_is_thunk)
+ {
+ putc ('$', file);
+ assemble_name (file, fnname);
+ fputs ("..ng:\n", file);
+ }
+ }
+
+ strcpy (entry_label, fnname);
+ if (TARGET_ABI_OPEN_VMS)
+ strcat (entry_label, "..en");
+
+ /* For public functions, the label must be globalized by appending an
+ additional colon. */
+ if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
+ strcat (entry_label, ":");
+
+ ASM_OUTPUT_LABEL (file, entry_label);
+ inside_function = TRUE;
+
+ if (TARGET_ABI_OPEN_VMS)
+ fprintf (file, "\t.base $%d\n", vms_base_regno);
+
+ if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
+ && !flag_inhibit_size_directive)
+ {
+ /* Set flags in procedure descriptor to request IEEE-conformant
+ math-library routines. The value we set it to is PDSC_EXC_IEEE
+ (/usr/include/pdsc.h). */
+ fputs ("\t.eflag 48\n", file);
+ }
+
+ /* Set up offsets to alpha virtual arg/local debugging pointer. */
+ alpha_auto_offset = -frame_size + current_function_pretend_args_size;
+ alpha_arg_offset = -frame_size + 48;
+
+ /* Describe our frame. If the frame size is larger than an integer,
+ print it as zero to avoid an assembler error. We won't be
+ properly describing such a frame, but that's the best we can do. */
+ if (TARGET_ABI_UNICOSMK)
+ ;
+ else if (TARGET_ABI_OPEN_VMS)
+ fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
+ HOST_WIDE_INT_PRINT_DEC "\n",
+ vms_unwind_regno,
+ frame_size >= (1UL << 31) ? 0 : frame_size,
+ reg_offset);
+ else if (!flag_inhibit_size_directive)
+ fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
+ (frame_pointer_needed
+ ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
+ frame_size >= max_frame_size ? 0 : frame_size,
+ current_function_pretend_args_size);
+
+ /* Describe which registers were spilled. */
+ if (TARGET_ABI_UNICOSMK)
+ ;
+ else if (TARGET_ABI_OPEN_VMS)
+ {
+ if (imask)
+ /* ??? Does VMS care if mask contains ra? The old code didn't
+ set it, so I don't here. */
+ fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
+ if (fmask)
+ fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
+ if (alpha_procedure_type == PT_REGISTER)
+ fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
+ }
+ else if (!flag_inhibit_size_directive)
+ {
+ if (imask)
+ {
+ fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
+ frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
+
+ for (i = 0; i < 32; ++i)
+ if (imask & (1UL << i))
+ reg_offset += 8;
+ }
+
+ if (fmask)
+ fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
+ frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
+ }
+
+#if TARGET_ABI_OPEN_VMS
+  /* Ifdef'ed because link_section is only available then.  */
+ switch_to_section (readonly_data_section);
+ fprintf (file, "\t.align 3\n");
+ assemble_name (file, fnname); fputs ("..na:\n", file);
+ fputs ("\t.ascii \"", file);
+ assemble_name (file, fnname);
+ fputs ("\\0\"\n", file);
+ alpha_need_linkage (fnname, 1);
+ switch_to_section (text_section);
+#endif
+}
+
+/* Emit the .prologue note at the scheduled end of the prologue. */
+
+static void
+alpha_output_function_end_prologue (FILE *file)
+{
+ if (TARGET_ABI_UNICOSMK)
+ ;
+ else if (TARGET_ABI_OPEN_VMS)
+ fputs ("\t.prologue\n", file);
+ else if (TARGET_ABI_WINDOWS_NT)
+ fputs ("\t.prologue 0\n", file);
+ else if (!flag_inhibit_size_directive)
+ fprintf (file, "\t.prologue %d\n",
+ alpha_function_needs_gp || current_function_is_thunk);
+}
+
+/* Write function epilogue. */
+
+/* ??? At some point we will want to support full unwind, and so will
+ need to mark the epilogue as well. At the moment, we just confuse
+ dwarf2out. */
+#undef FRP
+#define FRP(exp) exp
+
+void
+alpha_expand_epilogue (void)
+{
+ /* Registers to save. */
+ unsigned long imask = 0;
+ unsigned long fmask = 0;
+ /* Stack space needed for pushing registers clobbered by us. */
+ HOST_WIDE_INT sa_size;
+ /* Complete stack size needed. */
+ HOST_WIDE_INT frame_size;
+ /* Offset from base reg to register save area. */
+ HOST_WIDE_INT reg_offset;
+ int fp_is_frame_pointer, fp_offset;
+ rtx sa_reg, sa_reg_exp = NULL;
+ rtx sp_adj1, sp_adj2, mem;
+ rtx eh_ofs;
+ int i;
+
+ sa_size = alpha_sa_size ();
+
+ frame_size = get_frame_size ();
+ if (TARGET_ABI_OPEN_VMS)
+ frame_size = ALPHA_ROUND (sa_size
+ + (alpha_procedure_type == PT_STACK ? 8 : 0)
+ + frame_size
+ + current_function_pretend_args_size);
+ else if (TARGET_ABI_UNICOSMK)
+ frame_size = ALPHA_ROUND (sa_size
+ + (alpha_procedure_type == PT_STACK ? 48 : 0))
+ + ALPHA_ROUND (frame_size
+ + current_function_outgoing_args_size);
+ else
+ frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
+ + sa_size
+ + ALPHA_ROUND (frame_size
+ + current_function_pretend_args_size));
+
+ if (TARGET_ABI_OPEN_VMS)
+ {
+ if (alpha_procedure_type == PT_STACK)
+ reg_offset = 8;
+ else
+ reg_offset = 0;
+ }
+ else
+ reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
+
+ alpha_sa_mask (&imask, &fmask);
+
+ fp_is_frame_pointer
+ = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
+ || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
+ fp_offset = 0;
+ sa_reg = stack_pointer_rtx;
+
+ if (current_function_calls_eh_return)
+ eh_ofs = EH_RETURN_STACKADJ_RTX;
+ else
+ eh_ofs = NULL_RTX;
+
+ if (!TARGET_ABI_UNICOSMK && sa_size)
+ {
+ /* If we have a frame pointer, restore SP from it. */
+ if ((TARGET_ABI_OPEN_VMS
+ && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
+ || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
+ FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
+
+ /* Cope with very large offsets to the register save area. */
+ if (reg_offset + sa_size > 0x8000)
+ {
+ int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT bias;
+
+ if (low + sa_size <= 0x8000)
+ bias = reg_offset - low, reg_offset = low;
+ else
+ bias = reg_offset, reg_offset = 0;
+
+ sa_reg = gen_rtx_REG (DImode, 22);
+ sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
+
+ FRP (emit_move_insn (sa_reg, sa_reg_exp));
+ }
+
+ /* Restore registers in order, excepting a true frame pointer. */
+
+ mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
+ if (! eh_ofs)
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+ FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
+
+ reg_offset += 8;
+ imask &= ~(1UL << REG_RA);
+
+ for (i = 0; i < 31; ++i)
+ if (imask & (1UL << i))
+ {
+ if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
+ fp_offset = reg_offset;
+ else
+ {
+ mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+ FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
+ }
+ reg_offset += 8;
+ }
+
+ for (i = 0; i < 31; ++i)
+ if (fmask & (1UL << i))
+ {
+ mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+ FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
+ reg_offset += 8;
+ }
+ }
+ else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
+ {
+ /* Restore callee-saved general-purpose registers. */
+
+ reg_offset = -56;
+
+ for (i = 9; i < 15; i++)
+ if (imask & (1UL << i))
+ {
+ mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
+ reg_offset));
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+ FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
+ reg_offset -= 8;
+ }
+
+ for (i = 2; i < 10; i++)
+ if (fmask & (1UL << i))
+ {
+ mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
+ reg_offset));
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+ FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
+ reg_offset -= 8;
+ }
+
+ /* Restore the return address from the DSIB. */
+
+ mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+ FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
+ }
+
+ if (frame_size || eh_ofs)
+ {
+ sp_adj1 = stack_pointer_rtx;
+
+ if (eh_ofs)
+ {
+ sp_adj1 = gen_rtx_REG (DImode, 23);
+ emit_move_insn (sp_adj1,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
+ }
+
+ /* If the stack size is large, begin computation into a temporary
+ register so as not to interfere with a potential fp restore,
+ which must be consecutive with an SP restore. */
+ if (frame_size < 32768
+ && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
+ sp_adj2 = GEN_INT (frame_size);
+ else if (TARGET_ABI_UNICOSMK)
+ {
+ sp_adj1 = gen_rtx_REG (DImode, 23);
+ FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
+ sp_adj2 = const0_rtx;
+ }
+ else if (frame_size < 0x40007fffL)
+ {
+ int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
+
+ sp_adj2 = plus_constant (sp_adj1, frame_size - low);
+ if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
+ sp_adj1 = sa_reg;
+ else
+ {
+ sp_adj1 = gen_rtx_REG (DImode, 23);
+ FRP (emit_move_insn (sp_adj1, sp_adj2));
+ }
+ sp_adj2 = GEN_INT (low);
+ }
+ else
+ {
+ rtx tmp = gen_rtx_REG (DImode, 23);
+ FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
+ 3, false));
+ if (!sp_adj2)
+ {
+		  /* We cannot, as far as we know, force new constants
+		     to memory this late, so build the value up by pieces. */
+ FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
+ -(frame_size < 0)));
+ gcc_assert (sp_adj2);
+ }
+ }
+
+ /* From now on, things must be in order. So emit blockages. */
+
+ /* Restore the frame pointer. */
+ if (TARGET_ABI_UNICOSMK)
+ {
+ emit_insn (gen_blockage ());
+ mem = gen_rtx_MEM (DImode,
+ plus_constant (hard_frame_pointer_rtx, -16));
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+ FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
+ }
+ else if (fp_is_frame_pointer)
+ {
+ emit_insn (gen_blockage ());
+ mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+ FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
+ }
+ else if (TARGET_ABI_OPEN_VMS)
+ {
+ emit_insn (gen_blockage ());
+ FRP (emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno)));
+ }
+
+ /* Restore the stack pointer. */
+ emit_insn (gen_blockage ());
+ if (sp_adj2 == const0_rtx)
+ FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
+ else
+ FRP (emit_move_insn (stack_pointer_rtx,
+ gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
+ }
+ else
+ {
+ if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
+ {
+ emit_insn (gen_blockage ());
+ FRP (emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno)));
+ }
+ else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
+ {
+ /* Decrement the frame pointer if the function does not have a
+ frame. */
+
+ emit_insn (gen_blockage ());
+ FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
+ hard_frame_pointer_rtx, constm1_rtx)));
+ }
+ }
+}
+
+/* Output the rest of the textual info surrounding the epilogue. */
+
+void
+alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
+{
+#if TARGET_ABI_OPEN_VMS
+ alpha_write_linkage (file, fnname, decl);
+#endif
+
+ /* End the function. */
+ if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
+ {
+ fputs ("\t.end ", file);
+ assemble_name (file, fnname);
+ putc ('\n', file);
+ }
+ inside_function = FALSE;
+
+ /* Output jump tables and the static subroutine information block. */
+ if (TARGET_ABI_UNICOSMK)
+ {
+ unicosmk_output_ssib (file, fnname);
+ unicosmk_output_deferred_case_vectors (file);
+ }
+}
+
+#if TARGET_ABI_OSF
+/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
+
+ In order to avoid the hordes of differences between generated code
+ with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
+ lots of code loading up large constants, generate rtl and emit it
+ instead of going straight to text.
+
+ Not sure why this idea hasn't been explored before... */
+
+static void
+alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
+ tree function)
+{
+ HOST_WIDE_INT hi, lo;
+ rtx this, insn, funexp;
+
+ reset_block_changes ();
+
+ /* We always require a valid GP. */
+ emit_insn (gen_prologue_ldgp ());
+ emit_note (NOTE_INSN_PROLOGUE_END);
+
+ /* Find the "this" pointer. If the function returns a structure,
+ the structure return pointer is in $16. */
+ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
+ this = gen_rtx_REG (Pmode, 17);
+ else
+ this = gen_rtx_REG (Pmode, 16);
+
+ /* Add DELTA. When possible we use ldah+lda. Otherwise load the
+ entire constant for the add. */
+ lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
+ hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ if (hi + lo == delta)
+ {
+ if (hi)
+ emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
+ if (lo)
+ emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
+ }
+ else
+ {
+ rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
+ delta, -(delta < 0));
+ emit_insn (gen_adddi3 (this, this, tmp));
+ }
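+  /* For example (illustration only): DELTA == 0x18000 splits into
+     HI == 0x20000 and LO == -0x8000 (an ldah/lda pair), and
+     0x20000 + -0x8000 == 0x18000.  */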
+
+ /* Add a delta stored in the vtable at VCALL_OFFSET. */
+ if (vcall_offset)
+ {
+ rtx tmp, tmp2;
+
+ tmp = gen_rtx_REG (Pmode, 0);
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
+
+ lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
+ hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ if (hi + lo == vcall_offset)
+ {
+ if (hi)
+ emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
+ }
+ else
+ {
+ tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
+ vcall_offset, -(vcall_offset < 0));
+ emit_insn (gen_adddi3 (tmp, tmp, tmp2));
+ lo = 0;
+ }
+ if (lo)
+ tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
+ else
+ tmp2 = tmp;
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
+
+ emit_insn (gen_adddi3 (this, this, tmp));
+ }
+
+ /* Generate a tail call to the target function. */
+ if (! TREE_USED (function))
+ {
+ assemble_external (function);
+ TREE_USED (function) = 1;
+ }
+ funexp = XEXP (DECL_RTL (function), 0);
+ funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
+ insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
+ SIBLING_CALL_P (insn) = 1;
+
+ /* Run just enough of rest_of_compilation to get the insns emitted.
+ There's not really enough bulk here to make other passes such as
+ instruction scheduling worth while. Note that use_thunk calls
+ assemble_start_function and assemble_end_function. */
+ insn = get_insns ();
+ insn_locators_initialize ();
+ shorten_branches (insn);
+ final_start_function (insn, file, 1);
+ final (insn, file, 1);
+ final_end_function ();
+}
+#endif /* TARGET_ABI_OSF */
+
+/* Debugging support. */
+
+#include "gstab.h"
+
+/* Count the number of sdb-related labels generated (to find block
+   start and end boundaries). */
+
+int sdb_label_count = 0;
+
+/* Name of the file containing the current function. */
+
+static const char *current_function_file = "";
+
+/* Offsets to alpha virtual arg/local debugging pointers. */
+
+long alpha_arg_offset;
+long alpha_auto_offset;
+
+/* Emit a new filename to a stream. */
+
+void
+alpha_output_filename (FILE *stream, const char *name)
+{
+ static int first_time = TRUE;
+
+ if (first_time)
+ {
+ first_time = FALSE;
+ ++num_source_filenames;
+ current_function_file = name;
+ fprintf (stream, "\t.file\t%d ", num_source_filenames);
+ output_quoted_string (stream, name);
+ fprintf (stream, "\n");
+ if (!TARGET_GAS && write_symbols == DBX_DEBUG)
+ fprintf (stream, "\t#@stabs\n");
+ }
+
+ else if (write_symbols == DBX_DEBUG)
+ /* dbxout.c will emit an appropriate .stabs directive. */
+ return;
+
+ else if (name != current_function_file
+ && strcmp (name, current_function_file) != 0)
+ {
+ if (inside_function && ! TARGET_GAS)
+ fprintf (stream, "\t#.file\t%d ", num_source_filenames);
+ else
+ {
+ ++num_source_filenames;
+ current_function_file = name;
+ fprintf (stream, "\t.file\t%d ", num_source_filenames);
+ }
+
+ output_quoted_string (stream, name);
+ fprintf (stream, "\n");
+ }
+}
+
+/* Structure to show the current status of registers and memory. */
+
+struct shadow_summary
+{
+ struct {
+ unsigned int i : 31; /* Mask of int regs */
+ unsigned int fp : 31; /* Mask of fp regs */
+ unsigned int mem : 1; /* mem == imem | fpmem */
+ } used, defd;
+};
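+
+/* Only 31 bits are needed in each mask because $31 and $f31 are
+   hardwired to zero on Alpha; summarize_insn below skips register
+   numbers 31 and 63 accordingly.  */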
+
+/* Summarize the effects of expression X on the machine. Update SUM, a pointer
+ to the summary structure. SET is nonzero if the insn is setting the
+ object, otherwise zero. */
+
+static void
+summarize_insn (rtx x, struct shadow_summary *sum, int set)
+{
+ const char *format_ptr;
+ int i, j;
+
+ if (x == 0)
+ return;
+
+ switch (GET_CODE (x))
+ {
+ /* ??? Note that this case would be incorrect if the Alpha had a
+ ZERO_EXTRACT in SET_DEST. */
+ case SET:
+ summarize_insn (SET_SRC (x), sum, 0);
+ summarize_insn (SET_DEST (x), sum, 1);
+ break;
+
+ case CLOBBER:
+ summarize_insn (XEXP (x, 0), sum, 1);
+ break;
+
+ case USE:
+ summarize_insn (XEXP (x, 0), sum, 0);
+ break;
+
+ case ASM_OPERANDS:
+ for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
+ summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
+ break;
+
+ case PARALLEL:
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ summarize_insn (XVECEXP (x, 0, i), sum, 0);
+ break;
+
+ case SUBREG:
+ summarize_insn (SUBREG_REG (x), sum, 0);
+ break;
+
+ case REG:
+ {
+ int regno = REGNO (x);
+ unsigned long mask = ((unsigned long) 1) << (regno % 32);
+
+ if (regno == 31 || regno == 63)
+ break;
+
+ if (set)
+ {
+ if (regno < 32)
+ sum->defd.i |= mask;
+ else
+ sum->defd.fp |= mask;
+ }
+ else
+ {
+ if (regno < 32)
+ sum->used.i |= mask;
+ else
+ sum->used.fp |= mask;
+ }
+ }
+ break;
+
+ case MEM:
+ if (set)
+ sum->defd.mem = 1;
+ else
+ sum->used.mem = 1;
+
+ /* Find the regs used in memory address computation: */
+ summarize_insn (XEXP (x, 0), sum, 0);
+ break;
+
+ case CONST_INT: case CONST_DOUBLE:
+ case SYMBOL_REF: case LABEL_REF: case CONST:
+ case SCRATCH: case ASM_INPUT:
+ break;
+
+ /* Handle common unary and binary ops for efficiency. */
+ case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
+ case MOD: case UDIV: case UMOD: case AND: case IOR:
+ case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
+ case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
+ case NE: case EQ: case GE: case GT: case LE:
+ case LT: case GEU: case GTU: case LEU: case LTU:
+ summarize_insn (XEXP (x, 0), sum, 0);
+ summarize_insn (XEXP (x, 1), sum, 0);
+ break;
+
+ case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
+ case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
+ case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
+ case SQRT: case FFS:
+ summarize_insn (XEXP (x, 0), sum, 0);
+ break;
+
+ default:
+ format_ptr = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ switch (format_ptr[i])
+ {
+ case 'e':
+ summarize_insn (XEXP (x, i), sum, 0);
+ break;
+
+ case 'E':
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ summarize_insn (XVECEXP (x, i, j), sum, 0);
+ break;
+
+ case 'i':
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+}
+
+/* Ensure a sufficient number of `trapb' insns are in the code when
+ the user requests code with a trap precision of functions or
+ instructions.
+
+ In naive mode, when the user requests a trap-precision of
+ "instruction", a trapb is needed after every instruction that may
+ generate a trap. This ensures that the code is resumption safe but
+ it is also slow.
+
+ When optimizations are turned on, we delay issuing a trapb as long
+ as possible. In this context, a trap shadow is the sequence of
+ instructions that starts with a (potentially) trap generating
+ instruction and extends to the next trapb or call_pal instruction
+ (but GCC never generates call_pal by itself). We can delay (and
+ therefore sometimes omit) a trapb subject to the following
+ conditions:
+
+ (a) On entry to the trap shadow, if any Alpha register or memory
+ location contains a value that is used as an operand value by some
+ instruction in the trap shadow (live on entry), then no instruction
+ in the trap shadow may modify the register or memory location.
+
+ (b) Within the trap shadow, the computation of the base register
+ for a memory load or store instruction may not involve using the
+ result of an instruction that might generate an UNPREDICTABLE
+ result.
+
+ (c) Within the trap shadow, no register may be used more than once
+ as a destination register. (This is to make life easier for the
+ trap-handler.)
+
+ (d) The trap shadow may not include any branch instructions. */
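+
+/* As an illustration (not from the original sources): in
+
+	addt $f0,$f1,$f2	# may trap
+	addq $3,$4,$5
+	trapb
+
+   the trap shadow runs from the addt up to the trapb; it would have to
+   be closed earlier if, say, a second instruction in the shadow also
+   wrote $5, since that would violate condition (c).  */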
+
+static void
+alpha_handle_trap_shadows (void)
+{
+ struct shadow_summary shadow;
+ int trap_pending, exception_nesting;
+ rtx i, n;
+
+ trap_pending = 0;
+ exception_nesting = 0;
+ shadow.used.i = 0;
+ shadow.used.fp = 0;
+ shadow.used.mem = 0;
+ shadow.defd = shadow.used;
+
+ for (i = get_insns (); i ; i = NEXT_INSN (i))
+ {
+ if (GET_CODE (i) == NOTE)
+ {
+ switch (NOTE_LINE_NUMBER (i))
+ {
+ case NOTE_INSN_EH_REGION_BEG:
+ exception_nesting++;
+ if (trap_pending)
+ goto close_shadow;
+ break;
+
+ case NOTE_INSN_EH_REGION_END:
+ exception_nesting--;
+ if (trap_pending)
+ goto close_shadow;
+ break;
+
+ case NOTE_INSN_EPILOGUE_BEG:
+ if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
+ goto close_shadow;
+ break;
+ }
+ }
+ else if (trap_pending)
+ {
+ if (alpha_tp == ALPHA_TP_FUNC)
+ {
+ if (GET_CODE (i) == JUMP_INSN
+ && GET_CODE (PATTERN (i)) == RETURN)
+ goto close_shadow;
+ }
+ else if (alpha_tp == ALPHA_TP_INSN)
+ {
+ if (optimize > 0)
+ {
+ struct shadow_summary sum;
+
+ sum.used.i = 0;
+ sum.used.fp = 0;
+ sum.used.mem = 0;
+ sum.defd = sum.used;
+
+ switch (GET_CODE (i))
+ {
+ case INSN:
+ /* Annoyingly, get_attr_trap will die on these. */
+ if (GET_CODE (PATTERN (i)) == USE
+ || GET_CODE (PATTERN (i)) == CLOBBER)
+ break;
+
+ summarize_insn (PATTERN (i), &sum, 0);
+
+ if ((sum.defd.i & shadow.defd.i)
+ || (sum.defd.fp & shadow.defd.fp))
+ {
+ /* (c) would be violated */
+ goto close_shadow;
+ }
+
+ /* Combine shadow with summary of current insn: */
+ shadow.used.i |= sum.used.i;
+ shadow.used.fp |= sum.used.fp;
+ shadow.used.mem |= sum.used.mem;
+ shadow.defd.i |= sum.defd.i;
+ shadow.defd.fp |= sum.defd.fp;
+ shadow.defd.mem |= sum.defd.mem;
+
+ if ((sum.defd.i & shadow.used.i)
+ || (sum.defd.fp & shadow.used.fp)
+ || (sum.defd.mem & shadow.used.mem))
+ {
+ /* (a) would be violated (also takes care of (b)) */
+ gcc_assert (get_attr_trap (i) != TRAP_YES
+ || (!(sum.defd.i & sum.used.i)
+ && !(sum.defd.fp & sum.used.fp)));
+
+ goto close_shadow;
+ }
+ break;
+
+ case JUMP_INSN:
+ case CALL_INSN:
+ case CODE_LABEL:
+ goto close_shadow;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ close_shadow:
+ n = emit_insn_before (gen_trapb (), i);
+ PUT_MODE (n, TImode);
+ PUT_MODE (i, TImode);
+ trap_pending = 0;
+ shadow.used.i = 0;
+ shadow.used.fp = 0;
+ shadow.used.mem = 0;
+ shadow.defd = shadow.used;
+ }
+ }
+ }
+
+ if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
+ && GET_CODE (i) == INSN
+ && GET_CODE (PATTERN (i)) != USE
+ && GET_CODE (PATTERN (i)) != CLOBBER
+ && get_attr_trap (i) == TRAP_YES)
+ {
+ if (optimize && !trap_pending)
+ summarize_insn (PATTERN (i), &shadow, 0);
+ trap_pending = 1;
+ }
+ }
+}
+
+/* The Alpha can only issue the members of an instruction group
+   simultaneously if the group is suitably aligned.  This is very
+   processor-specific. */
+/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
+ that are marked "fake". These instructions do not exist on that target,
+ but it is possible to see these insns with deranged combinations of
+ command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
+ choose a result at random. */
+
+enum alphaev4_pipe {
+ EV4_STOP = 0,
+ EV4_IB0 = 1,
+ EV4_IB1 = 2,
+ EV4_IBX = 4
+};
+
+enum alphaev5_pipe {
+ EV5_STOP = 0,
+ EV5_NONE = 1,
+ EV5_E01 = 2,
+ EV5_E0 = 4,
+ EV5_E1 = 8,
+ EV5_FAM = 16,
+ EV5_FA = 32,
+ EV5_FM = 64
+};
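+
+/* The enumerators above are distinct bits so that several can be OR'ed
+   into the IN_USE slot masks below; EV4_IBX and EV5_E01 mark insns that
+   the hardware may steer into either of two slots.  */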
+
+static enum alphaev4_pipe
+alphaev4_insn_pipe (rtx insn)
+{
+ if (recog_memoized (insn) < 0)
+ return EV4_STOP;
+ if (get_attr_length (insn) != 4)
+ return EV4_STOP;
+
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ILD:
+ case TYPE_LDSYM:
+ case TYPE_FLD:
+ case TYPE_LD_L:
+ return EV4_IBX;
+
+ case TYPE_IADD:
+ case TYPE_ILOG:
+ case TYPE_ICMOV:
+ case TYPE_ICMP:
+ case TYPE_FST:
+ case TYPE_SHIFT:
+ case TYPE_IMUL:
+ case TYPE_FBR:
+ case TYPE_MVI: /* fake */
+ return EV4_IB0;
+
+ case TYPE_IST:
+ case TYPE_MISC:
+ case TYPE_IBR:
+ case TYPE_JSR:
+ case TYPE_CALLPAL:
+ case TYPE_FCPYS:
+ case TYPE_FCMOV:
+ case TYPE_FADD:
+ case TYPE_FDIV:
+ case TYPE_FMUL:
+ case TYPE_ST_C:
+ case TYPE_MB:
+ case TYPE_FSQRT: /* fake */
+ case TYPE_FTOI: /* fake */
+ case TYPE_ITOF: /* fake */
+ return EV4_IB1;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+static enum alphaev5_pipe
+alphaev5_insn_pipe (rtx insn)
+{
+ if (recog_memoized (insn) < 0)
+ return EV5_STOP;
+ if (get_attr_length (insn) != 4)
+ return EV5_STOP;
+
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ILD:
+ case TYPE_FLD:
+ case TYPE_LDSYM:
+ case TYPE_IADD:
+ case TYPE_ILOG:
+ case TYPE_ICMOV:
+ case TYPE_ICMP:
+ return EV5_E01;
+
+ case TYPE_IST:
+ case TYPE_FST:
+ case TYPE_SHIFT:
+ case TYPE_IMUL:
+ case TYPE_MISC:
+ case TYPE_MVI:
+ case TYPE_LD_L:
+ case TYPE_ST_C:
+ case TYPE_MB:
+ case TYPE_FTOI: /* fake */
+ case TYPE_ITOF: /* fake */
+ return EV5_E0;
+
+ case TYPE_IBR:
+ case TYPE_JSR:
+ case TYPE_CALLPAL:
+ return EV5_E1;
+
+ case TYPE_FCPYS:
+ return EV5_FAM;
+
+ case TYPE_FBR:
+ case TYPE_FCMOV:
+ case TYPE_FADD:
+ case TYPE_FDIV:
+ case TYPE_FSQRT: /* fake */
+ return EV5_FA;
+
+ case TYPE_FMUL:
+ return EV5_FM;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* IN_USE is a mask of the slots currently filled within the insn group.
+ The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
+ the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
+
+ LEN is, of course, the length of the group in bytes. */
+
+static rtx
+alphaev4_next_group (rtx insn, int *pin_use, int *plen)
+{
+ int len, in_use;
+
+ len = in_use = 0;
+
+ if (! INSN_P (insn)
+ || GET_CODE (PATTERN (insn)) == CLOBBER
+ || GET_CODE (PATTERN (insn)) == USE)
+ goto next_and_done;
+
+ while (1)
+ {
+ enum alphaev4_pipe pipe;
+
+ pipe = alphaev4_insn_pipe (insn);
+ switch (pipe)
+ {
+ case EV4_STOP:
+ /* Force complex instructions to start new groups. */
+ if (in_use)
+ goto done;
+
+ /* If this is a completely unrecognized insn, it's an asm.
+ We don't know how long it is, so record length as -1 to
+ signal a needed realignment. */
+ if (recog_memoized (insn) < 0)
+ len = -1;
+ else
+ len = get_attr_length (insn);
+ goto next_and_done;
+
+ case EV4_IBX:
+ if (in_use & EV4_IB0)
+ {
+ if (in_use & EV4_IB1)
+ goto done;
+ in_use |= EV4_IB1;
+ }
+ else
+ in_use |= EV4_IB0 | EV4_IBX;
+ break;
+
+ case EV4_IB0:
+ if (in_use & EV4_IB0)
+ {
+ if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
+ goto done;
+ in_use |= EV4_IB1;
+ }
+ in_use |= EV4_IB0;
+ break;
+
+ case EV4_IB1:
+ if (in_use & EV4_IB1)
+ goto done;
+ in_use |= EV4_IB1;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ len += 4;
+
+ /* Haifa doesn't do well scheduling branches. */
+ if (GET_CODE (insn) == JUMP_INSN)
+ goto next_and_done;
+
+ next:
+ insn = next_nonnote_insn (insn);
+
+ if (!insn || ! INSN_P (insn))
+ goto done;
+
+ /* Let Haifa tell us where it thinks insn group boundaries are. */
+ if (GET_MODE (insn) == TImode)
+ goto done;
+
+ if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
+ goto next;
+ }
+
+ next_and_done:
+ insn = next_nonnote_insn (insn);
+
+ done:
+ *plen = len;
+ *pin_use = in_use;
+ return insn;
+}
+
+/* IN_USE is a mask of the slots currently filled within the insn group.
+ The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
+ the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
+
+ LEN is, of course, the length of the group in bytes. */
+
+static rtx
+alphaev5_next_group (rtx insn, int *pin_use, int *plen)
+{
+ int len, in_use;
+
+ len = in_use = 0;
+
+ if (! INSN_P (insn)
+ || GET_CODE (PATTERN (insn)) == CLOBBER
+ || GET_CODE (PATTERN (insn)) == USE)
+ goto next_and_done;
+
+ while (1)
+ {
+ enum alphaev5_pipe pipe;
+
+ pipe = alphaev5_insn_pipe (insn);
+ switch (pipe)
+ {
+ case EV5_STOP:
+ /* Force complex instructions to start new groups. */
+ if (in_use)
+ goto done;
+
+ /* If this is a completely unrecognized insn, it's an asm.
+ We don't know how long it is, so record length as -1 to
+ signal a needed realignment. */
+ if (recog_memoized (insn) < 0)
+ len = -1;
+ else
+ len = get_attr_length (insn);
+ goto next_and_done;
+
+	  /* ??? Most of the cases below we would like to assert can never
+	     happen, as that would indicate an error either in Haifa, or
+ in the scheduling description. Unfortunately, Haifa never
+ schedules the last instruction of the BB, so we don't have
+ an accurate TI bit to go off. */
+ case EV5_E01:
+ if (in_use & EV5_E0)
+ {
+ if (in_use & EV5_E1)
+ goto done;
+ in_use |= EV5_E1;
+ }
+ else
+ in_use |= EV5_E0 | EV5_E01;
+ break;
+
+ case EV5_E0:
+ if (in_use & EV5_E0)
+ {
+ if (!(in_use & EV5_E01) || (in_use & EV5_E1))
+ goto done;
+ in_use |= EV5_E1;
+ }
+ in_use |= EV5_E0;
+ break;
+
+ case EV5_E1:
+ if (in_use & EV5_E1)
+ goto done;
+ in_use |= EV5_E1;
+ break;
+
+ case EV5_FAM:
+ if (in_use & EV5_FA)
+ {
+ if (in_use & EV5_FM)
+ goto done;
+ in_use |= EV5_FM;
+ }
+ else
+ in_use |= EV5_FA | EV5_FAM;
+ break;
+
+ case EV5_FA:
+ if (in_use & EV5_FA)
+ goto done;
+ in_use |= EV5_FA;
+ break;
+
+ case EV5_FM:
+ if (in_use & EV5_FM)
+ goto done;
+ in_use |= EV5_FM;
+ break;
+
+ case EV5_NONE:
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ len += 4;
+
+ /* Haifa doesn't do well scheduling branches. */
+ /* ??? If this is predicted not-taken, slotting continues, except
+ that no more IBR, FBR, or JSR insns may be slotted. */
+ if (GET_CODE (insn) == JUMP_INSN)
+ goto next_and_done;
+
+ next:
+ insn = next_nonnote_insn (insn);
+
+ if (!insn || ! INSN_P (insn))
+ goto done;
+
+ /* Let Haifa tell us where it thinks insn group boundaries are. */
+ if (GET_MODE (insn) == TImode)
+ goto done;
+
+ if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
+ goto next;
+ }
+
+ next_and_done:
+ insn = next_nonnote_insn (insn);
+
+ done:
+ *plen = len;
+ *pin_use = in_use;
+ return insn;
+}
+
+static rtx
+alphaev4_next_nop (int *pin_use)
+{
+ int in_use = *pin_use;
+ rtx nop;
+
+ if (!(in_use & EV4_IB0))
+ {
+ in_use |= EV4_IB0;
+ nop = gen_nop ();
+ }
+ else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
+ {
+ in_use |= EV4_IB1;
+ nop = gen_nop ();
+ }
+ else if (TARGET_FP && !(in_use & EV4_IB1))
+ {
+ in_use |= EV4_IB1;
+ nop = gen_fnop ();
+ }
+ else
+ nop = gen_unop ();
+
+ *pin_use = in_use;
+ return nop;
+}
+
+static rtx
+alphaev5_next_nop (int *pin_use)
+{
+ int in_use = *pin_use;
+ rtx nop;
+
+ if (!(in_use & EV5_E1))
+ {
+ in_use |= EV5_E1;
+ nop = gen_nop ();
+ }
+ else if (TARGET_FP && !(in_use & EV5_FA))
+ {
+ in_use |= EV5_FA;
+ nop = gen_fnop ();
+ }
+ else if (TARGET_FP && !(in_use & EV5_FM))
+ {
+ in_use |= EV5_FM;
+ nop = gen_fnop ();
+ }
+ else
+ nop = gen_unop ();
+
+ *pin_use = in_use;
+ return nop;
+}
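+
+/* Both helpers above pick the cheapest nop flavor that still issues in
+   a free slot of the current group: a plain nop for an integer slot, an
+   fnop while an FP pipe is free, and unop as the fallback.  */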
+
+/* The instruction group alignment main loop. */
+
+static void
+alpha_align_insns (unsigned int max_align,
+ rtx (*next_group) (rtx, int *, int *),
+ rtx (*next_nop) (int *))
+{
+ /* ALIGN is the known alignment for the insn group. */
+ unsigned int align;
+ /* OFS is the offset of the current insn in the insn group. */
+ int ofs;
+ int prev_in_use, in_use, len, ldgp;
+ rtx i, next;
+
+  /* Let shorten_branches take care of assigning alignments to code labels.  */
+ shorten_branches (get_insns ());
+
+ if (align_functions < 4)
+ align = 4;
+ else if ((unsigned int) align_functions < max_align)
+ align = align_functions;
+ else
+ align = max_align;
+
+ ofs = prev_in_use = 0;
+ i = get_insns ();
+ if (GET_CODE (i) == NOTE)
+ i = next_nonnote_insn (i);
+
+ ldgp = alpha_function_needs_gp ? 8 : 0;
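+  /* The prologue ldgp expands to an ldah/lda pair, i.e. 8 bytes of code
+     that must stay contiguous; LDGP counts those bytes down as they are
+     passed (see below).  */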
+
+ while (i)
+ {
+ next = (*next_group) (i, &in_use, &len);
+
+ /* When we see a label, resync alignment etc. */
+ if (GET_CODE (i) == CODE_LABEL)
+ {
+ unsigned int new_align = 1 << label_to_alignment (i);
+
+ if (new_align >= align)
+ {
+ align = new_align < max_align ? new_align : max_align;
+ ofs = 0;
+ }
+
+ else if (ofs & (new_align-1))
+ ofs = (ofs | (new_align-1)) + 1;
+ gcc_assert (!len);
+ }
+
+      /* Handle complex instructions specially.  */
+ else if (in_use == 0)
+ {
+ /* Asms will have length < 0. This is a signal that we have
+ lost alignment knowledge. Assume, however, that the asm
+ will not mis-align instructions. */
+ if (len < 0)
+ {
+ ofs = 0;
+ align = 4;
+ len = 0;
+ }
+ }
+
+ /* If the known alignment is smaller than the recognized insn group,
+ realign the output. */
+ else if ((int) align < len)
+ {
+ unsigned int new_log_align = len > 8 ? 4 : 3;
+ rtx prev, where;
+
+ where = prev = prev_nonnote_insn (i);
+ if (!where || GET_CODE (where) != CODE_LABEL)
+ where = i;
+
+ /* Can't realign between a call and its gp reload. */
+ if (! (TARGET_EXPLICIT_RELOCS
+ && prev && GET_CODE (prev) == CALL_INSN))
+ {
+ emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
+ align = 1 << new_log_align;
+ ofs = 0;
+ }
+ }
+
+ /* We may not insert padding inside the initial ldgp sequence. */
+ else if (ldgp > 0)
+ ldgp -= len;
+
+ /* If the group won't fit in the same INT16 as the previous,
+ we need to add padding to keep the group together. Rather
+ than simply leaving the insn filling to the assembler, we
+ can make use of the knowledge of what sorts of instructions
+ were issued in the previous group to make sure that all of
+ the added nops are really free. */
+ else if (ofs + len > (int) align)
+ {
+ int nop_count = (align - ofs) / 4;
+ rtx where;
+
+ /* Insert nops before labels, branches, and calls to truly merge
+ the execution of the nops with the previous instruction group. */
+ where = prev_nonnote_insn (i);
+ if (where)
+ {
+ if (GET_CODE (where) == CODE_LABEL)
+ {
+ rtx where2 = prev_nonnote_insn (where);
+ if (where2 && GET_CODE (where2) == JUMP_INSN)
+ where = where2;
+ }
+ else if (GET_CODE (where) == INSN)
+ where = i;
+ }
+ else
+ where = i;
+
+ do
+ emit_insn_before ((*next_nop)(&prev_in_use), where);
+ while (--nop_count);
+ ofs = 0;
+ }
+
+ ofs = (ofs + len) & (align - 1);
+ prev_in_use = in_use;
+ i = next;
+ }
+}
+
+/* Machine dependent reorg pass. */
+
+static void
+alpha_reorg (void)
+{
+ if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
+ alpha_handle_trap_shadows ();
+
+ /* Due to the number of extra trapb insns, don't bother fixing up
+ alignment when trap precision is instruction. Moreover, we can
+ only do our job when sched2 is run. */
+ if (optimize && !optimize_size
+ && alpha_tp != ALPHA_TP_INSN
+ && flag_schedule_insns_after_reload)
+ {
+ if (alpha_tune == PROCESSOR_EV4)
+ alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
+ else if (alpha_tune == PROCESSOR_EV5)
+ alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
+ }
+}
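+
+/* Note that no alignment pass is run when tuning for EV6; being an
+   out-of-order core, it is far less sensitive to static issue-slot
+   alignment than EV4 and EV5.  */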
+
+#if !TARGET_ABI_UNICOSMK
+
+#ifdef HAVE_STAMP_H
+#include <stamp.h>
+#endif
+
+static void
+alpha_file_start (void)
+{
+#ifdef OBJECT_FORMAT_ELF
+ /* If emitting dwarf2 debug information, we cannot generate a .file
+ directive to start the file, as it will conflict with dwarf2out
+ file numbers. So it's only useful when emitting mdebug output. */
+ targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
+#endif
+
+ default_file_start ();
+#ifdef MS_STAMP
+ fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
+#endif
+
+ fputs ("\t.set noreorder\n", asm_out_file);
+ fputs ("\t.set volatile\n", asm_out_file);
+ if (!TARGET_ABI_OPEN_VMS)
+ fputs ("\t.set noat\n", asm_out_file);
+ if (TARGET_EXPLICIT_RELOCS)
+ fputs ("\t.set nomacro\n", asm_out_file);
+ if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
+ {
+ const char *arch;
+
+ if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
+ arch = "ev6";
+ else if (TARGET_MAX)
+ arch = "pca56";
+ else if (TARGET_BWX)
+ arch = "ev56";
+ else if (alpha_cpu == PROCESSOR_EV5)
+ arch = "ev5";
+ else
+ arch = "ev4";
+
+ fprintf (asm_out_file, "\t.arch %s\n", arch);
+ }
+}
+#endif
+
+#ifdef OBJECT_FORMAT_ELF
+/* Since we don't have a .dynbss section, we should not allow global
+ relocations in the .rodata section. */
+
+static int
+alpha_elf_reloc_rw_mask (void)
+{
+ return flag_pic ? 3 : 2;
+}
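+
+/* The value returned above is a mask of relocation classes (1 == local,
+   2 == global) that force a section to be writable; unlike the default,
+   global relocations are kept out of .rodata even when not generating
+   PIC.  */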
+
+/* Return a section for X. The only special thing we do here is to
+ honor small data. */
+
+static section *
+alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
+ unsigned HOST_WIDE_INT align)
+{
+ if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
+ /* ??? Consider using mergeable sdata sections. */
+ return sdata_section;
+ else
+ return default_elf_select_rtx_section (mode, x, align);
+}
+
+static unsigned int
+alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
+{
+ unsigned int flags = 0;
+
+ if (strcmp (name, ".sdata") == 0
+ || strncmp (name, ".sdata.", 7) == 0
+ || strncmp (name, ".gnu.linkonce.s.", 16) == 0
+ || strcmp (name, ".sbss") == 0
+ || strncmp (name, ".sbss.", 6) == 0
+ || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
+ flags = SECTION_SMALL;
+
+ flags |= default_section_type_flags (decl, name, reloc);
+ return flags;
+}
+#endif /* OBJECT_FORMAT_ELF */
+
+/* Structure to collect function names for final output in link section. */
+/* Note that items marked with GTY can't be ifdef'ed out. */
+
+enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
+enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
+
+struct alpha_links GTY(())
+{
+ int num;
+ rtx linkage;
+ enum links_kind lkind;
+ enum reloc_kind rkind;
+};
+
+struct alpha_funcs GTY(())
+{
+ int num;
+ splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
+ links;
+};
+
+static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
+ splay_tree alpha_links_tree;
+static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
+ splay_tree alpha_funcs_tree;
+
+static GTY(()) int alpha_funcs_num;
+
+#if TARGET_ABI_OPEN_VMS
+
+/* Return the VMS argument type corresponding to MODE. */
+
+enum avms_arg_type
+alpha_arg_type (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case SFmode:
+ return TARGET_FLOAT_VAX ? FF : FS;
+ case DFmode:
+ return TARGET_FLOAT_VAX ? FD : FT;
+ default:
+ return I64;
+ }
+}
+
+/* Return an rtx for an integer representing the VMS Argument Information
+ register value. */
+
+rtx
+alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
+{
+ unsigned HOST_WIDE_INT regval = cum.num_args;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
+
+ return GEN_INT (regval);
+}
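+
+/* The low 8 bits of the AI register value hold the argument count; each
+   3-bit field at bit 8 + 3*i holds the avms_arg_type code of argument i,
+   for the first six arguments.  */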
+
+/* Make (or fake) .linkage entry for function call.
+
+ IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
+
+   Return a SYMBOL_REF rtx for the linkage. */
+
+rtx
+alpha_need_linkage (const char *name, int is_local)
+{
+ splay_tree_node node;
+ struct alpha_links *al;
+
+ if (name[0] == '*')
+ name++;
+
+ if (is_local)
+ {
+ struct alpha_funcs *cfaf;
+
+ if (!alpha_funcs_tree)
+ alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
+ splay_tree_compare_pointers);
+
+ cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
+
+ cfaf->links = 0;
+ cfaf->num = ++alpha_funcs_num;
+
+ splay_tree_insert (alpha_funcs_tree,
+ (splay_tree_key) current_function_decl,
+ (splay_tree_value) cfaf);
+ }
+
+ if (alpha_links_tree)
+ {
+ /* Is this name already defined? */
+
+ node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
+ if (node)
+ {
+ al = (struct alpha_links *) node->value;
+ if (is_local)
+ {
+ /* Defined here but external assumed. */
+ if (al->lkind == KIND_EXTERN)
+ al->lkind = KIND_LOCAL;
+ }
+ else
+ {
+ /* Used here but unused assumed. */
+ if (al->lkind == KIND_UNUSED)
+ al->lkind = KIND_LOCAL;
+ }
+ return al->linkage;
+ }
+ }
+ else
+ alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
+
+ al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
+ name = ggc_strdup (name);
+
+ /* Assume external if no definition. */
+ al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
+
+ /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
+ get_identifier (name);
+
+ /* Construct a SYMBOL_REF for us to call. */
+ {
+ size_t name_len = strlen (name);
+ char *linksym = alloca (name_len + 6);
+ linksym[0] = '$';
+ memcpy (linksym + 1, name, name_len);
+ memcpy (linksym + 1 + name_len, "..lk", 5);
+ al->linkage = gen_rtx_SYMBOL_REF (Pmode,
+ ggc_alloc_string (linksym, name_len + 5));
+ }
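+  /* For example, NAME "foo" yields the linkage symbol "$foo..lk".  */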
+
+ splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
+ (splay_tree_value) al);
+
+ return al->linkage;
+}
+
+rtx
+alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
+{
+ splay_tree_node cfunnode;
+ struct alpha_funcs *cfaf;
+ struct alpha_links *al;
+ const char *name = XSTR (linkage, 0);
+
+ cfaf = (struct alpha_funcs *) 0;
+ al = (struct alpha_links *) 0;
+
+ cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
+ cfaf = (struct alpha_funcs *) cfunnode->value;
+
+ if (cfaf->links)
+ {
+ splay_tree_node lnode;
+
+ /* Is this name already defined? */
+
+ lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
+ if (lnode)
+ al = (struct alpha_links *) lnode->value;
+ }
+ else
+ cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
+
+ if (!al)
+ {
+ size_t name_len;
+ size_t buflen;
+ char buf [512];
+ char *linksym;
+ splay_tree_node node = 0;
+ struct alpha_links *anl;
+
+ if (name[0] == '*')
+ name++;
+
+ name_len = strlen (name);
+
+ al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
+ al->num = cfaf->num;
+
+ node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
+ if (node)
+ {
+ anl = (struct alpha_links *) node->value;
+ al->lkind = anl->lkind;
+ }
+
+ sprintf (buf, "$%d..%s..lk", cfaf->num, name);
+ buflen = strlen (buf);
+ linksym = alloca (buflen + 1);
+ memcpy (linksym, buf, buflen + 1);
+
+ al->linkage = gen_rtx_SYMBOL_REF
+ (Pmode, ggc_alloc_string (linksym, buflen + 1));
+
+ splay_tree_insert (cfaf->links, (splay_tree_key) name,
+ (splay_tree_value) al);
+ }
+
+ if (rflag)
+ al->rkind = KIND_CODEADDR;
+ else
+ al->rkind = KIND_LINKAGE;
+
+ if (lflag)
+ return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
+ else
+ return al->linkage;
+}
+
+static int
+alpha_write_one_linkage (splay_tree_node node, void *data)
+{
+ const char *const name = (const char *) node->key;
+ struct alpha_links *link = (struct alpha_links *) node->value;
+ FILE *stream = (FILE *) data;
+
+ fprintf (stream, "$%d..%s..lk:\n", link->num, name);
+ if (link->rkind == KIND_CODEADDR)
+ {
+ if (link->lkind == KIND_LOCAL)
+ {
+ /* Local and used */
+ fprintf (stream, "\t.quad %s..en\n", name);
+ }
+ else
+ {
+ /* External and used, request code address. */
+ fprintf (stream, "\t.code_address %s\n", name);
+ }
+ }
+ else
+ {
+ if (link->lkind == KIND_LOCAL)
+ {
+ /* Local and used, build linkage pair. */
+ fprintf (stream, "\t.quad %s..en\n", name);
+ fprintf (stream, "\t.quad %s\n", name);
+ }
+ else
+ {
+ /* External and used, request linkage pair. */
+ fprintf (stream, "\t.linkage %s\n", name);
+ }
+ }
+
+ return 0;
+}
+
+static void
+alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
+{
+ splay_tree_node node;
+ struct alpha_funcs *func;
+
+ fprintf (stream, "\t.link\n");
+ fprintf (stream, "\t.align 3\n");
+ in_section = NULL;
+
+ node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
+ func = (struct alpha_funcs *) node->value;
+
+ fputs ("\t.name ", stream);
+ assemble_name (stream, funname);
+ fputs ("..na\n", stream);
+ ASM_OUTPUT_LABEL (stream, funname);
+ fprintf (stream, "\t.pdesc ");
+ assemble_name (stream, funname);
+ fprintf (stream, "..en,%s\n",
+ alpha_procedure_type == PT_STACK ? "stack"
+ : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
+
+ if (func->links)
+ {
+ splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
+ /* splay_tree_delete (func->links); */
+ }
+}
+
+/* Given a decl, a section name, and whether the decl initializer
+ has relocs, choose attributes for the section. */
+
+#define SECTION_VMS_OVERLAY SECTION_FORGET
+#define SECTION_VMS_GLOBAL SECTION_MACH_DEP
+#define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
+
+static unsigned int
+vms_section_type_flags (tree decl, const char *name, int reloc)
+{
+ unsigned int flags = default_section_type_flags (decl, name, reloc);
+
+ if (decl && DECL_ATTRIBUTES (decl)
+ && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
+ flags |= SECTION_VMS_OVERLAY;
+ if (decl && DECL_ATTRIBUTES (decl)
+ && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
+ flags |= SECTION_VMS_GLOBAL;
+ if (decl && DECL_ATTRIBUTES (decl)
+ && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
+ flags |= SECTION_VMS_INITIALIZE;
+
+ return flags;
+}
+
+/* Switch to an arbitrary section NAME with attributes as specified
+ by FLAGS. ALIGN specifies any known alignment requirements for
+ the section; 0 if the default should be used. */
+
+static void
+vms_asm_named_section (const char *name, unsigned int flags,
+ tree decl ATTRIBUTE_UNUSED)
+{
+ fputc ('\n', asm_out_file);
+ fprintf (asm_out_file, ".section\t%s", name);
+
+ if (flags & SECTION_VMS_OVERLAY)
+ fprintf (asm_out_file, ",OVR");
+ if (flags & SECTION_VMS_GLOBAL)
+ fprintf (asm_out_file, ",GBL");
+ if (flags & SECTION_VMS_INITIALIZE)
+ fprintf (asm_out_file, ",NOMOD");
+ if (flags & SECTION_DEBUG)
+ fprintf (asm_out_file, ",NOWRT");
+
+ fputc ('\n', asm_out_file);
+}
+
+/* Record an element in the table of global constructors. SYMBOL is
+ a SYMBOL_REF of the function to be called; PRIORITY is a number
+ between 0 and MAX_INIT_PRIORITY.
+
+ Differs from default_ctors_section_asm_out_constructor in that the
+ width of the .ctors entry is always 64 bits, rather than the 32 bits
+ used by a normal pointer. */
+
+static void
+vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
+{
+ switch_to_section (ctors_section);
+ assemble_align (BITS_PER_WORD);
+ assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
+}
+
+static void
+vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
+{
+ switch_to_section (dtors_section);
+ assemble_align (BITS_PER_WORD);
+ assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
+}
+#else
+
+rtx
+alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
+ int is_local ATTRIBUTE_UNUSED)
+{
+ return NULL_RTX;
+}
+
+rtx
+alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
+ tree cfundecl ATTRIBUTE_UNUSED,
+ int lflag ATTRIBUTE_UNUSED,
+ int rflag ATTRIBUTE_UNUSED)
+{
+ return NULL_RTX;
+}
+
+#endif /* TARGET_ABI_OPEN_VMS */
+
+#if TARGET_ABI_UNICOSMK
+
+/* This evaluates to true if we do not know how to pass TYPE solely in
+ registers. This is the case for all arguments that do not fit in two
+ registers. */
+
+static bool
+unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
+{
+ if (type == NULL)
+ return false;
+
+ if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ return true;
+ if (TREE_ADDRESSABLE (type))
+ return true;
+
+ return ALPHA_ARG_SIZE (mode, type, 0) > 2;
+}
+
+/* Define the offset between two registers, one to be eliminated, and the
+ other its replacement, at the start of a routine. */
+
+int
+unicosmk_initial_elimination_offset (int from, int to)
+{
+ int fixed_size;
+
+ fixed_size = alpha_sa_size();
+ if (fixed_size != 0)
+ fixed_size += 48;
+
+ if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ return -fixed_size;
+ else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ return 0;
+ else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ return (ALPHA_ROUND (current_function_outgoing_args_size)
+ + ALPHA_ROUND (get_frame_size()));
+ else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ return (ALPHA_ROUND (fixed_size)
+ + ALPHA_ROUND (get_frame_size()
+ + current_function_outgoing_args_size));
+ else
+ gcc_unreachable ();
+}
+
+/* Output the module name for .ident and .end directives.  We have to strip
+   directories and make sure that the module name starts with a letter
+   or '$'. */
+
+static void
+unicosmk_output_module_name (FILE *file)
+{
+ const char *name = lbasename (main_input_filename);
+ unsigned len = strlen (name);
+ char *clean_name = alloca (len + 2);
+ char *ptr = clean_name;
+
+ /* CAM only accepts module names that start with a letter or '$'. We
+ prefix the module name with a '$' if necessary. */
+
+ if (!ISALPHA (*name))
+ *ptr++ = '$';
+ memcpy (ptr, name, len + 1);
+ clean_symbol_name (clean_name);
+ fputs (clean_name, file);
+}
+
+/* Output the definition of a common variable. */
+
+void
+unicosmk_output_common (FILE *file, const char *name, int size, int align)
+{
+ tree name_tree;
+ printf ("T3E__: common %s\n", name);
+
+ in_section = NULL;
+ fputs("\t.endp\n\n\t.psect ", file);
+ assemble_name(file, name);
+ fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
+ fprintf(file, "\t.byte\t0:%d\n", size);
+
+ /* Mark the symbol as defined in this module. */
+ name_tree = get_identifier (name);
+ TREE_ASM_WRITTEN (name_tree) = 1;
+}
+
+#define SECTION_PUBLIC SECTION_MACH_DEP
+#define SECTION_MAIN (SECTION_PUBLIC << 1)
+static int current_section_align;
+
+/* A get_unnamed_section callback for switching to the text section. */
+
+static void
+unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
+{
+ static int count = 0;
+ fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
+}
+
+/* A get_unnamed_section callback for switching to the data section. */
+
+static void
+unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
+{
+ static int count = 1;
+ fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS.
+
+ The Cray assembler is really weird with respect to sections. It has only
+ named sections and you can't reopen a section once it has been closed.
+ This means that we have to generate unique names whenever we want to
+ reenter the text or the data section. */
+
+static void
+unicosmk_init_sections (void)
+{
+ text_section = get_unnamed_section (SECTION_CODE,
+ unicosmk_output_text_section_asm_op,
+ NULL);
+ data_section = get_unnamed_section (SECTION_WRITE,
+ unicosmk_output_data_section_asm_op,
+ NULL);
+ readonly_data_section = data_section;
+}
+
+static unsigned int
+unicosmk_section_type_flags (tree decl, const char *name,
+ int reloc ATTRIBUTE_UNUSED)
+{
+ unsigned int flags = default_section_type_flags (decl, name, reloc);
+
+ if (!decl)
+ return flags;
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
+ if (align_functions_log > current_section_align)
+ current_section_align = align_functions_log;
+
+ if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
+ flags |= SECTION_MAIN;
+ }
+ else
+ current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
+
+ if (TREE_PUBLIC (decl))
+ flags |= SECTION_PUBLIC;
+
+ return flags;
+}
+
+/* Generate a section name for decl and associate it with the
+ declaration. */
+
+static void
+unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
+{
+ const char *name;
+ int len;
+
+ gcc_assert (decl);
+
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ name = default_strip_name_encoding (name);
+ len = strlen (name);
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ char *string;
+
+ /* It is essential that we prefix the section name here because
+ otherwise the section names generated for constructors and
+ destructors confuse collect2. */
+
+ string = alloca (len + 6);
+ sprintf (string, "code@%s", name);
+ DECL_SECTION_NAME (decl) = build_string (len + 5, string);
+ }
+ else if (TREE_PUBLIC (decl))
+ DECL_SECTION_NAME (decl) = build_string (len, name);
+ else
+ {
+ char *string;
+
+ string = alloca (len + 6);
+ sprintf (string, "data@%s", name);
+ DECL_SECTION_NAME (decl) = build_string (len + 5, string);
+ }
+}
+
+/* Switch to an arbitrary section NAME with attributes as specified
+ by FLAGS. ALIGN specifies any known alignment requirements for
+ the section; 0 if the default should be used. */
+
+static void
+unicosmk_asm_named_section (const char *name, unsigned int flags,
+ tree decl ATTRIBUTE_UNUSED)
+{
+ const char *kind;
+
+ /* Close the previous section. */
+
+ fputs ("\t.endp\n\n", asm_out_file);
+
+ /* Find out what kind of section we are opening. */
+
+ if (flags & SECTION_MAIN)
+ fputs ("\t.start\tmain\n", asm_out_file);
+
+ if (flags & SECTION_CODE)
+ kind = "code";
+ else if (flags & SECTION_PUBLIC)
+ kind = "common";
+ else
+ kind = "data";
+
+ if (current_section_align != 0)
+ fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
+ current_section_align, kind);
+ else
+ fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
+}
+
+static void
+unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
+{
+ if (DECL_P (decl)
+ && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
+ unicosmk_unique_section (decl, 0);
+}
+
+/* Output an alignment directive.  We have to use the macro 'gcc@code@align'
+   in code sections because .align fills unused space with zeroes. */
+
+void
+unicosmk_output_align (FILE *file, int align)
+{
+ if (inside_function)
+ fprintf (file, "\tgcc@code@align\t%d\n", align);
+ else
+ fprintf (file, "\t.align\t%d\n", align);
+}
+
+/* Add a case vector to the current function's list of deferred case
+ vectors. Case vectors have to be put into a separate section because CAM
+ does not allow data definitions in code sections. */
+
+void
+unicosmk_defer_case_vector (rtx lab, rtx vec)
+{
+ struct machine_function *machine = cfun->machine;
+
+ vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
+ machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
+ machine->addr_list);
+}
+
+/* Output a case vector. */
+
+static void
+unicosmk_output_addr_vec (FILE *file, rtx vec)
+{
+ rtx lab = XEXP (vec, 0);
+ rtx body = XEXP (vec, 1);
+ int vlen = XVECLEN (body, 0);
+ int idx;
+
+ (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
+
+ for (idx = 0; idx < vlen; idx++)
+ {
+ ASM_OUTPUT_ADDR_VEC_ELT
+ (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
+ }
+}
+
+/* Output current function's deferred case vectors. */
+
+static void
+unicosmk_output_deferred_case_vectors (FILE *file)
+{
+ struct machine_function *machine = cfun->machine;
+ rtx t;
+
+ if (machine->addr_list == NULL_RTX)
+ return;
+
+ switch_to_section (data_section);
+ for (t = machine->addr_list; t; t = XEXP (t, 1))
+ unicosmk_output_addr_vec (file, XEXP (t, 0));
+}
+
+/* Generate the name of the SSIB section for the current function. */
+
+#define SSIB_PREFIX "__SSIB_"
+#define SSIB_PREFIX_LEN 7
+
+static const char *
+unicosmk_ssib_name (void)
+{
+ /* This is ok since CAM won't be able to deal with names longer than that
+ anyway. */
+
+ static char name[256];
+
+ rtx x;
+ const char *fnname;
+ int len;
+
+ x = DECL_RTL (cfun->decl);
+ gcc_assert (GET_CODE (x) == MEM);
+ x = XEXP (x, 0);
+ gcc_assert (GET_CODE (x) == SYMBOL_REF);
+ fnname = XSTR (x, 0);
+
+ len = strlen (fnname);
+ if (len + SSIB_PREFIX_LEN > 255)
+ len = 255 - SSIB_PREFIX_LEN;
+
+ strcpy (name, SSIB_PREFIX);
+ strncpy (name + SSIB_PREFIX_LEN, fnname, len);
+ name[len + SSIB_PREFIX_LEN] = 0;
+
+ return name;
+}
+
+/* Set up the dynamic subprogram information block (DSIB) and update the
+ frame pointer register ($15) for subroutines which have a frame. If the
+ subroutine doesn't have a frame, simply increment $15. */
+
+static void
+unicosmk_gen_dsib (unsigned long *imaskP)
+{
+ if (alpha_procedure_type == PT_STACK)
+ {
+ const char *ssib_name;
+ rtx mem;
+
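+      /* DSIB layout relative to the stack pointer after the 64-byte
+	 adjustment below: offset 56 holds the return address, 48 the
+	 saved frame pointer, 32 the SSIB pointer, and 24 the CIW index;
+	 the remaining slots are left untouched here.  */
+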
+ /* Allocate 64 bytes for the DSIB. */
+
+ FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-64))));
+ emit_insn (gen_blockage ());
+
+ /* Save the return address. */
+
+ mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+ FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
+ (*imaskP) &= ~(1UL << REG_RA);
+
+ /* Save the old frame pointer. */
+
+ mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+ FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
+ (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
+
+ emit_insn (gen_blockage ());
+
+ /* Store the SSIB pointer. */
+
+ ssib_name = ggc_strdup (unicosmk_ssib_name ());
+ mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+
+ FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
+ gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
+ FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
+
+ /* Save the CIW index. */
+
+ mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+ FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
+
+ emit_insn (gen_blockage ());
+
+ /* Set the new frame pointer. */
+
+ FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
+ stack_pointer_rtx, GEN_INT (64))));
+
+ }
+ else
+ {
+ /* Increment the frame pointer register to indicate that we do not
+ have a frame. */
+
+ FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
+ hard_frame_pointer_rtx, const1_rtx)));
+ }
+}
+
+/* Output the static subroutine information block for the current
+ function. */
+
+static void
+unicosmk_output_ssib (FILE *file, const char *fnname)
+{
+ int len;
+ int i;
+ rtx x;
+ rtx ciw;
+ struct machine_function *machine = cfun->machine;
+
+ in_section = NULL;
+ fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
+ unicosmk_ssib_name ());
+
+ /* Some required stuff and the function name length. */
+
+ len = strlen (fnname);
+ fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
+
+ /* Saved registers
+ ??? We don't do that yet. */
+
+ fputs ("\t.quad\t0\n", file);
+
+ /* Function address. */
+
+ fputs ("\t.quad\t", file);
+ assemble_name (file, fnname);
+ putc ('\n', file);
+
+ fputs ("\t.quad\t0\n", file);
+ fputs ("\t.quad\t0\n", file);
+
+ /* Function name.
+ ??? We do it the same way Cray CC does it but this could be
+ simplified. */
+
+  for (i = 0; i < len; i++)
+    fprintf (file, "\t.byte\t%d\n", (int) fnname[i]);
+  if ((len % 8) == 0)
+ fputs ("\t.quad\t0\n", file);
+ else
+ fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
+
+ /* All call information words used in the function. */
+
+ for (x = machine->first_ciw; x; x = XEXP (x, 1))
+ {
+ ciw = XEXP (x, 0);
+#if HOST_BITS_PER_WIDE_INT == 32
+ fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
+ CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
+#else
+ fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
+#endif
+ }
+}
+
+/* Add a call information word (CIW) to the list of the current function's
+ CIWs and return its index.
+
+ X is a CONST_INT or CONST_DOUBLE representing the CIW. */
+
+rtx
+unicosmk_add_call_info_word (rtx x)
+{
+ rtx node;
+ struct machine_function *machine = cfun->machine;
+
+ node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
+ if (machine->first_ciw == NULL_RTX)
+ machine->first_ciw = node;
+ else
+ XEXP (machine->last_ciw, 1) = node;
+
+ machine->last_ciw = node;
+ ++machine->ciw_count;
+
+ return GEN_INT (machine->ciw_count
+ + strlen (current_function_name ())/8 + 5);
+}
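+
+/* The index returned above skips the five header quadwords of the SSIB
+   plus the quadwords occupied by the function name (see
+   unicosmk_output_ssib above), so it is the CIW's quadword position
+   within the block.  */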
+
+/* The Cray assembler doesn't accept extern declarations for symbols which
+ are defined in the same file. We have to keep track of all global
+ symbols which are referenced and/or defined in a source file and output
+ extern declarations for those which are referenced but not defined at
+   the end of the file. */
+
+/* List of identifiers for which an extern declaration might have to be
+ emitted. */
+/* FIXME: needs to use GC, so it can be saved and restored for PCH. */
+
+struct unicosmk_extern_list
+{
+ struct unicosmk_extern_list *next;
+ const char *name;
+};
+
+static struct unicosmk_extern_list *unicosmk_extern_head = 0;
+
+/* Output extern declarations which are required for every asm file. */
+
+static void
+unicosmk_output_default_externs (FILE *file)
+{
+ static const char *const externs[] =
+ { "__T3E_MISMATCH" };
+
+ int i;
+ int n;
+
+ n = ARRAY_SIZE (externs);
+
+ for (i = 0; i < n; i++)
+ fprintf (file, "\t.extern\t%s\n", externs[i]);
+}
+
+/* Output extern declarations for global symbols which have been
+   referenced but not defined. */
+
+static void
+unicosmk_output_externs (FILE *file)
+{
+ struct unicosmk_extern_list *p;
+ const char *real_name;
+ int len;
+ tree name_tree;
+
+ len = strlen (user_label_prefix);
+ for (p = unicosmk_extern_head; p != 0; p = p->next)
+ {
+ /* We have to strip the encoding and possibly remove user_label_prefix
+ from the identifier in order to handle -fleading-underscore and
+ explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
+ real_name = default_strip_name_encoding (p->name);
+ if (len && p->name[0] == '*'
+ && !memcmp (real_name, user_label_prefix, len))
+ real_name += len;
+
+ name_tree = get_identifier (real_name);
+ if (! TREE_ASM_WRITTEN (name_tree))
+ {
+ TREE_ASM_WRITTEN (name_tree) = 1;
+ fputs ("\t.extern\t", file);
+ assemble_name (file, p->name);
+ putc ('\n', file);
+ }
+ }
+}
+
+/* Record an extern. */
+
+void
+unicosmk_add_extern (const char *name)
+{
+ struct unicosmk_extern_list *p;
+
+ p = (struct unicosmk_extern_list *)
+ xmalloc (sizeof (struct unicosmk_extern_list));
+ p->next = unicosmk_extern_head;
+ p->name = name;
+ unicosmk_extern_head = p;
+}
+
+/* The Cray assembler generates incorrect code if identifiers which
+ conflict with register names are used as instruction operands. We have
+ to replace such identifiers with DEX expressions. */
+
+/* Structure to collect identifiers which have been replaced by DEX
+ expressions. */
+/* FIXME: needs to use GC, so it can be saved and restored for PCH. */
+
+struct unicosmk_dex {
+ struct unicosmk_dex *next;
+ const char *name;
+};
+
+/* List of identifiers which have been replaced by DEX expressions. The DEX
+ number is determined by the position in the list. */
+
+static struct unicosmk_dex *unicosmk_dex_list = NULL;
+
+/* The number of elements in the DEX list. */
+
+static int unicosmk_dex_count = 0;
+
+/* Check if NAME must be replaced by a DEX expression. */
+
+static int
+unicosmk_special_name (const char *name)
+{
+ if (name[0] == '*')
+ ++name;
+
+ if (name[0] == '$')
+ ++name;
+
+ if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
+ return 0;
+
+ switch (name[1])
+ {
+ case '1': case '2':
+ return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
+
+ case '3':
+ return (name[2] == '\0'
+ || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
+
+ default:
+ return (ISDIGIT (name[1]) && name[2] == '\0');
+ }
+}
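+
+/* In other words, NAME matches exactly the register names r0-r31 and
+   f0-f31 (in upper or lower case), optionally preceded by '$' or the
+   '*' user-label marker.  */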
+
+/* Return the DEX number if X must be replaced by a DEX expression and 0
+ otherwise. */
+
+static int
+unicosmk_need_dex (rtx x)
+{
+ struct unicosmk_dex *dex;
+ const char *name;
+ int i;
+
+ if (GET_CODE (x) != SYMBOL_REF)
+ return 0;
+
+ name = XSTR (x,0);
+ if (! unicosmk_special_name (name))
+ return 0;
+
+ i = unicosmk_dex_count;
+ for (dex = unicosmk_dex_list; dex; dex = dex->next)
+ {
+ if (! strcmp (name, dex->name))
+ return i;
+ --i;
+ }
+
+ dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
+ dex->name = name;
+ dex->next = unicosmk_dex_list;
+ unicosmk_dex_list = dex;
+
+ ++unicosmk_dex_count;
+ return unicosmk_dex_count;
+}
+
+/* Output the DEX definitions for this file. */
+
+static void
+unicosmk_output_dex (FILE *file)
+{
+ struct unicosmk_dex *dex;
+ int i;
+
+ if (unicosmk_dex_list == NULL)
+ return;
+
+ fprintf (file, "\t.dexstart\n");
+
+ i = unicosmk_dex_count;
+ for (dex = unicosmk_dex_list; dex; dex = dex->next)
+ {
+ fprintf (file, "\tDEX (%d) = ", i);
+ assemble_name (file, dex->name);
+ putc ('\n', file);
+ --i;
+ }
+
+ fprintf (file, "\t.dexend\n");
+}
+
+/* Output text to appear at the beginning of an assembler file. */
+
+static void
+unicosmk_file_start (void)
+{
+ int i;
+
+ fputs ("\t.ident\t", asm_out_file);
+ unicosmk_output_module_name (asm_out_file);
+ fputs ("\n\n", asm_out_file);
+
+  /* CAM, the Unicos/Mk assembler, uses different register names: rN for
+     the integer register N and fN for the floating-point register N.
+     Instead of trying to use these in alpha.md, we define the micro
+     definitions $N and $fN to refer to the appropriate register.  */
+
+ for (i = 0; i < 32; ++i)
+ fprintf (asm_out_file, "$%d <- r%d\n", i, i);
+
+ for (i = 0; i < 32; ++i)
+ fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
+
+ putc ('\n', asm_out_file);
+
+  /* The .align directive fills unused space with zeroes, which does not
+     work in code sections.  We define the macro 'gcc@code@align' which
+     uses nops instead.  Note that it assumes that code sections always
+     have the biggest possible alignment since `.' refers to the current
+     offset from the beginning of the section.  */
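+
+  /* For example, a later "gcc@code@align 4" expansion pads the section
+     with "bis r31,r31,r31" no-ops up to the next 16-byte boundary.  */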
+
+ fputs ("\t.macro gcc@code@align n\n", asm_out_file);
+ fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
+ fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
+ fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
+ fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
+ fputs ("\tbis r31,r31,r31\n", asm_out_file);
+ fputs ("\t.endr\n", asm_out_file);
+ fputs ("\t.endif\n", asm_out_file);
+ fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
+
+ /* Output extern declarations which should always be visible. */
+ unicosmk_output_default_externs (asm_out_file);
+
+ /* Open a dummy section. We always need to be inside a section for the
+ section-switching code to work correctly.
+ ??? This should be a module id or something like that. I still have to
+ figure out what the rules for those are. */
+ fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
+}
+
+/* Output text to appear at the end of an assembler file. This includes all
+ pending extern declarations and DEX expressions. */
+
+static void
+unicosmk_file_end (void)
+{
+ fputs ("\t.endp\n\n", asm_out_file);
+
+ /* Output all pending externs. */
+
+ unicosmk_output_externs (asm_out_file);
+
+  /* Output DEX definitions used for functions whose names conflict with
+ register names. */
+
+ unicosmk_output_dex (asm_out_file);
+
+ fputs ("\t.end\t", asm_out_file);
+ unicosmk_output_module_name (asm_out_file);
+ putc ('\n', asm_out_file);
+}
+
+#else
+
+static void
+unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
+{}
+
+static void
+unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
+{}
+
+static void
+unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
+ const char * fnname ATTRIBUTE_UNUSED)
+{}
+
+rtx
+unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
+{
+ return NULL_RTX;
+}
+
+static int
+unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+#endif /* TARGET_ABI_UNICOSMK */
+
+static void
+alpha_init_libfuncs (void)
+{
+ if (TARGET_ABI_UNICOSMK)
+ {
+ /* Prevent gcc from generating calls to __divsi3. */
+ set_optab_libfunc (sdiv_optab, SImode, 0);
+ set_optab_libfunc (udiv_optab, SImode, 0);
+
+ /* Use the functions provided by the system library
+ for DImode integer division. */
+ set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
+ set_optab_libfunc (udiv_optab, DImode, "$uldiv");
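+
+      /* E.g. a 64-bit "a / b" now calls $sldiv rather than libgcc's
+         __divdi3.  */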
+ }
+ else if (TARGET_ABI_OPEN_VMS)
+ {
+ /* Use the VMS runtime library functions for division and
+ remainder. */
+ set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
+ set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
+ set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
+ set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
+ set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
+ set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
+ set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
+ set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
+ }
+}
+
+
+/* Initialize the GCC target structure. */
+#if TARGET_ABI_OPEN_VMS
+# undef TARGET_ATTRIBUTE_TABLE
+# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
+# undef TARGET_SECTION_TYPE_FLAGS
+# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
+#endif
+
+#undef TARGET_IN_SMALL_DATA_P
+#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
+
+#if TARGET_ABI_UNICOSMK
+# undef TARGET_INSERT_ATTRIBUTES
+# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
+# undef TARGET_SECTION_TYPE_FLAGS
+# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
+# undef TARGET_ASM_UNIQUE_SECTION
+# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
+# undef TARGET_ASM_FUNCTION_RODATA_SECTION
+# define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
+# undef TARGET_ASM_GLOBALIZE_LABEL
+# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
+# undef TARGET_MUST_PASS_IN_STACK
+# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
+#endif
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
+
+/* Default unaligned ops are provided for ELF systems. To get unaligned
+ data for non-ELF systems, we have to turn off auto alignment. */
+#ifndef OBJECT_FORMAT_ELF
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
+#endif
+
+#ifdef OBJECT_FORMAT_ELF
+#undef TARGET_ASM_RELOC_RW_MASK
+#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
+#undef TARGET_SECTION_TYPE_FLAGS
+#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
+#endif
+
+#undef TARGET_ASM_FUNCTION_END_PROLOGUE
+#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
+
+#if TARGET_ABI_UNICOSMK
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START unicosmk_file_start
+#undef TARGET_ASM_FILE_END
+#define TARGET_ASM_FILE_END unicosmk_file_end
+#else
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START alpha_file_start
+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+#endif
+
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
+ alpha_multipass_dfa_lookahead
+
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS HAVE_AS_TLS
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS alpha_init_builtins
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
+#undef TARGET_FOLD_BUILTIN
+#define TARGET_FOLD_BUILTIN alpha_fold_builtin
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
+#undef TARGET_CANNOT_COPY_INSN_P
+#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
+
+#if TARGET_ABI_OSF
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
+#undef TARGET_STDARG_OPTIMIZE_HOOK
+#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
+#endif
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS alpha_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST hook_int_rtx_0
+
+#undef TARGET_MACHINE_DEPENDENT_REORG
+#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
+
+#undef TARGET_PROMOTE_FUNCTION_ARGS
+#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
+#undef TARGET_PROMOTE_FUNCTION_RETURN
+#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
+#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
+#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
+#undef TARGET_SPLIT_COMPLEX_ARG
+#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
+
+/* The Alpha architecture does not require sequential consistency. See
+ http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
+ for an example of how it can be violated in practice. */
+#undef TARGET_RELAXED_ORDERING
+#define TARGET_RELAXED_ORDERING true
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS \
+ (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION alpha_handle_option
+
+#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
+#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
+#define TARGET_MANGLE_FUNDAMENTAL_TYPE alpha_mangle_fundamental_type
+#endif
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+
+#include "gt-alpha.h"
diff --git a/gcc-4.2.1/gcc/config/alpha/alpha.h b/gcc-4.2.1/gcc/config/alpha/alpha.h
new file mode 100644
index 000000000..ed02c9f3e
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/alpha.h
@@ -0,0 +1,1593 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha.
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Target CPU builtins. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__alpha"); \
+ builtin_define ("__alpha__"); \
+ builtin_assert ("cpu=alpha"); \
+ builtin_assert ("machine=alpha"); \
+ if (TARGET_CIX) \
+ { \
+ builtin_define ("__alpha_cix__"); \
+ builtin_assert ("cpu=cix"); \
+ } \
+ if (TARGET_FIX) \
+ { \
+ builtin_define ("__alpha_fix__"); \
+ builtin_assert ("cpu=fix"); \
+ } \
+ if (TARGET_BWX) \
+ { \
+ builtin_define ("__alpha_bwx__"); \
+ builtin_assert ("cpu=bwx"); \
+ } \
+ if (TARGET_MAX) \
+ { \
+ builtin_define ("__alpha_max__"); \
+ builtin_assert ("cpu=max"); \
+ } \
+ if (alpha_cpu == PROCESSOR_EV6) \
+ { \
+ builtin_define ("__alpha_ev6__"); \
+ builtin_assert ("cpu=ev6"); \
+ } \
+ else if (alpha_cpu == PROCESSOR_EV5) \
+ { \
+ builtin_define ("__alpha_ev5__"); \
+ builtin_assert ("cpu=ev5"); \
+ } \
+ else /* Presumably ev4. */ \
+ { \
+ builtin_define ("__alpha_ev4__"); \
+ builtin_assert ("cpu=ev4"); \
+ } \
+ if (TARGET_IEEE || TARGET_IEEE_WITH_INEXACT) \
+ builtin_define ("_IEEE_FP"); \
+ if (TARGET_IEEE_WITH_INEXACT) \
+ builtin_define ("_IEEE_FP_INEXACT"); \
+ if (TARGET_LONG_DOUBLE_128) \
+ builtin_define ("__LONG_DOUBLE_128__"); \
+ \
+ /* Macros dependent on the C dialect. */ \
+ SUBTARGET_LANGUAGE_CPP_BUILTINS(); \
+} while (0)
+
+#ifndef SUBTARGET_LANGUAGE_CPP_BUILTINS
+#define SUBTARGET_LANGUAGE_CPP_BUILTINS() \
+ do \
+ { \
+ if (preprocessing_asm_p ()) \
+ builtin_define_std ("LANGUAGE_ASSEMBLY"); \
+ else if (c_dialect_cxx ()) \
+ { \
+ builtin_define ("__LANGUAGE_C_PLUS_PLUS"); \
+ builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); \
+ } \
+ else \
+ builtin_define_std ("LANGUAGE_C"); \
+ if (c_dialect_objc ()) \
+ { \
+ builtin_define ("__LANGUAGE_OBJECTIVE_C"); \
+ builtin_define ("__LANGUAGE_OBJECTIVE_C__"); \
+ } \
+ } \
+ while (0)
+#endif
+
+#define CPP_SPEC "%(cpp_subtarget)"
+
+#ifndef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC ""
+#endif
+
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ (!strcmp (STR, "rpath") || DEFAULT_WORD_SWITCH_TAKES_ARG(STR))
+
+/* Print subsidiary information on the compiler version in use. */
+#define TARGET_VERSION
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+/* Which processor to schedule for. The cpu attribute defines a list that
+ mirrors this list, so changes to alpha.md must be made at the same time. */
+
+enum processor_type
+{
+ PROCESSOR_EV4, /* 2106[46]{a,} */
+ PROCESSOR_EV5, /* 21164{a,pc,} */
+ PROCESSOR_EV6, /* 21264 */
+ PROCESSOR_MAX
+};
+
+extern enum processor_type alpha_cpu;
+extern enum processor_type alpha_tune;
+
+enum alpha_trap_precision
+{
+ ALPHA_TP_PROG, /* No precision (default). */
+ ALPHA_TP_FUNC, /* Trap contained within originating function. */
+ ALPHA_TP_INSN /* Instruction accuracy and code is resumption safe. */
+};
+
+enum alpha_fp_rounding_mode
+{
+ ALPHA_FPRM_NORM, /* Normal rounding mode. */
+ ALPHA_FPRM_MINF, /* Round towards minus-infinity. */
+ ALPHA_FPRM_CHOP, /* Chopped rounding mode (towards 0). */
+ ALPHA_FPRM_DYN /* Dynamic rounding mode. */
+};
+
+enum alpha_fp_trap_mode
+{
+ ALPHA_FPTM_N, /* Normal trap mode. */
+ ALPHA_FPTM_U, /* Underflow traps enabled. */
+ ALPHA_FPTM_SU, /* Software completion, w/underflow traps */
+ ALPHA_FPTM_SUI /* Software completion, w/underflow & inexact traps */
+};
+
+extern int target_flags;
+
+extern enum alpha_trap_precision alpha_tp;
+extern enum alpha_fp_rounding_mode alpha_fprm;
+extern enum alpha_fp_trap_mode alpha_fptm;
+
+/* The option machinery only gives us TARGET_SOFT_FP; invert it here so
+   the rest of the port can test TARGET_FP directly.  */
+#define TARGET_FP (!TARGET_SOFT_FP)
+
+/* These are for target os support and cannot be changed at runtime. */
+#define TARGET_ABI_WINDOWS_NT 0
+#define TARGET_ABI_OPEN_VMS 0
+#define TARGET_ABI_UNICOSMK 0
+#define TARGET_ABI_OSF (!TARGET_ABI_WINDOWS_NT \
+ && !TARGET_ABI_OPEN_VMS \
+ && !TARGET_ABI_UNICOSMK)
+
+#ifndef TARGET_AS_CAN_SUBTRACT_LABELS
+#define TARGET_AS_CAN_SUBTRACT_LABELS TARGET_GAS
+#endif
+#ifndef TARGET_AS_SLASH_BEFORE_SUFFIX
+#define TARGET_AS_SLASH_BEFORE_SUFFIX TARGET_GAS
+#endif
+#ifndef TARGET_CAN_FAULT_IN_PROLOGUE
+#define TARGET_CAN_FAULT_IN_PROLOGUE 0
+#endif
+#ifndef TARGET_HAS_XFLOATING_LIBS
+#define TARGET_HAS_XFLOATING_LIBS TARGET_LONG_DOUBLE_128
+#endif
+#ifndef TARGET_PROFILING_NEEDS_GP
+#define TARGET_PROFILING_NEEDS_GP 0
+#endif
+#ifndef TARGET_LD_BUGGY_LDGP
+#define TARGET_LD_BUGGY_LDGP 0
+#endif
+#ifndef TARGET_FIXUP_EV5_PREFETCH
+#define TARGET_FIXUP_EV5_PREFETCH 0
+#endif
+#ifndef HAVE_AS_TLS
+#define HAVE_AS_TLS 0
+#endif
+
+#define TARGET_DEFAULT MASK_FPREGS
+
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT 0
+#endif
+
+#ifndef TARGET_DEFAULT_EXPLICIT_RELOCS
+#ifdef HAVE_AS_EXPLICIT_RELOCS
+#define TARGET_DEFAULT_EXPLICIT_RELOCS MASK_EXPLICIT_RELOCS
+#define TARGET_SUPPORT_ARCH 1
+#else
+#define TARGET_DEFAULT_EXPLICIT_RELOCS 0
+#endif
+#endif
+
+#ifndef TARGET_SUPPORT_ARCH
+#define TARGET_SUPPORT_ARCH 0
+#endif
+
+/* Support for a compile-time default CPU, et cetera. The rules are:
+ --with-cpu is ignored if -mcpu is specified.
+ --with-tune is ignored if -mtune is specified. */
+#define OPTION_DEFAULT_SPECS \
+ {"cpu", "%{!mcpu=*:-mcpu=%(VALUE)}" }, \
+ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }
+
+/* This macro defines names of additional specifications to put in the
+ specs that can be used in various specifications like CC1_SPEC. Its
+ definition is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ specification name, and a string constant that used by the GCC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
+#endif
+
+#define EXTRA_SPECS \
+ { "cpp_subtarget", CPP_SUBTARGET_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ On the Alpha, it is used to translate target-option strings into
+ numeric values. */
+
+#define OVERRIDE_OPTIONS override_options ()
+
+
+/* Define this macro to change register usage conditional on target flags.
+
+ On the Alpha, we use this to disable the floating-point registers when
+ they don't exist. */
+
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ int i; \
+ if (! TARGET_FPREGS) \
+ for (i = 32; i < 63; i++) \
+ fixed_regs[i] = call_used_regs[i] = 1; \
+}
+
+
+/* Show we can debug even without a frame pointer. */
+#define CAN_DEBUG_WITHOUT_FP
+
+/* target machine storage layout */
+
+/* Define the size of `int'. The default is the same as the word size. */
+#define INT_TYPE_SIZE 32
+
+/* Define the size of `long long'. The default is twice the word size. */
+#define LONG_LONG_TYPE_SIZE 64
+
+/* We're IEEE unless someone says to use VAX. */
+#define TARGET_FLOAT_FORMAT \
+ (TARGET_FLOAT_VAX ? VAX_FLOAT_FORMAT : IEEE_FLOAT_FORMAT)
+
+/* The two floating-point formats we support are S-floating, which is
+ 4 bytes, and T-floating, which is 8 bytes. `float' is S and `double'
+ and `long double' are T. */
+
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE (TARGET_LONG_DOUBLE_128 ? 128 : 64)
+
+/* Define this to set the long double type size used in libgcc2.c, which
+   cannot depend on target_flags.  */
+#ifdef __LONG_DOUBLE_128__
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 128
+#else
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+#endif
+
+/* Work around target_flags dependency in ada/targtyps.c. */
+#define WIDEST_HARDWARE_FP_SIZE 64
+
+#define WCHAR_TYPE "unsigned int"
+#define WCHAR_TYPE_SIZE 32
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type.
+
+ For Alpha, we always store objects in a full register. 32-bit integers
+ are always sign-extended, but smaller objects retain their signedness.
+
+   Note that small vector types can get mapped onto integer modes simply
+   by not appearing in alpha-modes.def.  We never promoted these values
+   before; don't start now that we've trimmed the set of modes to those
+   actually implemented in the backend.  */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && (TYPE == NULL || TREE_CODE (TYPE) != VECTOR_TYPE) \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ { \
+ if ((MODE) == SImode) \
+ (UNSIGNEDP) = 0; \
+ (MODE) = DImode; \
+ }
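+
+/* E.g. an unsigned short lives in a register as a zero-extended DImode
+   value, while an int (SImode) is always kept sign-extended, matching
+   the hardware's 32-bit arithmetic.  */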
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields.
+
+ There are no such instructions on the Alpha, but the documentation
+ is little endian. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ This is false on the Alpha. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is lowest
+ numbered.
+
+ For Alpha we can decide arbitrarily since there are no machine instructions
+ for them. Might as well be consistent with bytes. */
+#define WORDS_BIG_ENDIAN 0
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 8
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE 64
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 64
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY 128
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 64
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bit-field declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 128
+
+/* For atomic access to objects, must have at least 32-bit alignment
+ unless the machine has byte operations. */
+#define MINIMUM_ATOMIC_ALIGNMENT ((unsigned int) (TARGET_BWX ? 8 : 32))
+
+/* Align all constants and variables to at least a word boundary so
+ we can pick up pieces of them faster. */
+/* ??? Only if block-move stuff knows about different source/destination
+ alignment. */
+#if 0
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) MAX ((ALIGN), BITS_PER_WORD)
+#define DATA_ALIGNMENT(EXP, ALIGN) MAX ((ALIGN), BITS_PER_WORD)
+#endif
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data.
+
+ Since we get an error message when we do one, call them invalid. */
+
+#define STRICT_ALIGNMENT 1
+
+/* Set this nonzero if unaligned move instructions are extremely slow.
+
+ On the Alpha, they trap. */
+
+#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 1
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ We define all 32 integer registers, even though $31 is always zero,
+ and all 32 floating-point registers, even though $f31 is also
+ always zero. We do not bother defining the FP status register and
+ there are no other registers.
+
+ Since $31 is always zero, we will use register number 31 as the
+ argument pointer. It will never appear in the generated code
+ because we will always be eliminating it in favor of the stack
+ pointer or hardware frame pointer.
+
+ Likewise, we use $f31 for the frame pointer, which will always
+ be eliminated in favor of the hardware frame pointer or the
+ stack pointer. */
+
+#define FIRST_PSEUDO_REGISTER 64
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+
+#define FIXED_REGISTERS \
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 }
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+#define CALL_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, \
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }
+
+/* List the order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS. */
+
+#define REG_ALLOC_ORDER { \
+ 1, 2, 3, 4, 5, 6, 7, 8, /* nonsaved integer registers */ \
+ 22, 23, 24, 25, 28, /* likewise */ \
+ 0, /* likewise, but return value */ \
+ 21, 20, 19, 18, 17, 16, /* likewise, but input args */ \
+ 27, /* likewise, but OSF procedure value */ \
+ \
+ 42, 43, 44, 45, 46, 47, /* nonsaved floating-point registers */ \
+ 54, 55, 56, 57, 58, 59, /* likewise */ \
+ 60, 61, 62, /* likewise */ \
+ 32, 33, /* likewise, but return values */ \
+ 53, 52, 51, 50, 49, 48, /* likewise, but input args */ \
+ \
+ 9, 10, 11, 12, 13, 14, /* saved integer registers */ \
+ 26, /* return address */ \
+ 15, /* hard frame pointer */ \
+ \
+ 34, 35, 36, 37, 38, 39, /* saved floating-point registers */ \
+ 40, 41, /* likewise */ \
+ \
+ 29, 30, 31, 63 /* gp, sp, ap, sfp */ \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers. */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ On Alpha, the integer registers can hold any mode. The floating-point
+ registers can hold 64-bit integers as well, but not smaller values. */
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((REGNO) >= 32 && (REGNO) <= 62 \
+ ? (MODE) == SFmode || (MODE) == DFmode || (MODE) == DImode \
+ || (MODE) == SCmode || (MODE) == DCmode \
+ : 1)
+
+/* A C expression that is nonzero if a value of mode
+ MODE1 is accessible in mode MODE2 without copying.
+
+ This asymmetric test is true when MODE1 could be put
+ in an FP register but MODE2 could not. */
+
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (HARD_REGNO_MODE_OK (32, (MODE1)) \
+ ? HARD_REGNO_MODE_OK (32, (MODE2)) \
+ : 1)
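+
+/* E.g. SImode (never valid in an FP register) ties with any mode, while
+   DFmode ties only with the other FP-capable modes.  */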
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Alpha pc isn't overloaded on a register that the compiler knows about. */
+/* #define PC_REGNUM */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 30
+
+/* Base register for access to local variables of the function. */
+#define HARD_FRAME_POINTER_REGNUM 15
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED 0
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 31
+
+/* Base register for access to local variables of function. */
+#define FRAME_POINTER_REGNUM 63
+
+/* Register in which static-chain is passed to a function.
+
+ For the Alpha, this is based on an example; the calling sequence
+ doesn't seem to specify this. */
+#define STATIC_CHAIN_REGNUM 1
+
+/* The register number of the register used to address a table of
+ static data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM 29
+
+/* Define this macro if the register defined by `PIC_OFFSET_TABLE_REGNUM'
+ is clobbered by calls. */
+/* ??? It is and it isn't. It's required to be valid for a given
+ function when the function returns. It isn't clobbered by
+ current_file functions. Moreover, we do not expose the ldgp
+ until after reload, so we're probably safe. */
+/* #define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED */
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+enum reg_class {
+ NO_REGS, R0_REG, R24_REG, R25_REG, R27_REG,
+ GENERAL_REGS, FLOAT_REGS, ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+ {"NO_REGS", "R0_REG", "R24_REG", "R25_REG", "R27_REG", \
+ "GENERAL_REGS", "FLOAT_REGS", "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+{ {0x00000000, 0x00000000}, /* NO_REGS */ \
+ {0x00000001, 0x00000000}, /* R0_REG */ \
+ {0x01000000, 0x00000000}, /* R24_REG */ \
+ {0x02000000, 0x00000000}, /* R25_REG */ \
+ {0x08000000, 0x00000000}, /* R27_REG */ \
+ {0xffffffff, 0x80000000}, /* GENERAL_REGS */ \
+ {0x00000000, 0x7fffffff}, /* FLOAT_REGS */ \
+ {0xffffffff, 0xffffffff} }
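+
+/* Note that reg 63 ($f31, the eliminable soft frame pointer) is grouped
+   with GENERAL_REGS rather than FLOAT_REGS, so the masks are not a
+   clean 32/32 split.  */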
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == 0 ? R0_REG \
+ : (REGNO) == 24 ? R24_REG \
+ : (REGNO) == 25 ? R25_REG \
+ : (REGNO) == 27 ? R27_REG \
+ : (REGNO) >= 32 && (REGNO) <= 62 ? FLOAT_REGS \
+ : GENERAL_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS NO_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description. */
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'a' ? R24_REG \
+ : (C) == 'b' ? R25_REG \
+ : (C) == 'c' ? R27_REG \
+ : (C) == 'f' ? FLOAT_REGS \
+ : (C) == 'v' ? R0_REG \
+ : NO_REGS)
+
+/* CONDITIONAL_REGISTER_USAGE is defined earlier in this file, next to
+   OVERRIDE_OPTIONS.  */
+
+/* The letters I, J, K, L, M, N, O, and P in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+
+ For Alpha:
+ `I' is used for the range of constants most insns can contain.
+ `J' is the constant zero.
+ `K' is used for the constant in an LDA insn.
+ `L' is used for the constant in a LDAH insn.
+ `M' is used for the constants that can be AND'ed with using a ZAP insn.
+ `N' is used for complemented 8-bit constants.
+ `O' is used for negated 8-bit constants.
+ `P' is used for the constants 1, 2 and 3. */
+
+#define CONST_OK_FOR_LETTER_P alpha_const_ok_for_letter_p
+
+/* Similar, but for floating or large integer constants, and defining letters
+ G and H. Here VALUE is the CONST_DOUBLE rtx itself.
+
+ For Alpha, `G' is the floating-point constant zero. `H' is a CONST_DOUBLE
+ that is the operand of a ZAP insn. */
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P alpha_const_double_ok_for_letter_p
+
+/* Optional extra constraints for this machine.
+
+ For the Alpha, `Q' means that this is a memory operand but not a
+ reference to an unaligned location.
+
+ `R' is a SYMBOL_REF that has SYMBOL_REF_FLAG set or is the current
+ function.
+
+ 'S' is a 6-bit constant (valid for a shift insn).
+
+ 'T' is a HIGH.
+
+ 'U' is a symbolic operand.
+
+ 'W' is a vector zero. */
+
+#define EXTRA_CONSTRAINT alpha_extra_constraint
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+
+#define PREFERRED_RELOAD_CLASS alpha_preferred_reload_class
+
+/* Loading and storing HImode or QImode values to and from memory
+ usually requires a scratch register. The exceptions are loading
+ QImode and HImode from an aligned address to a general register
+ unless byte instructions are permitted.
+ We also cannot load an unaligned address or a paradoxical SUBREG into an
+ FP register. */
+
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,IN) \
+ alpha_secondary_reload_class((CLASS), (MODE), (IN), 1)
+
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,OUT) \
+ alpha_secondary_reload_class((CLASS), (MODE), (OUT), 0)
+
+/* If we are copying between general and FP registers, we need a memory
+ location unless the FIX extension is available. */
+
+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
+ (! TARGET_FIX && (((CLASS1) == FLOAT_REGS && (CLASS2) != FLOAT_REGS) \
+ || ((CLASS2) == FLOAT_REGS && (CLASS1) != FLOAT_REGS)))
+
+/* Specify the mode to be used for memory when a secondary memory
+ location is needed. If MODE is floating-point, use it. Otherwise,
+ widen to a word like the default. This is needed because we always
+ store integers in FP registers in quadword format. This whole
+ area is very tricky! */
+#define SECONDARY_MEMORY_NEEDED_MODE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT ? (MODE) \
+ : GET_MODE_SIZE (MODE) >= 4 ? (MODE) \
+ : mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (MODE), 0))
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Return the class of registers that cannot change mode from FROM to TO. */
+
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+ (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \
+ ? reg_classes_intersect_p (FLOAT_REGS, CLASS) : 0)
+
+/* Define the cost of moving between registers of various classes. Moving
+ between FLOAT_REGS and anything else except float regs is expensive.
+ In fact, we make it quite expensive because we really don't want to
+ do these moves unless it is clearly worth it. Optimizations may
+ reduce the impact of not being able to allocate a pseudo to a
+ hard register. */
+
+#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) \
+ (((CLASS1) == FLOAT_REGS) == ((CLASS2) == FLOAT_REGS) ? 2 \
+ : TARGET_FIX ? ((CLASS1) == FLOAT_REGS ? 6 : 8) \
+ : 4+2*alpha_memory_latency)
+
+/* A C expression returning the cost of moving data of MODE from a register to
+ or from memory.
+
+ On the Alpha, bump this up a bit. */
+
+extern int alpha_memory_latency;
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) (2*alpha_memory_latency)
+
+/* Provide the cost of a branch. Exact meaning under development. */
+#define BRANCH_COST 5
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this to nonzero if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+/* #define FRAME_GROWS_DOWNWARD 0 */
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On Alpha, don't define this because there are no push insns. */
+/* #define PUSH_ROUNDING(BYTES) */
+
+/* Define this to be nonzero if stack checking is built into the ABI. */
+#define STACK_CHECK_BUILTIN 1
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Offset of first parameter from the argument pointer register value. */
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Definitions for register eliminations.
+
+ We have two registers that can be eliminated on the Alpha. First, the
+ frame pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the argument pointer register can always be
+ eliminated; it is replaced with either the stack or frame pointer. */
+
+/* This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference. */
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are valid since the cases where FP can't be
+ eliminated are already handled. */
+
+#define CAN_ELIMINATE(FROM, TO) 1
+
+/* Round up to a multiple of 16 bytes. */
+#define ALPHA_ROUND(X) (((X) + 15) & ~ 15)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ ((OFFSET) = alpha_initial_elimination_offset(FROM, TO))
+
+/* Define this if stack space is still allocated for a parameter passed
+ in a register. */
+/* #define REG_PARM_STACK_SPACE */
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack. */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0.
+
+ On Alpha the value is found in $0 for integer functions and
+ $f0 for floating-point functions. */
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ function_value (VALTYPE, FUNC, VOIDmode)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) \
+ function_value (NULL, NULL, MODE)
+
+/* 1 if N is a possible register number for a function value
+ as seen by the caller. */
+
+#define FUNCTION_VALUE_REGNO_P(N) \
+ ((N) == 0 || (N) == 1 || (N) == 32 || (N) == 33)
+
+/* 1 if N is a possible register number for function argument passing.
+ On Alpha, these are $16-$21 and $f16-$f21. */
+
+#define FUNCTION_ARG_REGNO_P(N) \
+ (((N) >= 16 && (N) <= 21) || ((N) >= 16 + 32 && (N) <= 21 + 32))
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On Alpha, this is a single integer, which is a number of words
+ of arguments scanned so far.
+ Thus 6 or more means all following args should go on the stack. */
+
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ (CUM) = 0
+
+/* Define intermediate macro to compute the size (in registers) of an argument
+ for the Alpha. */
+
+#define ALPHA_ARG_SIZE(MODE, TYPE, NAMED) \
+ ((MODE) == TFmode || (MODE) == TCmode ? 1 \
+ : (((MODE) == BLKmode ? int_size_in_bytes (TYPE) : GET_MODE_SIZE (MODE)) \
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ ((CUM) += \
+ (targetm.calls.must_pass_in_stack (MODE, TYPE)) \
+ ? 6 : ALPHA_ARG_SIZE (MODE, TYPE, NAMED))
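+
+/* E.g. a 16-byte struct advances CUM by two words, while a TFmode
+   argument counts as one because it is passed by reference.  */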
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On Alpha the first 6 words of args are normally in registers
+ and the rest are pushed. */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ function_arg((CUM), (MODE), (TYPE), (NAMED))
+
+/* Try to output insns to set TARGET equal to the constant C if it can be
+ done in less than N insns. Do all computations in MODE. Returns the place
+ where the output has been placed if it can be done and the insns have been
+   emitted.  If it would take more than N insns, zero is returned and no
+   insns are emitted.  */
+
+/* Define the information needed to generate branch and scc insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+struct alpha_compare
+{
+ struct rtx_def *op0, *op1;
+ int fp_p;
+};
+
+extern struct alpha_compare alpha_compare;
+
+/* Make (or fake) .linkage entry for function call.
+ IS_LOCAL is 0 if name is used in call, 1 if name is used in definition. */
+
+/* This macro defines the start of an assembly comment. */
+
+#define ASM_COMMENT_START " #"
+
+/* This macro produces the initial definition of a function. */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE,NAME,DECL) \
+ alpha_start_function(FILE,NAME,DECL);
+
+/* This macro closes up a function definition for the assembler. */
+
+#define ASM_DECLARE_FUNCTION_SIZE(FILE,NAME,DECL) \
+ alpha_end_function(FILE,NAME,DECL)
+
+/* Output any profiling code before the prologue. */
+
+#define PROFILE_BEFORE_PROLOGUE 1
+
+/* Never use profile counters. */
+
+#define NO_PROFILE_COUNTERS 1
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. Under OSF/1, profiling is enabled
+ by simply passing -pg to the assembler and linker. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO)
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 1
+
+/* Define registers used by the epilogue and return instruction. */
+
+#define EPILOGUE_USES(REGNO) ((REGNO) == 26)
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ The trampoline should set the static chain pointer to value placed
+ into the trampoline and should branch to the specified routine.
+ Note that $27 has been set to the address of the trampoline, so we can
+ use it for addressability of the two data items. */
+
+#define TRAMPOLINE_TEMPLATE(FILE) \
+do { \
+ fprintf (FILE, "\tldq $1,24($27)\n"); \
+ fprintf (FILE, "\tldq $27,16($27)\n"); \
+ fprintf (FILE, "\tjmp $31,($27),0\n"); \
+ fprintf (FILE, "\tnop\n"); \
+ fprintf (FILE, "\t.quad 0,0\n"); \
+} while (0)
+
+/* Section in which to place the trampoline. On Alpha, instructions
+ may only be placed in a text segment. */
+
+#define TRAMPOLINE_SECTION text_section
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE 32
+
+/* The alignment of a trampoline, in bits. */
+
+#define TRAMPOLINE_ALIGNMENT 64
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+ alpha_initialize_trampoline (TRAMP, FNADDR, CXT, 16, 24, 8)
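+
+/* The constants mirror TRAMPOLINE_TEMPLATE above: the target's address
+   is stored at byte offset 16, the static chain value at 24, and the
+   jmp whose hint may be patched sits at offset 8.  */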
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame.
+ FRAMEADDR is the frame pointer of the COUNT frame, or the frame pointer of
+ the COUNT-1 frame if RETURN_ADDR_IN_PREVIOUS_FRAME is defined. */
+
+#define RETURN_ADDR_RTX alpha_return_addr
+
+/* Before the prologue, RA lives in $26. */
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, 26)
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (26)
+#define DWARF_ALT_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (64)
+#define DWARF_ZERO_REG 31
+
+/* Describe how we implement __builtin_eh_return. */
+#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 16 : INVALID_REGNUM)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 28)
+#define EH_RETURN_HANDLER_RTX \
+ gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, \
+ current_function_outgoing_args_size))
+
+/* Addressing modes, and classification of registers for them. */
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < 32 \
+ || (REGNO) == 63 || reg_renumber[REGNO] == 63)
+
+/* Maximum number of registers that can appear in a valid memory address. */
+#define MAX_REGS_PER_ADDRESS 1
+
+/* Recognize any constant value that is a valid address.  For the Alpha,
+   only small integer constants qualify, since we want to use LDA to load
+   any symbolic addresses into registers.  */
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) (INTVAL (X) + 0x8000) < 0x10000)
+
+/* Include all constant integers and constant doubles, but not
+ floating-point, except for floating-point zero. */
+
+#define LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) 0
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define NONSTRICT_REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 32 || REGNO (X) == 63 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+/* ??? Nonzero if X is the frame pointer, or some virtual register
+ that may eliminate to the frame pointer. These will be allowed to
+ have offsets greater than 32K. This is done because register
+ elimination offsets will change the hi/lo split, and if we split
+ before reload, we will require additional instructions. */
+#define NONSTRICT_REG_OK_FP_BASE_P(X) \
+ (REGNO (X) == 31 || REGNO (X) == 63 \
+ || (REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ && REGNO (X) < LAST_VIRTUAL_REGISTER))
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define STRICT_REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#ifdef REG_OK_STRICT
+#define REG_OK_FOR_BASE_P(X) STRICT_REG_OK_FOR_BASE_P (X)
+#else
+#define REG_OK_FOR_BASE_P(X) NONSTRICT_REG_OK_FOR_BASE_P (X)
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a
+ valid memory address for an instruction. */
+
+#ifdef REG_OK_STRICT
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN) \
+do { \
+ if (alpha_legitimate_address_p (MODE, X, 1)) \
+ goto WIN; \
+} while (0)
+#else
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN) \
+do { \
+ if (alpha_legitimate_address_p (MODE, X, 0)) \
+ goto WIN; \
+} while (0)
+#endif
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
+do { \
+ rtx new_x = alpha_legitimize_address (X, NULL_RTX, MODE); \
+ if (new_x) \
+ { \
+ X = new_x; \
+ goto WIN; \
+ } \
+} while (0)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c. */
+
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_L,WIN) \
+do { \
+ rtx new_x = alpha_legitimize_reload_address (X, MODE, OPNUM, TYPE, IND_L); \
+ if (new_x) \
+ { \
+ X = new_x; \
+ goto WIN; \
+ } \
+} while (0)
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+ On the Alpha this is true only for the unaligned modes. We can
+ simplify this test since we know that the address must be valid. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ if (GET_CODE (ADDR) == AND) goto LABEL; }
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+
+ Do not define this if the table should contain absolute addresses.
+ On the Alpha, the table is really GP-relative, not relative to the PC
+ of the table, but we pretend that it is PC-relative; this should be OK,
+ but we should try to find some better way sometime. */
+#define CASE_VECTOR_PC_RELATIVE 1
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* Max number of bytes we can move to or from memory
+ in one reasonably fast instruction. */
+
+#define MOVE_MAX 8
+
+/* If a memory-to-memory move would take MOVE_RATIO or more simple
+ move-instruction pairs, we will do a movmem or libcall instead.
+
+ Without byte/word accesses, we want no more than four instructions;
+ with, several single byte accesses are better. */
+
+#define MOVE_RATIO (TARGET_BWX ? 7 : 2)
+
+/* Largest number of bytes of an object that can be placed in a register.
+ On the Alpha we have plenty of registers, so use TImode. */
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)
+
+/* Nonzero if access to memory by bytes is no faster than for words.
+ Also nonzero if doing byte operations (specifically shifts) in registers
+ is undesirable.
+
+ On the Alpha, we want to not use the byte operation and instead use
+ masking operations to access fields; these will save instructions. */
+
+#define SLOW_BYTE_ACCESS 1
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, UNKNOWN if none. */
+#define LOAD_EXTEND_OP(MODE) ((MODE) == SImode ? SIGN_EXTEND : ZERO_EXTEND)
+
+/* Define if loading short immediate values into registers sign extends. */
+#define SHORT_IMMEDIATES_SIGN_EXTEND
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* The CIX ctlz and cttz instructions return 64 for zero. */
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 64, TARGET_CIX)
+#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 64, TARGET_CIX)
+
+/* Define the value returned by a floating-point comparison instruction. */
+
+#define FLOAT_STORE_FLAG_VALUE(MODE) \
+ REAL_VALUE_ATOF ((TARGET_FLOAT_VAX ? "0.5" : "2.0"), (MODE))
+
+/* Canonicalize a comparison from one we don't have to one we do have. */
+
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+ do { \
+ if (((CODE) == GE || (CODE) == GT || (CODE) == GEU || (CODE) == GTU) \
+ && (GET_CODE (OP1) == REG || (OP1) == const0_rtx)) \
+ { \
+ rtx tem = (OP0); \
+ (OP0) = (OP1); \
+ (OP1) = tem; \
+ (CODE) = swap_condition (CODE); \
+ } \
+ if (((CODE) == LT || (CODE) == LTU) \
+ && GET_CODE (OP1) == CONST_INT && INTVAL (OP1) == 256) \
+ { \
+ (CODE) = (CODE) == LT ? LE : LEU; \
+ (OP1) = GEN_INT (255); \
+ } \
+ } while (0)
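+
+/* For example, (gt x y) with y in a register becomes (lt y x), and
+   (ltu x 256) becomes (leu x 255) so the constant fits in the 8-bit
+   literal field.  */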
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode DImode
+
+/* Mode of a function address in a call instruction (for indexing purposes). */
+
+#define FUNCTION_MODE Pmode
+
+/* Define this if addresses of constant functions
+ shouldn't be put through pseudo regs where they can be cse'd.
+ Desirable on machines where ordinary constants are expensive
+ but a CALL with constant address is cheap.
+
+ We define this on the Alpha so that gen_call and gen_call_value
+ get to see the SYMBOL_REF (for the hint field of the jsr). It will
+ then copy it into a register, thus actually letting the address be
+ cse'ed. */
+
+#define NO_FUNCTION_CSE
+
+/* Define this to be nonzero if shift instructions ignore all but the low-order
+ few bits. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Control the assembler format that we output. */
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+#define ASM_APP_ON (TARGET_EXPLICIT_RELOCS ? "\t.set\tmacro\n" : "")
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+#define ASM_APP_OFF (TARGET_EXPLICIT_RELOCS ? "\t.set\tnomacro\n" : "")
+
+#define TEXT_SECTION_ASM_OP "\t.text"
+
+/* Output before read-only data. */
+
+#define READONLY_DATA_SECTION_ASM_OP "\t.rdata"
+
+/* Output before writable data. */
+
+#define DATA_SECTION_ASM_OP "\t.data"
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+#define REGISTER_NAMES \
+{"$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \
+ "$9", "$10", "$11", "$12", "$13", "$14", "$15", \
+ "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23", \
+ "$24", "$25", "$26", "$27", "$28", "$29", "$30", "AP", \
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", \
+ "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", \
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",\
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "FP"}
+
+/* Strip name encoding when emitting labels. */
+
+#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
+do { \
+ const char *name_ = NAME; \
+ if (*name_ == '@' || *name_ == '%') \
+ name_ += 2; \
+ if (*name_ == '*') \
+ name_++; \
+ else \
+ fputs (user_label_prefix, STREAM); \
+ fputs (name_, STREAM); \
+} while (0)
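+
+/* E.g. an encoded name "*$LC0" is printed verbatim as "$LC0", while a
+   plain "main" is preceded by the (here empty) user label prefix.  */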
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.globl "
+
+/* The prefix to add to user-visible assembler symbols. */
+
+#define USER_LABEL_PREFIX ""
+
+/* This is how to output a label for a jump table. Arguments are the same as
+ for (*targetm.asm_out.internal_label), except the insn for the jump table is
+ passed. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,TABLEINSN) \
+{ ASM_OUTPUT_ALIGN (FILE, 2); (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); }
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf ((LABEL), "*$%s%ld", (PREFIX), (long)(NUM))
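+
+/* E.g. ASM_GENERATE_INTERNAL_LABEL (buf, "LC", 5) stores "*$LC5" in buf
+   (illustrative values); the leading '*' tells ASM_OUTPUT_LABELREF above
+   to emit the rest of the name verbatim.  */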
+
+/* We use the default ASCII-output routine, except that we don't write more
+ than 50 characters since the assembler doesn't support very long lines. */
+
+#define ASM_OUTPUT_ASCII(MYFILE, MYSTRING, MYLENGTH) \
+ do { \
+ FILE *_hide_asm_out_file = (MYFILE); \
+ const unsigned char *_hide_p = (const unsigned char *) (MYSTRING); \
+ int _hide_thissize = (MYLENGTH); \
+ int _size_so_far = 0; \
+ { \
+ FILE *asm_out_file = _hide_asm_out_file; \
+ const unsigned char *p = _hide_p; \
+ int thissize = _hide_thissize; \
+ int i; \
+ fprintf (asm_out_file, "\t.ascii \""); \
+ \
+ for (i = 0; i < thissize; i++) \
+ { \
+ register int c = p[i]; \
+ \
+ if (_size_so_far ++ > 50 && i < thissize - 4) \
+ _size_so_far = 0, fprintf (asm_out_file, "\"\n\t.ascii \""); \
+ \
+ if (c == '\"' || c == '\\') \
+ putc ('\\', asm_out_file); \
+ if (c >= ' ' && c < 0177) \
+ putc (c, asm_out_file); \
+ else \
+ { \
+ fprintf (asm_out_file, "\\%o", c); \
+ /* After an octal-escape, if a digit follows, \
+ terminate one string constant and start another. \
+ The VAX assembler fails to stop reading the escape \
+ after three digits, so this is the only way we \
+ can get it to parse the data properly. */ \
+ if (i < thissize - 1 && ISDIGIT (p[i + 1])) \
+ _size_so_far = 0, fprintf (asm_out_file, "\"\n\t.ascii \""); \
+ } \
+ } \
+ fprintf (asm_out_file, "\"\n"); \
+ } \
+ } \
+ while (0)
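+
+/* Illustratively, the two bytes { 27, '1' } come out as
+       .ascii "\33"
+       .ascii "1"
+   because the string is split after the octal escape, keeping the
+   assembler from absorbing the digit '1' into it.  */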
+
+/* This is how to output an element of a case-vector that is relative. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, "\t.%s $L%d\n", TARGET_ABI_WINDOWS_NT ? "long" : "gprel32", \
+ (VALUE))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.align %d\n", LOG);
+
+/* This is how to advance the location counter by SIZE bytes. */
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.space "HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE))
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs ("\t.comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE)))
+
+/* This says how to output an assembler line
+ to define a local common symbol. */
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE,ROUNDED) \
+( fputs ("\t.lcomm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE)))
+
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
+
+/* Determine which codes are valid without a following integer. These must
+ not be alphabetic.
+
+ ~ Generates the name of the current function.
+
+ / Generates the instruction suffix. The TRAP_SUFFIX and ROUND_SUFFIX
+ attributes are examined to determine what is appropriate.
+
+ , Generates single precision suffix for floating point
+ instructions (s for IEEE, f for VAX)
+
+ - Generates double precision suffix for floating point
+ instructions (t for IEEE, g for VAX)
+
+ + Generates a nop instruction after a noreturn call at the very end
+ of the function
+ */
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '/' || (CODE) == ',' || (CODE) == '-' || (CODE) == '~' \
+ || (CODE) == '#' || (CODE) == '*' || (CODE) == '&' || (CODE) == '+')
+
+/* Print a memory address as an operand to reference that memory location. */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+ print_operand_address((FILE), (ADDR))
+
+/* Implement `va_start' for varargs and stdarg. */
+#define EXPAND_BUILTIN_VA_START(valist, nextarg) \
+ alpha_va_start (valist, nextarg)
+
+/* Tell collect that the object format is ECOFF. */
+#define OBJECT_FORMAT_COFF
+#define EXTENDED_COFF
+
+/* If we use NM, pass -g to it so it only lists globals. */
+#define NM_FLAGS "-pg"
+
+/* Definitions for debugging. */
+
+#define SDB_DEBUGGING_INFO 1 /* generate info for mips-tfile */
+#define DBX_DEBUGGING_INFO 1 /* generate embedded stabs */
+#define MIPS_DEBUGGING_INFO 1 /* MIPS specific debugging info */
+
+#ifndef PREFERRED_DEBUGGING_TYPE /* assume SDB_DEBUGGING_INFO */
+#define PREFERRED_DEBUGGING_TYPE SDB_DEBUG
+#endif
+
+
+/* Correct the offset of automatic variables and arguments. Note that
+ the Alpha debug format wants all automatic variables and arguments
+ to be in terms of two different offsets from the virtual frame pointer,
+ which is the stack pointer before any adjustment in the function.
+ The offset for the argument pointer is fixed for the native compiler:
+ it is either zero (when the function takes no arguments) or large
+ enough to hold all argument registers.
+ The offset for the auto pointer is the fourth argument to the .frame
+ directive (local_offset).
+ To stay compatible with the native tools we use the same offsets
+ from the virtual frame pointer and adjust the debugger arg/auto offsets
+ accordingly. These debugger offsets are set up in output_prolog. */
+
+extern long alpha_arg_offset;
+extern long alpha_auto_offset;
+#define DEBUGGER_AUTO_OFFSET(X) \
+ ((GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0) + alpha_auto_offset)
+#define DEBUGGER_ARG_OFFSET(OFFSET, X) (OFFSET + alpha_arg_offset)
+
+/* mips-tfile doesn't understand .stabd directives. */
+#define DBX_OUTPUT_SOURCE_LINE(STREAM, LINE, COUNTER) do { \
+ dbxout_begin_stabn_sline (LINE); \
+ dbxout_stab_value_internal_label ("LM", &COUNTER); \
+} while (0)
+
+/* We want to use MIPS-style .loc directives for SDB line numbers. */
+extern int num_source_filenames;
+#define SDB_OUTPUT_SOURCE_LINE(STREAM, LINE) \
+ fprintf (STREAM, "\t.loc\t%d %d\n", num_source_filenames, LINE)
+
+#define ASM_OUTPUT_SOURCE_FILENAME(STREAM, NAME) \
+ alpha_output_filename (STREAM, NAME)
+
+/* mips-tfile.c limits us to strings of one page. We must underestimate this
+ number, because the real length runs past this up to the next
+ continuation point. This is really a dbxout.c bug. */
+#define DBX_CONTIN_LENGTH 3000
+
+/* By default, turn on GDB extensions. */
+#define DEFAULT_GDB_EXTENSIONS 1
+
+/* Stabs-in-ECOFF can't handle dbxout_function_end(). */
+#define NO_DBX_FUNCTION_END 1
+
+/* If we are smuggling stabs through the ALPHA ECOFF object
+ format, put a comment in front of the .stab<x> operation so
+ that the ALPHA assembler does not choke. The mips-tfile program
+ will correctly put the stab into the object file. */
+
+#define ASM_STABS_OP ((TARGET_GAS) ? "\t.stabs\t" : " #.stabs\t")
+#define ASM_STABN_OP ((TARGET_GAS) ? "\t.stabn\t" : " #.stabn\t")
+#define ASM_STABD_OP ((TARGET_GAS) ? "\t.stabd\t" : " #.stabd\t")
+
+/* Forward references to tags are allowed. */
+#define SDB_ALLOW_FORWARD_REFERENCES
+
+/* Unknown tags are also allowed. */
+#define SDB_ALLOW_UNKNOWN_REFERENCES
+
+#define PUT_SDB_DEF(a) \
+do { \
+ fprintf (asm_out_file, "\t%s.def\t", \
+ (TARGET_GAS) ? "" : "#"); \
+ ASM_OUTPUT_LABELREF (asm_out_file, a); \
+ fputc (';', asm_out_file); \
+} while (0)
+
+#define PUT_SDB_PLAIN_DEF(a) \
+do { \
+ fprintf (asm_out_file, "\t%s.def\t.%s;", \
+ (TARGET_GAS) ? "" : "#", (a)); \
+} while (0)
+
+#define PUT_SDB_TYPE(a) \
+do { \
+ fprintf (asm_out_file, "\t.type\t0x%x;", (a)); \
+} while (0)
+
+/* For block start and end, we create labels, so that
+ later we can figure out where the correct offset is.
+ The normal .ent/.end serve well enough for functions,
+ so the function start/end macros below are defined to do nothing. */
+
+extern int sdb_label_count; /* block start/end next label # */
+
+#define PUT_SDB_BLOCK_START(LINE) \
+do { \
+ fprintf (asm_out_file, \
+ "$Lb%d:\n\t%s.begin\t$Lb%d\t%d\n", \
+ sdb_label_count, \
+ (TARGET_GAS) ? "" : "#", \
+ sdb_label_count, \
+ (LINE)); \
+ sdb_label_count++; \
+} while (0)
+
+#define PUT_SDB_BLOCK_END(LINE) \
+do { \
+ fprintf (asm_out_file, \
+ "$Le%d:\n\t%s.bend\t$Le%d\t%d\n", \
+ sdb_label_count, \
+ (TARGET_GAS) ? "" : "#", \
+ sdb_label_count, \
+ (LINE)); \
+ sdb_label_count++; \
+} while (0)
+
+#define PUT_SDB_FUNCTION_START(LINE)
+
+#define PUT_SDB_FUNCTION_END(LINE)
+
+#define PUT_SDB_EPILOGUE_END(NAME) ((void)(NAME))
+
+/* Macros for mips-tfile.c to encapsulate stabs in ECOFF, and for
+ mips-tdump.c to print them out.
+
+ These must match the corresponding definitions in gdb/mipsread.c.
+ Unfortunately, gcc and gdb do not currently share any directories. */
+
+#define CODE_MASK 0x8F300
+#define MIPS_IS_STAB(sym) (((sym)->index & 0xFFF00) == CODE_MASK)
+#define MIPS_MARK_STAB(code) ((code)+CODE_MASK)
+#define MIPS_UNMARK_STAB(code) ((code)-CODE_MASK)
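+
+/* For instance, marking the stabs code N_SLINE (0x44) yields
+   0x44 + 0x8F300 == 0x8F344, which MIPS_IS_STAB recognizes by its
+   0x8F3 prefix and MIPS_UNMARK_STAB maps back to 0x44 (illustrative
+   arithmetic; N_SLINE's value comes from the stabs definitions).  */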
+
+/* Override some mips-tfile definitions. */
+
+#define SHASH_SIZE 511
+#define THASH_SIZE 55
+
+/* Align ecoff symbol tables to avoid OSF1/1.3 nm complaints. */
+
+#define ALIGN_SYMTABLE_OFFSET(OFFSET) (((OFFSET) + 7) & ~7)
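+
+/* The expression rounds up to the next multiple of 8: e.g. an offset of
+   13 becomes (13 + 7) & ~7 == 16, while exact multiples are unchanged.  */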
+
+/* The system headers under Alpha systems are generally C++-aware. */
+#define NO_IMPLICIT_EXTERN_C
diff --git a/gcc-4.2.1/gcc/config/alpha/alpha.md b/gcc-4.2.1/gcc/config/alpha/alpha.md
new file mode 100644
index 000000000..1f33c58fc
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/alpha.md
@@ -0,0 +1,8122 @@
+;; Machine description for DEC Alpha for GNU C compiler
+;; Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+;; 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et. al.
+
+;; Uses of UNSPEC in this file:
+
+(define_constants
+ [(UNSPEC_ARG_HOME 0)
+ (UNSPEC_INSXH 2)
+ (UNSPEC_MSKXH 3)
+ (UNSPEC_CVTQL 4)
+ (UNSPEC_CVTLQ 5)
+ (UNSPEC_UMK_LAUM 6)
+ (UNSPEC_UMK_LALM 7)
+ (UNSPEC_UMK_LAL 8)
+ (UNSPEC_UMK_LOAD_CIW 9)
+ (UNSPEC_LDGP2 10)
+ (UNSPEC_LITERAL 11)
+ (UNSPEC_LITUSE 12)
+ (UNSPEC_SIBCALL 13)
+ (UNSPEC_SYMBOL 14)
+
+ ;; TLS Support
+ (UNSPEC_TLSGD_CALL 15)
+ (UNSPEC_TLSLDM_CALL 16)
+ (UNSPEC_TLSGD 17)
+ (UNSPEC_TLSLDM 18)
+ (UNSPEC_DTPREL 19)
+ (UNSPEC_TPREL 20)
+ (UNSPEC_TP 21)
+
+ ;; Builtins
+ (UNSPEC_CMPBGE 22)
+ (UNSPEC_ZAP 23)
+ (UNSPEC_AMASK 24)
+ (UNSPEC_IMPLVER 25)
+ (UNSPEC_PERR 26)
+ (UNSPEC_COPYSIGN 27)
+
+ ;; Atomic operations
+ (UNSPEC_MB 28)
+ (UNSPEC_ATOMIC 31)
+ (UNSPEC_CMPXCHG 32)
+ (UNSPEC_XCHG 33)
+ ])
+
+;; UNSPEC_VOLATILE:
+
+(define_constants
+ [(UNSPECV_IMB 0)
+ (UNSPECV_BLOCKAGE 1)
+ (UNSPECV_SETJMPR 2) ; builtin_setjmp_receiver
+ (UNSPECV_LONGJMP 3) ; builtin_longjmp
+ (UNSPECV_TRAPB 4)
+ (UNSPECV_PSPL 5) ; prologue_stack_probe_loop
+ (UNSPECV_REALIGN 6)
+ (UNSPECV_EHR 7) ; exception_receiver
+ (UNSPECV_MCOUNT 8)
+ (UNSPECV_FORCE_MOV 9)
+ (UNSPECV_LDGP1 10)
+ (UNSPECV_PLDGP2 11) ; prologue ldgp
+ (UNSPECV_SET_TP 12)
+ (UNSPECV_RPCC 13)
+ (UNSPECV_SETJMPR_ER 14) ; builtin_setjmp_receiver fragment
+ (UNSPECV_LL 15) ; load-locked
+ (UNSPECV_SC 16) ; store-conditional
+ ])
+
+;; Where necessary, the suffixes _le and _be are used to distinguish between
+;; little-endian and big-endian patterns.
+;;
+;; Note that the Unicos/Mk assembler does not support the following
+;; opcodes: mov, fmov, nop, fnop, unop.
+
+;; Processor type -- this attribute must exactly match the processor_type
+;; enumeration in alpha.h.
+
+(define_attr "tune" "ev4,ev5,ev6"
+ (const (symbol_ref "alpha_tune")))
+
+;; Define an insn type attribute. This is used in function unit delay
+;; computations, among other purposes. For the most part, we use the names
+;; defined in the EV4 documentation, but add a few that we have to know about
+;; separately.
+
+(define_attr "type"
+ "ild,fld,ldsym,ist,fst,ibr,callpal,fbr,jsr,iadd,ilog,shift,icmov,fcmov,
+ icmp,imul,fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,mb,ld_l,st_c,
+ multi,none"
+ (const_string "iadd"))
+
+;; Describe a user's asm statement.
+(define_asm_attributes
+ [(set_attr "type" "multi")])
+
+;; Define the operand size an insn operates on. Used primarily by mul
+;; and div operations that have size-dependent timings.
+
+(define_attr "opsize" "si,di,udi"
+ (const_string "di"))
+
+;; The TRAP attribute marks instructions that may generate traps
+;; (which are imprecise and may need a trapb if software completion
+;; is desired).
+
+(define_attr "trap" "no,yes"
+ (const_string "no"))
+
+;; The ROUND_SUFFIX attribute marks which instructions require a
+;; rounding-mode suffix. The value NONE indicates no suffix,
+;; the value NORMAL indicates a suffix controlled by alpha_fprm.
+
+(define_attr "round_suffix" "none,normal,c"
+ (const_string "none"))
+
+;; The TRAP_SUFFIX attribute marks instructions requiring a trap-mode suffix:
+;; NONE no suffix
+;; SU accepts only /su (cmpt et al)
+;; SUI accepts only /sui (cvtqt and cvtqs)
+;; V_SV accepts /v and /sv (cvtql only)
+;; V_SV_SVI accepts /v, /sv and /svi (cvttq only)
+;; U_SU_SUI accepts /u, /su and /sui (most fp instructions)
+;;
+;; The actual suffix emitted is controlled by alpha_fptm.
+
+(define_attr "trap_suffix" "none,su,sui,v_sv,v_sv_svi,u_su_sui"
+ (const_string "none"))
+
+;; The length of an instruction sequence in bytes.
+
+(define_attr "length" ""
+ (const_int 4))
+
+;; The USEGP attribute marks instructions that have relocations that use
+;; the GP.
+
+(define_attr "usegp" "no,yes"
+ (cond [(eq_attr "type" "ldsym,jsr")
+ (const_string "yes")
+ (eq_attr "type" "ild,fld,ist,fst")
+ (symbol_ref "alpha_find_lo_sum_using_gp(insn)")
+ ]
+ (const_string "no")))
+
+;; The CANNOT_COPY attribute marks instructions with relocations that
+;; cannot easily be duplicated. This includes insns with gpdisp relocs
+;; since they have to stay in 1-1 correspondence with one another. This
+;; also includes jsr insns, since they must stay in correspondence with
+;; the immediately following gpdisp instructions.
+
+(define_attr "cannot_copy" "false,true"
+ (const_string "false"))
+
+;; Include scheduling descriptions.
+
+(include "ev4.md")
+(include "ev5.md")
+(include "ev6.md")
+
+
+;; Include predicate definitions
+
+(include "predicates.md")
+
+
+;; First define the arithmetic insns. Note that the 32-bit forms also
+;; sign-extend.
+
+;; Handle 32- to 64-bit extension from memory to a floating point register
+;; specially, since this occurs frequently in int->double conversions.
+;;
+;; Note that while we must retain the =f case in the insn for reload's
+;; benefit, it should be eliminated after reload, so we should never emit
+;; code for that case. But we don't reject the possibility.
+
+(define_expand "extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "")))]
+ ""
+ "")
+
+(define_insn "*cvtlq"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (unspec:DI [(match_operand:SF 1 "reg_or_0_operand" "fG")]
+ UNSPEC_CVTLQ))]
+ ""
+ "cvtlq %1,%0"
+ [(set_attr "type" "fadd")])
+
+(define_insn "*extendsidi2_1"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,!*f")
+ (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "r,m,m")))]
+ ""
+ "@
+ addl $31,%1,%0
+ ldl %0,%1
+ lds %0,%1\;cvtlq %0,%0"
+ [(set_attr "type" "iadd,ild,fld")
+ (set_attr "length" "*,*,8")])
+
+(define_split
+ [(set (match_operand:DI 0 "hard_fp_register_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "memory_operand" "")))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (unspec:DI [(match_dup 2)] UNSPEC_CVTLQ))]
+{
+ operands[1] = adjust_address (operands[1], SFmode, 0);
+ operands[2] = gen_rtx_REG (SFmode, REGNO (operands[0]));
+})
+
+;; Optimize sign-extension of SImode loads. This shows up in the wake of
+;; reload when converting fp->int.
+
+(define_peephole2
+ [(set (match_operand:SI 0 "hard_int_register_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_operand:DI 2 "hard_int_register_operand" "")
+ (sign_extend:DI (match_dup 0)))]
+ "true_regnum (operands[0]) == true_regnum (operands[2])
+ || peep2_reg_dead_p (2, operands[0])"
+ [(set (match_dup 2)
+ (sign_extend:DI (match_dup 1)))]
+ "")
+
+;; Don't say we have addsi3 if optimizing. This generates better code. We
+;; have the anonymous addsi3 pattern below in case combine wants to make it.
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "")
+ (match_operand:SI 2 "add_operand" "")))]
+ "! optimize"
+ "")
+
+(define_insn "*addsi_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ,rJ,rJ")
+ (match_operand:SI 2 "add_operand" "rI,O,K,L")))]
+ ""
+ "@
+ addl %r1,%2,%0
+ subl %r1,%n2,%0
+ lda %0,%2(%r1)
+ ldah %0,%h2(%r1)")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! add_operand (operands[2], SImode)"
+ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 4)))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000);
+ HOST_WIDE_INT rest = val - low;
+
+ operands[3] = GEN_INT (rest);
+ operands[4] = GEN_INT (low);
+})
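+
+;; A worked example of the split above (illustrative constant): for
+;; val = 0x1234abcd, low = 0xabcd - 0x10000 = -0x5433 and rest =
+;; 0x12350000, so the addition becomes an ldah (add 0x1235 << 16)
+;; followed by an lda (add -0x5433), each fitting a signed 16-bit field.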
+
+(define_insn "*addsi_se"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI
+ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:SI 2 "sext_add_operand" "rI,O"))))]
+ ""
+ "@
+ addl %r1,%2,%0
+ subl %r1,%n2,%0")
+
+(define_insn "*addsi_se2"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI
+ (subreg:SI (plus:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:DI 2 "sext_add_operand" "rI,O"))
+ 0)))]
+ ""
+ "@
+ addl %r1,%2,%0
+ subl %r1,%n2,%0")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI
+ (plus:SI (match_operand:SI 1 "reg_not_elim_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))))
+ (clobber (match_operand:SI 3 "reg_not_elim_operand" ""))]
+ "! sext_add_operand (operands[2], SImode) && INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) % 4 == 0"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0) (sign_extend:DI (plus:SI (mult:SI (match_dup 3)
+ (match_dup 5))
+ (match_dup 1))))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[2]) / 4;
+ int mult = 4;
+
+ if (val % 2 == 0)
+ val /= 2, mult = 8;
+
+ operands[4] = GEN_INT (val);
+ operands[5] = GEN_INT (mult);
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI
+ (plus:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "" "")
+ (match_operand 3 "" "")])
+ (match_operand:SI 4 "add_operand" ""))))
+ (clobber (match_operand:DI 5 "register_operand" ""))]
+ ""
+ [(set (match_dup 5) (match_dup 6))
+ (set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 7) (match_dup 4))))]
+{
+ operands[6] = gen_rtx_fmt_ee (GET_CODE (operands[1]), DImode,
+ operands[2], operands[3]);
+ operands[7] = gen_lowpart (SImode, operands[5]);
+})
+
+(define_insn "addvsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:SI 2 "sext_add_operand" "rI,O")))
+ (trap_if (ne (plus:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (sign_extend:DI (plus:SI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ ""
+ "@
+ addlv %r1,%2,%0
+ sublv %r1,%n2,%0")
+
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "add_operand" "")))]
+ ""
+ "")
+
+(define_insn "*adddi_er_lo16_dtp"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "dtp16_symbolic_operand" "")))]
+ "HAVE_AS_TLS"
+ "lda %0,%2(%1)\t\t!dtprel")
+
+(define_insn "*adddi_er_hi32_dtp"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "register_operand" "r")
+ (high:DI (match_operand:DI 2 "dtp32_symbolic_operand" ""))))]
+ "HAVE_AS_TLS"
+ "ldah %0,%2(%1)\t\t!dtprelhi")
+
+(define_insn "*adddi_er_lo32_dtp"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "dtp32_symbolic_operand" "")))]
+ "HAVE_AS_TLS"
+ "lda %0,%2(%1)\t\t!dtprello")
+
+(define_insn "*adddi_er_lo16_tp"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "tp16_symbolic_operand" "")))]
+ "HAVE_AS_TLS"
+ "lda %0,%2(%1)\t\t!tprel")
+
+(define_insn "*adddi_er_hi32_tp"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "register_operand" "r")
+ (high:DI (match_operand:DI 2 "tp32_symbolic_operand" ""))))]
+ "HAVE_AS_TLS"
+ "ldah %0,%2(%1)\t\t!tprelhi")
+
+(define_insn "*adddi_er_lo32_tp"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "tp32_symbolic_operand" "")))]
+ "HAVE_AS_TLS"
+ "lda %0,%2(%1)\t\t!tprello")
+
+(define_insn "*adddi_er_high_l"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "register_operand" "r")
+ (high:DI (match_operand:DI 2 "local_symbolic_operand" ""))))]
+ "TARGET_EXPLICIT_RELOCS && reload_completed"
+ "ldah %0,%2(%1)\t\t!gprelhigh"
+ [(set_attr "usegp" "yes")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (high:DI (match_operand:DI 1 "local_symbolic_operand" "")))]
+ "TARGET_EXPLICIT_RELOCS && reload_completed"
+ [(set (match_dup 0)
+ (plus:DI (match_dup 2) (high:DI (match_dup 1))))]
+ "operands[2] = pic_offset_table_rtx;")
+
+;; We used to expend quite a lot of effort choosing addq/subq/lda.
+;; With complications like
+;;
+;; The NT stack unwind code can't handle a subq to adjust the stack
+;; (that's a bug, but not one we can do anything about). As of NT4.0 SP3,
+;; the exception handling code will loop if a subq is used and an
+;; exception occurs.
+;;
+;; The 19980616 change to emit prologues as RTL also confused some
+;; versions of GDB, which also interprets prologues. This has been
+;; fixed as of GDB 4.18, but it does no harm to unconditionally
+;; use lda here.
+;;
+;; and the fact that the three insns schedule exactly the same, it's
+;; just not worth the effort.
+
+(define_insn "*adddi_internal"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (plus:DI (match_operand:DI 1 "register_operand" "%r,r,r")
+ (match_operand:DI 2 "add_operand" "r,K,L")))]
+ ""
+ "@
+ addq %1,%2,%0
+ lda %0,%2(%1)
+ ldah %0,%h2(%1)")
+
+;; ??? Allow large constants when basing off the frame pointer or some
+;; virtual register that may eliminate to the frame pointer. This is
+;; done because register elimination offsets will change the hi/lo split,
+;; and if we split before reload, we will require additional instructions.
+
+(define_insn "*adddi_fp_hack"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (plus:DI (match_operand:DI 1 "reg_no_subreg_operand" "r,r,r")
+ (match_operand:DI 2 "const_int_operand" "K,L,n")))]
+ "NONSTRICT_REG_OK_FP_BASE_P (operands[1])
+ && INTVAL (operands[2]) >= 0
+ /* This is the largest constant an lda+ldah pair can add, minus
+ an upper bound on the displacement between SP and AP during
+ register elimination. See INITIAL_ELIMINATION_OFFSET. */
+ && INTVAL (operands[2])
+ < (0x7fff8000
+ - FIRST_PSEUDO_REGISTER * UNITS_PER_WORD
+ - ALPHA_ROUND(current_function_outgoing_args_size)
+ - (ALPHA_ROUND (get_frame_size ()
+ + max_reg_num () * UNITS_PER_WORD
+ + current_function_pretend_args_size)
+ - current_function_pretend_args_size))"
+ "@
+ lda %0,%2(%1)
+ ldah %0,%h2(%1)
+ #")
+
+;; Don't do this if we are adjusting SP since we don't want to do it
+;; in two steps. Don't split FP sources for the reason listed above.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "const_int_operand" "")))]
+ "! add_operand (operands[2], DImode)
+ && operands[0] != stack_pointer_rtx
+ && operands[1] != frame_pointer_rtx
+ && operands[1] != arg_pointer_rtx"
+ [(set (match_dup 0) (plus:DI (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000);
+ HOST_WIDE_INT rest = val - low;
+
+ operands[4] = GEN_INT (low);
+ if (CONST_OK_FOR_LETTER_P (rest, 'L'))
+ operands[3] = GEN_INT (rest);
+ else if (! no_new_pseudos)
+ {
+ operands[3] = gen_reg_rtx (DImode);
+ emit_move_insn (operands[3], operands[2]);
+ emit_insn (gen_adddi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ else
+ FAIL;
+})
+
+(define_insn "*saddl"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r,r")
+ (match_operand:SI 2 "const48_operand" "I,I"))
+ (match_operand:SI 3 "sext_add_operand" "rI,O")))]
+ ""
+ "@
+ s%2addl %1,%3,%0
+ s%2subl %1,%n3,%0")
+
+(define_insn "*saddl_se"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI
+ (plus:SI (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r,r")
+ (match_operand:SI 2 "const48_operand" "I,I"))
+ (match_operand:SI 3 "sext_add_operand" "rI,O"))))]
+ ""
+ "@
+ s%2addl %1,%3,%0
+ s%2subl %1,%n3,%0")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI
+ (plus:SI (mult:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "" "")
+ (match_operand 3 "" "")])
+ (match_operand:SI 4 "const48_operand" ""))
+ (match_operand:SI 5 "sext_add_operand" ""))))
+ (clobber (match_operand:DI 6 "reg_not_elim_operand" ""))]
+ ""
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (sign_extend:DI (plus:SI (mult:SI (match_dup 8) (match_dup 4))
+ (match_dup 5))))]
+{
+ operands[7] = gen_rtx_fmt_ee (GET_CODE (operands[1]), DImode,
+ operands[2], operands[3]);
+ operands[8] = gen_lowpart (SImode, operands[6]);
+})
+
+(define_insn "*saddq"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (plus:DI (mult:DI (match_operand:DI 1 "reg_not_elim_operand" "r,r")
+ (match_operand:DI 2 "const48_operand" "I,I"))
+ (match_operand:DI 3 "sext_add_operand" "rI,O")))]
+ ""
+ "@
+ s%2addq %1,%3,%0
+ s%2subq %1,%n3,%0")
+
+(define_insn "addvdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (plus:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:DI 2 "sext_add_operand" "rI,O")))
+ (trap_if (ne (plus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (plus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ ""
+ "@
+ addqv %r1,%2,%0
+ subqv %r1,%n2,%0")
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "reg_or_8bit_operand" "rI")))]
+ ""
+ "subl $31,%1,%0")
+
+(define_insn "*negsi_se"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (neg:SI
+ (match_operand:SI 1 "reg_or_8bit_operand" "rI"))))]
+ ""
+ "subl $31,%1,%0")
+
+(define_insn "negvsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "register_operand" "r")))
+ (trap_if (ne (neg:DI (sign_extend:DI (match_dup 1)))
+ (sign_extend:DI (neg:SI (match_dup 1))))
+ (const_int 0))]
+ ""
+ "sublv $31,%1,%0")
+
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI")))]
+ ""
+ "subq $31,%1,%0")
+
+(define_insn "negvdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))
+ (trap_if (ne (neg:TI (sign_extend:TI (match_dup 1)))
+ (sign_extend:TI (neg:DI (match_dup 1))))
+ (const_int 0))]
+ ""
+ "subqv $31,%1,%0")
+
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "")
+ (match_operand:SI 2 "reg_or_8bit_operand" "")))]
+ "! optimize"
+ "")
+
+(define_insn "*subsi_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "reg_or_8bit_operand" "rI")))]
+ ""
+ "subl %r1,%2,%0")
+
+(define_insn "*subsi_se"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "reg_or_8bit_operand" "rI"))))]
+ ""
+ "subl %r1,%2,%0")
+
+(define_insn "*subsi_se2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (subreg:SI (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI"))
+ 0)))]
+ ""
+ "subl %r1,%2,%0")
+
+(define_insn "subvsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "reg_or_8bit_operand" "rI")))
+ (trap_if (ne (minus:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (sign_extend:DI (minus:SI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ ""
+ "sublv %r1,%2,%0")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")))]
+ ""
+ "subq %r1,%2,%0")
+
+(define_insn "*ssubl"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r")
+ (match_operand:SI 2 "const48_operand" "I"))
+ (match_operand:SI 3 "reg_or_8bit_operand" "rI")))]
+ ""
+ "s%2subl %1,%3,%0")
+
+(define_insn "*ssubl_se"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (minus:SI (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r")
+ (match_operand:SI 2 "const48_operand" "I"))
+ (match_operand:SI 3 "reg_or_8bit_operand" "rI"))))]
+ ""
+ "s%2subl %1,%3,%0")
+
+(define_insn "*ssubq"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (mult:DI (match_operand:DI 1 "reg_not_elim_operand" "r")
+ (match_operand:DI 2 "const48_operand" "I"))
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")))]
+ ""
+ "s%2subq %1,%3,%0")
+
+(define_insn "subvdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")))
+ (trap_if (ne (minus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (minus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ ""
+ "subqv %r1,%2,%0")
+
+;; The Unicos/Mk assembler doesn't support mull.
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 2 "reg_or_8bit_operand" "rI")))]
+ "!TARGET_ABI_UNICOSMK"
+ "mull %r1,%2,%0"
+ [(set_attr "type" "imul")
+ (set_attr "opsize" "si")])
+
+(define_insn "*mulsi_se"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (mult:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 2 "reg_or_8bit_operand" "rI"))))]
+ "!TARGET_ABI_UNICOSMK"
+ "mull %r1,%2,%0"
+ [(set_attr "type" "imul")
+ (set_attr "opsize" "si")])
+
+(define_insn "mulvsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 2 "reg_or_8bit_operand" "rI")))
+ (trap_if (ne (mult:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (sign_extend:DI (mult:SI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ "!TARGET_ABI_UNICOSMK"
+ "mullv %r1,%2,%0"
+ [(set_attr "type" "imul")
+ (set_attr "opsize" "si")])
+
+(define_insn "muldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")))]
+ ""
+ "mulq %r1,%2,%0"
+ [(set_attr "type" "imul")])
+
+(define_insn "mulvdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")))
+ (trap_if (ne (mult:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (mult:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ ""
+ "mulqv %r1,%2,%0"
+ [(set_attr "type" "imul")])
+
+(define_expand "umuldi3_highpart"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "register_operand" ""))
+ (match_operand:DI 2 "reg_or_8bit_operand" ""))
+ (const_int 64))))]
+ ""
+{
+ if (REG_P (operands[2]))
+ operands[2] = gen_rtx_ZERO_EXTEND (TImode, operands[2]);
+})
+
+(define_insn "*umuldi3_highpart_reg"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "register_operand" "r"))
+ (zero_extend:TI
+ (match_operand:DI 2 "register_operand" "r")))
+ (const_int 64))))]
+ ""
+ "umulh %1,%2,%0"
+ [(set_attr "type" "imul")
+ (set_attr "opsize" "udi")])
+
+(define_insn "*umuldi3_highpart_const"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand:TI 2 "cint8_operand" "I"))
+ (const_int 64))))]
+ ""
+ "umulh %1,%2,%0"
+ [(set_attr "type" "imul")
+ (set_attr "opsize" "udi")])
+
+;; The divide and remainder operations take their inputs from r24 and
+;; r25, put their output in r27, and clobber r23 and r28 on all
+;; systems except Unicos/Mk. On Unicos/Mk, the standard library provides
+;; subroutines which use the standard calling convention and work on
+;; DImode operands.
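+;;
+;; A sketch of that convention (illustrative; the register moves are
+;; implied by the 'a', 'b' and 'c' constraints on the patterns below
+;; rather than emitted literally):
+;;     mov  a,$24              # dividend
+;;     mov  b,$25              # divisor
+;;     ldq  $27,__divl($gp)    # with explicit relocs
+;;     jsr  $23,($27),__divl   # quotient lands in $27
+;;     mov  $27,result         # $23 and $28 are clobbered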
+
+;; ??? Force sign-extension here because some versions of OSF/1 and
+;; Interix/NT don't do the right thing if the inputs are not properly
+;; sign-extended. But Linux, for instance, does not have this
+;; problem. Is it worth the complication here to eliminate the sign
+;; extension?
+
+(define_expand "divsi3"
+ [(set (match_dup 3)
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "")))
+ (set (match_dup 4)
+ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "")))
+ (parallel [(set (match_dup 5)
+ (sign_extend:DI (div:SI (match_dup 3) (match_dup 4))))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:SI 0 "nonimmediate_operand" "")
+ (subreg:SI (match_dup 5) 0))]
+ "! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK"
+{
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+ operands[5] = gen_reg_rtx (DImode);
+})
+
+(define_expand "udivsi3"
+ [(set (match_dup 3)
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "")))
+ (set (match_dup 4)
+ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "")))
+ (parallel [(set (match_dup 5)
+ (sign_extend:DI (udiv:SI (match_dup 3) (match_dup 4))))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:SI 0 "nonimmediate_operand" "")
+ (subreg:SI (match_dup 5) 0))]
+ "! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK"
+{
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+ operands[5] = gen_reg_rtx (DImode);
+})
+
+(define_expand "modsi3"
+ [(set (match_dup 3)
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "")))
+ (set (match_dup 4)
+ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "")))
+ (parallel [(set (match_dup 5)
+ (sign_extend:DI (mod:SI (match_dup 3) (match_dup 4))))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:SI 0 "nonimmediate_operand" "")
+ (subreg:SI (match_dup 5) 0))]
+ "! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK"
+{
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+ operands[5] = gen_reg_rtx (DImode);
+})
+
+(define_expand "umodsi3"
+ [(set (match_dup 3)
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "")))
+ (set (match_dup 4)
+ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "")))
+ (parallel [(set (match_dup 5)
+ (sign_extend:DI (umod:SI (match_dup 3) (match_dup 4))))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])
+ (set (match_operand:SI 0 "nonimmediate_operand" "")
+ (subreg:SI (match_dup 5) 0))]
+ "! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK"
+{
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+ operands[5] = gen_reg_rtx (DImode);
+})
+
+(define_expand "divdi3"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (div:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])]
+ "! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK"
+ "")
+
+(define_expand "udivdi3"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (udiv:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])]
+ "! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK"
+ "")
+
+(define_expand "moddi3"
+ [(use (match_operand:DI 0 "register_operand" ""))
+ (use (match_operand:DI 1 "register_operand" ""))
+ (use (match_operand:DI 2 "register_operand" ""))]
+ "!TARGET_ABI_OPEN_VMS"
+{
+ if (TARGET_ABI_UNICOSMK)
+ emit_insn (gen_moddi3_umk (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_moddi3_dft (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "moddi3_dft"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (mod:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])]
+ "! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK"
+ "")
+
+;; On Unicos/Mk, we do as the system's C compiler does:
+;; compute the quotient, multiply and subtract.
+
+(define_expand "moddi3_umk"
+ [(use (match_operand:DI 0 "register_operand" ""))
+ (use (match_operand:DI 1 "register_operand" ""))
+ (use (match_operand:DI 2 "register_operand" ""))]
+ "TARGET_ABI_UNICOSMK"
+{
+ rtx div, mul = gen_reg_rtx (DImode);
+
+ div = expand_binop (DImode, sdiv_optab, operands[1], operands[2],
+ NULL_RTX, 0, OPTAB_LIB);
+ div = force_reg (DImode, div);
+ emit_insn (gen_muldi3 (mul, operands[2], div));
+ emit_insn (gen_subdi3 (operands[0], operands[1], mul));
+ DONE;
+})
+
+(define_expand "umoddi3"
+ [(use (match_operand:DI 0 "register_operand" ""))
+ (use (match_operand:DI 1 "register_operand" ""))
+ (use (match_operand:DI 2 "register_operand" ""))]
+ "! TARGET_ABI_OPEN_VMS"
+{
+ if (TARGET_ABI_UNICOSMK)
+ emit_insn (gen_umoddi3_umk (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_umoddi3_dft (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "umoddi3_dft"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (umod:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])]
+ "! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK"
+ "")
+
+(define_expand "umoddi3_umk"
+ [(use (match_operand:DI 0 "register_operand" ""))
+ (use (match_operand:DI 1 "register_operand" ""))
+ (use (match_operand:DI 2 "register_operand" ""))]
+ "TARGET_ABI_UNICOSMK"
+{
+ rtx div, mul = gen_reg_rtx (DImode);
+
+ div = expand_binop (DImode, udiv_optab, operands[1], operands[2],
+ NULL_RTX, 1, OPTAB_LIB);
+ div = force_reg (DImode, div);
+ emit_insn (gen_muldi3 (mul, operands[2], div));
+ emit_insn (gen_subdi3 (operands[0], operands[1], mul));
+ DONE;
+})
+
+;; Lengths of 8 for ldq $t12,__divq($gp); jsr $t9,($t12),__divq as
+;; expanded by the assembler.
+
+(define_insn_and_split "*divmodsi_internal_er"
+ [(set (match_operand:DI 0 "register_operand" "=c")
+ (sign_extend:DI (match_operator:SI 3 "divmod_operator"
+ [(match_operand:DI 1 "register_operand" "a")
+ (match_operand:DI 2 "register_operand" "b")])))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))]
+ "TARGET_EXPLICIT_RELOCS && ! TARGET_ABI_OPEN_VMS"
+ "#"
+ "&& reload_completed"
+ [(parallel [(set (match_dup 0)
+ (sign_extend:DI (match_dup 3)))
+ (use (match_dup 0))
+ (use (match_dup 4))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])]
+{
+ const char *str;
+ switch (GET_CODE (operands[3]))
+ {
+ case DIV:
+ str = "__divl";
+ break;
+ case UDIV:
+ str = "__divlu";
+ break;
+ case MOD:
+ str = "__reml";
+ break;
+ case UMOD:
+ str = "__remlu";
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ operands[4] = GEN_INT (alpha_next_sequence_number++);
+ emit_insn (gen_movdi_er_high_g (operands[0], pic_offset_table_rtx,
+ gen_rtx_SYMBOL_REF (DImode, str),
+ operands[4]));
+}
+ [(set_attr "type" "jsr")
+ (set_attr "length" "8")])
+
+(define_insn "*divmodsi_internal_er_1"
+ [(set (match_operand:DI 0 "register_operand" "=c")
+ (sign_extend:DI (match_operator:SI 3 "divmod_operator"
+ [(match_operand:DI 1 "register_operand" "a")
+ (match_operand:DI 2 "register_operand" "b")])))
+ (use (match_operand:DI 4 "register_operand" "c"))
+ (use (match_operand 5 "const_int_operand" ""))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))]
+ "TARGET_EXPLICIT_RELOCS && ! TARGET_ABI_OPEN_VMS"
+ "jsr $23,($27),__%E3%j5"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "4")])
+
+(define_insn "*divmodsi_internal"
+ [(set (match_operand:DI 0 "register_operand" "=c")
+ (sign_extend:DI (match_operator:SI 3 "divmod_operator"
+ [(match_operand:DI 1 "register_operand" "a")
+ (match_operand:DI 2 "register_operand" "b")])))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))]
+ "! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK"
+ "%E3 %1,%2,%0"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "8")])
+
+(define_insn_and_split "*divmoddi_internal_er"
+ [(set (match_operand:DI 0 "register_operand" "=c")
+ (match_operator:DI 3 "divmod_operator"
+ [(match_operand:DI 1 "register_operand" "a")
+ (match_operand:DI 2 "register_operand" "b")]))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))]
+ "TARGET_EXPLICIT_RELOCS && ! TARGET_ABI_OPEN_VMS"
+ "#"
+ "&& reload_completed"
+ [(parallel [(set (match_dup 0) (match_dup 3))
+ (use (match_dup 0))
+ (use (match_dup 4))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))])]
+{
+ const char *str;
+ switch (GET_CODE (operands[3]))
+ {
+ case DIV:
+ str = "__divq";
+ break;
+ case UDIV:
+ str = "__divqu";
+ break;
+ case MOD:
+ str = "__remq";
+ break;
+ case UMOD:
+ str = "__remqu";
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ operands[4] = GEN_INT (alpha_next_sequence_number++);
+ emit_insn (gen_movdi_er_high_g (operands[0], pic_offset_table_rtx,
+ gen_rtx_SYMBOL_REF (DImode, str),
+ operands[4]));
+}
+ [(set_attr "type" "jsr")
+ (set_attr "length" "8")])
+
+(define_insn "*divmoddi_internal_er_1"
+ [(set (match_operand:DI 0 "register_operand" "=c")
+ (match_operator:DI 3 "divmod_operator"
+ [(match_operand:DI 1 "register_operand" "a")
+ (match_operand:DI 2 "register_operand" "b")]))
+ (use (match_operand:DI 4 "register_operand" "c"))
+ (use (match_operand 5 "const_int_operand" ""))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))]
+ "TARGET_EXPLICIT_RELOCS && ! TARGET_ABI_OPEN_VMS"
+ "jsr $23,($27),__%E3%j5"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "4")])
+
+(define_insn "*divmoddi_internal"
+ [(set (match_operand:DI 0 "register_operand" "=c")
+ (match_operator:DI 3 "divmod_operator"
+ [(match_operand:DI 1 "register_operand" "a")
+ (match_operand:DI 2 "register_operand" "b")]))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 28))]
+ "! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK"
+ "%E3 %1,%2,%0"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "8")])
+
+;; Next are the basic logical operations. We only expose the DImode operations
+;; to the rtl expanders, but SImode versions exist for combine as well as for
+;; the atomic operation splitters.
+
+(define_insn "*andsi_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (and:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ,rJ")
+ (match_operand:SI 2 "and_operand" "rI,N,MH")))]
+ ""
+ "@
+ and %r1,%2,%0
+ bic %r1,%N2,%0
+ zapnot %r1,%m2,%0"
+ [(set_attr "type" "ilog,ilog,shift")])
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (and:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ,rJ")
+ (match_operand:DI 2 "and_operand" "rI,N,MH")))]
+ ""
+ "@
+ and %r1,%2,%0
+ bic %r1,%N2,%0
+ zapnot %r1,%m2,%0"
+ [(set_attr "type" "ilog,ilog,shift")])
+
+;; There are times when we can split an AND into two AND insns. This occurs
+;; when we can first clear any bytes and then clear anything else. For
+;; example "I & 0xffff07" is "(I & 0xffffff) & 0xffffffffffffff07".
+;; Only do this when running on a 64-bit host since the computations are
+;; too messy otherwise.
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (and:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "const_int_operand" "")))]
+ "HOST_BITS_PER_WIDE_INT == 64 && ! and_operand (operands[2], DImode)"
+ [(set (match_dup 0) (and:DI (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (and:DI (match_dup 0) (match_dup 4)))]
+{
+ unsigned HOST_WIDE_INT mask1 = INTVAL (operands[2]);
+ unsigned HOST_WIDE_INT mask2 = mask1;
+ int i;
+
+ /* For each byte that isn't all zeros, make it all ones. */
+ for (i = 0; i < 64; i += 8)
+ if ((mask1 & ((HOST_WIDE_INT) 0xff << i)) != 0)
+ mask1 |= (HOST_WIDE_INT) 0xff << i;
+
+ /* Now turn on any bits we've just turned off. */
+ mask2 |= ~ mask1;
+
+ operands[3] = GEN_INT (mask1);
+ operands[4] = GEN_INT (mask2);
+})
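+
+;; Following the comment above, for operands[2] == 0xffff07 the loop
+;; produces mask1 == 0xffffff (every nonzero byte widened to all-ones,
+;; matching a zapnot) and mask2 == 0xffffffffffffff07 (whose complement
+;; 0xf8 fits the bic form), so one AND becomes two cheap instructions.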
+
+(define_expand "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+{
+ if (! TARGET_BWX)
+ operands[1] = force_reg (QImode, operands[1]);
+})
+
+(define_insn "*zero_extendqihi2_bwx"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_BWX"
+ "@
+ and %1,0xff,%0
+ ldbu %0,%1"
+ [(set_attr "type" "ilog,ild")])
+
+(define_insn "*zero_extendqihi2_nobwx"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (zero_extend:HI (match_operand:QI 1 "register_operand" "r")))]
+ "! TARGET_BWX"
+ "and %1,0xff,%0"
+ [(set_attr "type" "ilog")])
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+{
+ if (! TARGET_BWX)
+ operands[1] = force_reg (QImode, operands[1]);
+})
+
+(define_insn "*zero_extendqisi2_bwx"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_BWX"
+ "@
+ and %1,0xff,%0
+ ldbu %0,%1"
+ [(set_attr "type" "ilog,ild")])
+
+(define_insn "*zero_extendqisi2_nobwx"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "register_operand" "r")))]
+ "! TARGET_BWX"
+ "and %1,0xff,%0"
+ [(set_attr "type" "ilog")])
+
+(define_expand "zero_extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+{
+ if (! TARGET_BWX)
+ operands[1] = force_reg (QImode, operands[1]);
+})
+
+(define_insn "*zero_extendqidi2_bwx"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_BWX"
+ "@
+ and %1,0xff,%0
+ ldbu %0,%1"
+ [(set_attr "type" "ilog,ild")])
+
+(define_insn "*zero_extendqidi2_nobwx"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:QI 1 "register_operand" "r")))]
+ "! TARGET_BWX"
+ "and %1,0xff,%0"
+ [(set_attr "type" "ilog")])
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+{
+ if (! TARGET_BWX)
+ operands[1] = force_reg (HImode, operands[1]);
+})
+
+(define_insn "*zero_extendhisi2_bwx"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_BWX"
+ "@
+ zapnot %1,3,%0
+ ldwu %0,%1"
+ [(set_attr "type" "shift,ild")])
+
+(define_insn "*zero_extendhisi2_nobwx"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "register_operand" "r")))]
+ "! TARGET_BWX"
+ "zapnot %1,3,%0"
+ [(set_attr "type" "shift")])
+
+(define_expand "zero_extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+{
+ if (! TARGET_BWX)
+ operands[1] = force_reg (HImode, operands[1]);
+})
+
+(define_insn "*zero_extendhidi2_bwx"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_BWX"
+ "@
+ zapnot %1,3,%0
+ ldwu %0,%1"
+ [(set_attr "type" "shift,ild")])
+
+(define_insn "*zero_extendhidi2_nobwx"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:HI 1 "register_operand" "r")))]
+ ""
+ "zapnot %1,3,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "zero_extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "zapnot %1,15,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "*andnotsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 1 "reg_or_8bit_operand" "rI"))
+ (match_operand:SI 2 "reg_or_0_operand" "rJ")))]
+ ""
+ "bic %r2,%1,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "andnotdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (not:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI"))
+ (match_operand:DI 2 "reg_or_0_operand" "rJ")))]
+ ""
+ "bic %r2,%1,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "*iorsi_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:SI 2 "or_operand" "rI,N")))]
+ ""
+ "@
+ bis %r1,%2,%0
+ ornot %r1,%N2,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (ior:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:DI 2 "or_operand" "rI,N")))]
+ ""
+ "@
+ bis %r1,%2,%0
+ ornot %r1,%N2,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "*one_cmplsi_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operand:SI 1 "reg_or_8bit_operand" "rI")))]
+ ""
+ "ornot $31,%1,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI")))]
+ ""
+ "ornot $31,%1,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "*iornotsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ior:SI (not:SI (match_operand:SI 1 "reg_or_8bit_operand" "rI"))
+ (match_operand:SI 2 "reg_or_0_operand" "rJ")))]
+ ""
+ "ornot %r2,%1,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "*iornotdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (not:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI"))
+ (match_operand:DI 2 "reg_or_0_operand" "rJ")))]
+ ""
+ "ornot %r2,%1,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "*xorsi_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (xor:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:SI 2 "or_operand" "rI,N")))]
+ ""
+ "@
+ xor %r1,%2,%0
+ eqv %r1,%N2,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (xor:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:DI 2 "or_operand" "rI,N")))]
+ ""
+ "@
+ xor %r1,%2,%0
+ eqv %r1,%N2,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "*xornotsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (xor:SI (match_operand:SI 1 "register_operand" "%rJ")
+ (match_operand:SI 2 "register_operand" "rI"))))]
+ ""
+ "eqv %r1,%2,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "*xornotdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (xor:DI (match_operand:DI 1 "register_operand" "%rJ")
+ (match_operand:DI 2 "register_operand" "rI"))))]
+ ""
+ "eqv %r1,%2,%0"
+ [(set_attr "type" "ilog")])
+
+;; Handle FFS and related insns iff we support CIX.
+
+(define_expand "ffsdi2"
+ [(set (match_dup 2)
+ (ctz:DI (match_operand:DI 1 "register_operand" "")))
+ (set (match_dup 3)
+ (plus:DI (match_dup 2) (const_int 1)))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (eq (match_dup 1) (const_int 0))
+ (const_int 0) (match_dup 3)))]
+ "TARGET_CIX"
+{
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = gen_reg_rtx (DImode);
+})
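+
+;; I.e. ffs(x) is computed as cttz(x) + 1, with the final conditional
+;; move forcing the result to 0 when x == 0; illustratively,
+;; ffs(0x8) == 4 since cttz(0x8) == 3, and ffs(0) == 0.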
+
+(define_insn "clzdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (clz:DI (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_CIX"
+ "ctlz %1,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "ctzdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ctz:DI (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_CIX"
+ "cttz %1,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "popcountdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (popcount:DI (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_CIX"
+ "ctpop %1,%0"
+ [(set_attr "type" "mvi")])
+
+;; Next come the shifts and the various extract and insert operations.
+
+(define_insn "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,rJ")
+ (match_operand:DI 2 "reg_or_6bit_operand" "P,rS")))]
+ ""
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (operands[2] == const1_rtx)
+ return "addq %r1,%r1,%0";
+ else
+ return "s%P2addq %r1,0,%0";
+ case 1:
+ return "sll %r1,%2,%0";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "iadd,shift")])
+
+(define_insn "*ashldi_se"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (subreg:SI (ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "const_int_operand" "P"))
+ 0)))]
+ "INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3"
+{
+ if (operands[2] == const1_rtx)
+ return "addl %r1,%r1,%0";
+ else
+ return "s%P2addl %r1,0,%0";
+}
+ [(set_attr "type" "iadd")])
+
+(define_insn "lshrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "reg_or_6bit_operand" "rS")))]
+ ""
+ "srl %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "ashrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "reg_or_6bit_operand" "rS")))]
+ ""
+ "sra %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_expand "extendqihi2"
+ [(set (match_dup 2)
+ (ashift:DI (match_operand:QI 1 "some_operand" "")
+ (const_int 56)))
+ (set (match_operand:HI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 2)
+ (const_int 56)))]
+ ""
+{
+ if (TARGET_BWX)
+ {
+ emit_insn (gen_extendqihi2x (operands[0],
+ force_reg (QImode, operands[1])));
+ DONE;
+ }
+
+ /* If we have an unaligned MEM, extend to DImode (which we do
+ specially) and then copy to the result. */
+ if (unaligned_memory_operand (operands[1], HImode))
+ {
+ rtx temp = gen_reg_rtx (DImode);
+
+ emit_insn (gen_extendqidi2 (temp, operands[1]));
+ emit_move_insn (operands[0], gen_lowpart (HImode, temp));
+ DONE;
+ }
+
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, force_reg (QImode, operands[1]));
+ operands[2] = gen_reg_rtx (DImode);
+})
+
+(define_insn "extendqidi2x"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:QI 1 "register_operand" "r")))]
+ "TARGET_BWX"
+ "sextb %1,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extendhidi2x"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:HI 1 "register_operand" "r")))]
+ "TARGET_BWX"
+ "sextw %1,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extendqisi2x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "register_operand" "r")))]
+ "TARGET_BWX"
+ "sextb %1,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extendhisi2x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "r")))]
+ "TARGET_BWX"
+ "sextw %1,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extendqihi2x"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "register_operand" "r")))]
+ "TARGET_BWX"
+ "sextb %1,%0"
+ [(set_attr "type" "shift")])
+
+(define_expand "extendqisi2"
+ [(set (match_dup 2)
+ (ashift:DI (match_operand:QI 1 "some_operand" "")
+ (const_int 56)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 2)
+ (const_int 56)))]
+ ""
+{
+ if (TARGET_BWX)
+ {
+ emit_insn (gen_extendqisi2x (operands[0],
+ force_reg (QImode, operands[1])));
+ DONE;
+ }
+
+ /* If we have an unaligned MEM, extend to a DImode form of
+ the result (which we do specially). */
+ if (unaligned_memory_operand (operands[1], QImode))
+ {
+ rtx temp = gen_reg_rtx (DImode);
+
+ emit_insn (gen_extendqidi2 (temp, operands[1]));
+ emit_move_insn (operands[0], gen_lowpart (SImode, temp));
+ DONE;
+ }
+
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, force_reg (QImode, operands[1]));
+ operands[2] = gen_reg_rtx (DImode);
+})
+
+(define_expand "extendqidi2"
+ [(set (match_dup 2)
+ (ashift:DI (match_operand:QI 1 "some_operand" "")
+ (const_int 56)))
+ (set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 2)
+ (const_int 56)))]
+ ""
+{
+ if (TARGET_BWX)
+ {
+ emit_insn (gen_extendqidi2x (operands[0],
+ force_reg (QImode, operands[1])));
+ DONE;
+ }
+
+ if (unaligned_memory_operand (operands[1], QImode))
+ {
+ rtx seq = gen_unaligned_extendqidi (operands[0], XEXP (operands[1], 0));
+ alpha_set_memflags (seq, operands[1]);
+ emit_insn (seq);
+ DONE;
+ }
+
+ operands[1] = gen_lowpart (DImode, force_reg (QImode, operands[1]));
+ operands[2] = gen_reg_rtx (DImode);
+})
+
+(define_expand "extendhisi2"
+ [(set (match_dup 2)
+ (ashift:DI (match_operand:HI 1 "some_operand" "")
+ (const_int 48)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 2)
+ (const_int 48)))]
+ ""
+{
+ if (TARGET_BWX)
+ {
+ emit_insn (gen_extendhisi2x (operands[0],
+ force_reg (HImode, operands[1])));
+ DONE;
+ }
+
+ /* If we have an unaligned MEM, extend to a DImode form of
+ the result (which we do specially). */
+ if (unaligned_memory_operand (operands[1], HImode))
+ {
+ rtx temp = gen_reg_rtx (DImode);
+
+ emit_insn (gen_extendhidi2 (temp, operands[1]));
+ emit_move_insn (operands[0], gen_lowpart (SImode, temp));
+ DONE;
+ }
+
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, force_reg (HImode, operands[1]));
+ operands[2] = gen_reg_rtx (DImode);
+})
+
+(define_expand "extendhidi2"
+ [(set (match_dup 2)
+ (ashift:DI (match_operand:HI 1 "some_operand" "")
+ (const_int 48)))
+ (set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 2)
+ (const_int 48)))]
+ ""
+{
+ if (TARGET_BWX)
+ {
+ emit_insn (gen_extendhidi2x (operands[0],
+ force_reg (HImode, operands[1])));
+ DONE;
+ }
+
+ if (unaligned_memory_operand (operands[1], HImode))
+ {
+ rtx seq = gen_unaligned_extendhidi (operands[0], XEXP (operands[1], 0));
+
+ alpha_set_memflags (seq, operands[1]);
+ emit_insn (seq);
+ DONE;
+ }
+
+ operands[1] = gen_lowpart (DImode, force_reg (HImode, operands[1]));
+ operands[2] = gen_reg_rtx (DImode);
+})
+
+;; Here's how we sign extend an unaligned byte and halfword. Doing this
+;; as a pattern saves one instruction. The code is similar to that for
+;; the unaligned loads (see below).
+;;
+;; Operand 1 is the address, operand 0 is the result.
+(define_expand "unaligned_extendqidi"
+ [(use (match_operand:QI 0 "register_operand" ""))
+ (use (match_operand:DI 1 "address_operand" ""))]
+ ""
+{
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ if (WORDS_BIG_ENDIAN)
+ emit_insn (gen_unaligned_extendqidi_be (operands[0], operands[1]));
+ else
+ emit_insn (gen_unaligned_extendqidi_le (operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "unaligned_extendqidi_le"
+ [(set (match_dup 3)
+ (mem:DI (and:DI (match_operand:DI 1 "" "") (const_int -8))))
+ (set (match_dup 4)
+ (ashift:DI (match_dup 3)
+ (minus:DI (const_int 64)
+ (ashift:DI
+ (and:DI (match_dup 2) (const_int 7))
+ (const_int 3)))))
+ (set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 4) (const_int 56)))]
+ "! WORDS_BIG_ENDIAN"
+{
+ operands[2] = get_unaligned_offset (operands[1], 1);
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+})
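+
+;; As an illustrative sketch (register numbers arbitrary), the
+;; little-endian byte case above, for an unaligned address in $16,
+;; comes out roughly as:
+;;
+;;	ldq_u	$1,0($16)	# load the aligned quadword holding the byte
+;;	lda	$2,1($16)	# operand 2 = address + 1
+;;	extqh	$1,$2,$1	# shift the byte into bits <63:56>
+;;	sra	$1,56,$3	# the arithmetic right shift sign-extends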
+
+(define_expand "unaligned_extendqidi_be"
+ [(set (match_dup 3)
+ (mem:DI (and:DI (match_operand:DI 1 "" "") (const_int -8))))
+ (set (match_dup 4)
+ (ashift:DI (match_dup 3)
+ (ashift:DI
+ (and:DI
+ (plus:DI (match_dup 2) (const_int 1))
+ (const_int 7))
+ (const_int 3))))
+ (set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 4) (const_int 56)))]
+ "WORDS_BIG_ENDIAN"
+{
+ operands[2] = get_unaligned_offset (operands[1], -1);
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+})
+
+(define_expand "unaligned_extendhidi"
+ [(use (match_operand:QI 0 "register_operand" ""))
+ (use (match_operand:DI 1 "address_operand" ""))]
+ ""
+{
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ if (WORDS_BIG_ENDIAN)
+ emit_insn (gen_unaligned_extendhidi_be (operands[0], operands[1]));
+ else
+ emit_insn (gen_unaligned_extendhidi_le (operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "unaligned_extendhidi_le"
+ [(set (match_dup 3)
+ (mem:DI (and:DI (match_operand:DI 1 "" "") (const_int -8))))
+ (set (match_dup 4)
+ (ashift:DI (match_dup 3)
+ (minus:DI (const_int 64)
+ (ashift:DI
+ (and:DI (match_dup 2) (const_int 7))
+ (const_int 3)))))
+ (set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 4) (const_int 48)))]
+ "! WORDS_BIG_ENDIAN"
+{
+ operands[2] = get_unaligned_offset (operands[1], 2);
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+})
+
+(define_expand "unaligned_extendhidi_be"
+ [(set (match_dup 3)
+ (mem:DI (and:DI (match_operand:DI 1 "" "") (const_int -8))))
+ (set (match_dup 4)
+ (ashift:DI (match_dup 3)
+ (ashift:DI
+ (and:DI
+ (plus:DI (match_dup 2) (const_int 1))
+ (const_int 7))
+ (const_int 3))))
+ (set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_dup 4) (const_int 48)))]
+ "WORDS_BIG_ENDIAN"
+{
+ operands[2] = get_unaligned_offset (operands[1], -1);
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+})
+
+(define_insn "*extxl_const"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "mode_width_operand" "n")
+ (match_operand:DI 3 "mul8_operand" "I")))]
+ ""
+ "ext%M2l %r1,%s3,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extxl_le"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "mode_width_operand" "n")
+ (ashift:DI (match_operand:DI 3 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ "! WORDS_BIG_ENDIAN"
+ "ext%M2l %r1,%3,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extxl_be"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "mode_width_operand" "n")
+ (minus:DI
+ (const_int 56)
+ (ashift:DI
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")
+ (const_int 3)))))]
+ "WORDS_BIG_ENDIAN"
+ "ext%M2l %r1,%3,%0"
+ [(set_attr "type" "shift")])
+
+;; Combine has some strange notion of preserving existing undefined behavior
+;; in shifts larger than a word size.  So we capture these patterns, which
+;; it should have turned into zero_extracts.
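+;;
+;; In C terms, combine may hand us something like (x >> (s * 8)) & 0xffff
+;; instead of the equivalent zero_extract; the patterns below accept that
+;; form and still emit a single extwl.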
+
+(define_insn "*extxl_1_le"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3)))
+ (match_operand:DI 3 "mode_mask_operand" "n")))]
+ "! WORDS_BIG_ENDIAN"
+ "ext%U3l %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "*extxl_1_be"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (lshiftrt:DI
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (minus:DI (const_int 56)
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3))))
+ (match_operand:DI 3 "mode_mask_operand" "n")))]
+ "WORDS_BIG_ENDIAN"
+ "ext%U3l %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "*extql_2_le"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ "! WORDS_BIG_ENDIAN"
+ "extql %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "*extql_2_be"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (minus:DI (const_int 56)
+ (ashift:DI
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3)))))]
+ "WORDS_BIG_ENDIAN"
+ "extql %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extqh_le"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (minus:DI (const_int 64)
+ (ashift:DI
+ (and:DI
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 7))
+ (const_int 3)))))]
+ "! WORDS_BIG_ENDIAN"
+ "extqh %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extqh_be"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (ashift:DI
+ (and:DI
+ (plus:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 1))
+ (const_int 7))
+ (const_int 3))))]
+ "WORDS_BIG_ENDIAN"
+ "extqh %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extlh_le"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI
+ (and:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 2147483647))
+ (minus:DI (const_int 64)
+ (ashift:DI
+ (and:DI
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 7))
+ (const_int 3)))))]
+ "! WORDS_BIG_ENDIAN"
+ "extlh %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extlh_be"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI
+ (ashift:DI
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (ashift:DI
+ (and:DI
+ (plus:DI
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 1))
+ (const_int 7))
+ (const_int 3)))
+ (const_int 2147483647)))]
+ "WORDS_BIG_ENDIAN"
+ "extlh %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extwh_le"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI
+ (and:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 65535))
+ (minus:DI (const_int 64)
+ (ashift:DI
+ (and:DI
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 7))
+ (const_int 3)))))]
+ "! WORDS_BIG_ENDIAN"
+ "extwh %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extwh_be"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI
+ (ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (ashift:DI
+ (and:DI
+ (plus:DI
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 1))
+ (const_int 7))
+ (const_int 3)))
+ (const_int 65535)))]
+ "WORDS_BIG_ENDIAN"
+ "extwh %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
+;; This converts an extXl into an extXh with an appropriate adjustment
+;; to the address calculation.
+
+;;(define_split
+;; [(set (match_operand:DI 0 "register_operand" "")
+;; (ashift:DI (zero_extract:DI (match_operand:DI 1 "register_operand" "")
+;; (match_operand:DI 2 "mode_width_operand" "")
+;; (ashift:DI (match_operand:DI 3 "" "")
+;; (const_int 3)))
+;; (match_operand:DI 4 "const_int_operand" "")))
+;; (clobber (match_operand:DI 5 "register_operand" ""))]
+;; "INTVAL (operands[4]) == 64 - INTVAL (operands[2])"
+;; [(set (match_dup 5) (match_dup 6))
+;; (set (match_dup 0)
+;; (ashift:DI (zero_extract:DI (match_dup 1) (match_dup 2)
+;; (ashift:DI (plus:DI (match_dup 5)
+;; (match_dup 7))
+;; (const_int 3)))
+;; (match_dup 4)))]
+;; "
+;;{
+;; operands[6] = plus_constant (operands[3],
+;; INTVAL (operands[2]) / BITS_PER_UNIT);
+;; operands[7] = GEN_INT (- INTVAL (operands[2]) / BITS_PER_UNIT);
+;;}")
+
+(define_insn "*insbl_const"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:DI 2 "mul8_operand" "I")))]
+ ""
+ "insbl %1,%s2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "*inswl_const"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:HI 1 "register_operand" "r"))
+ (match_operand:DI 2 "mul8_operand" "I")))]
+ ""
+ "inswl %1,%s2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "*insll_const"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:DI 2 "mul8_operand" "I")))]
+ ""
+ "insll %1,%s2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "insbl_le"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:QI 1 "register_operand" "r"))
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ "! WORDS_BIG_ENDIAN"
+ "insbl %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "insbl_be"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:QI 1 "register_operand" "r"))
+ (minus:DI (const_int 56)
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3)))))]
+ "WORDS_BIG_ENDIAN"
+ "insbl %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "inswl_le"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:HI 1 "register_operand" "r"))
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ "! WORDS_BIG_ENDIAN"
+ "inswl %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "inswl_be"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:HI 1 "register_operand" "r"))
+ (minus:DI (const_int 56)
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3)))))]
+ "WORDS_BIG_ENDIAN"
+ "inswl %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "insll_le"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ "! WORDS_BIG_ENDIAN"
+ "insll %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "insll_be"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (minus:DI (const_int 56)
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3)))))]
+ "WORDS_BIG_ENDIAN"
+ "insll %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "insql_le"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ "! WORDS_BIG_ENDIAN"
+ "insql %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "insql_be"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (minus:DI (const_int 56)
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3)))))]
+ "WORDS_BIG_ENDIAN"
+ "insql %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+;; Combine sometimes has the habit of moving the and outside of the
+;; shift, making life more interesting.
+
+(define_insn "*insxl"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "mul8_operand" "I"))
+ (match_operand:DI 3 "immediate_operand" "i")))]
+ "HOST_BITS_PER_WIDE_INT == 64
+ && GET_CODE (operands[3]) == CONST_INT
+ && (((unsigned HOST_WIDE_INT) 0xff << INTVAL (operands[2])
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
+ || ((unsigned HOST_WIDE_INT) 0xffff << INTVAL (operands[2])
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
+ || ((unsigned HOST_WIDE_INT) 0xffffffff << INTVAL (operands[2])
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3])))"
+{
+#if HOST_BITS_PER_WIDE_INT == 64
+ if ((unsigned HOST_WIDE_INT) 0xff << INTVAL (operands[2])
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
+ return "insbl %1,%s2,%0";
+ if ((unsigned HOST_WIDE_INT) 0xffff << INTVAL (operands[2])
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
+ return "inswl %1,%s2,%0";
+ if ((unsigned HOST_WIDE_INT) 0xffffffff << INTVAL (operands[2])
+ == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
+ return "insll %1,%s2,%0";
+#endif
+ gcc_unreachable ();
+}
+ [(set_attr "type" "shift")])
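+
+;; For instance, combine may present (x << 8) & 0xff00 rather than the
+;; canonical shift of a zero_extended QImode value; the pattern above
+;; accepts that form and emits the same "insbl %1,1,%0".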
+
+;; We do not include the insXh insns because they are complex to express
+;; and it does not appear that we would ever want to generate them.
+;;
+;; Since we do need them for block moves, though, cop out and use an unspec.
+
+(define_insn "insxh"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "mode_width_operand" "n")
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")]
+ UNSPEC_INSXH))]
+ ""
+ "ins%M2h %1,%3,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "mskxl_le"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (not:DI (ashift:DI
+ (match_operand:DI 2 "mode_mask_operand" "n")
+ (ashift:DI
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")
+ (const_int 3))))
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+ "! WORDS_BIG_ENDIAN"
+ "msk%U2l %r1,%3,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "mskxl_be"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (not:DI (ashift:DI
+ (match_operand:DI 2 "mode_mask_operand" "n")
+ (minus:DI (const_int 56)
+ (ashift:DI
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")
+ (const_int 3)))))
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+ "WORDS_BIG_ENDIAN"
+ "msk%U2l %r1,%3,%0"
+ [(set_attr "type" "shift")])
+
+;; We do not include the mskXh insns because it does not appear we would
+;; ever generate one.
+;;
+;; Again, though, we do need them for block moves, so we cop out and use
+;; an unspec again.
+
+(define_insn "mskxh"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "mode_width_operand" "n")
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")]
+ UNSPEC_MSKXH))]
+ ""
+ "msk%M2h %1,%3,%0"
+ [(set_attr "type" "shift")])
+
+;; Prefer AND + NE over LSHIFTRT + AND.
+
+(define_insn_and_split "*ze_and_ne"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 1)
+ (match_operand 2 "const_int_operand" "I")))]
+ "(unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 8"
+ "#"
+ "(unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 8"
+ [(set (match_dup 0)
+ (and:DI (match_dup 1) (match_dup 3)))
+ (set (match_dup 0)
+ (ne:DI (match_dup 0) (const_int 0)))]
+ "operands[3] = GEN_INT (1 << INTVAL (operands[2]));")
+
+;; Floating-point operations. All the double-precision insns can extend
+;; from single, so indicate that.  The exceptions are the ones that simply
+;; play with the sign bits; it's not clear what to do there.
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "cpys $f31,%R1,%0"
+ [(set_attr "type" "fcpys")])
+
+(define_insn "*nabssf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (abs:SF (match_operand:SF 1 "reg_or_0_operand" "fG"))))]
+ "TARGET_FP"
+ "cpysn $f31,%R1,%0"
+ [(set_attr "type" "fadd")])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (abs:DF (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "cpys $f31,%R1,%0"
+ [(set_attr "type" "fcpys")])
+
+(define_insn "*nabsdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (neg:DF (abs:DF (match_operand:DF 1 "reg_or_0_operand" "fG"))))]
+ "TARGET_FP"
+ "cpysn $f31,%R1,%0"
+ [(set_attr "type" "fadd")])
+
+(define_expand "abstf2"
+ [(parallel [(set (match_operand:TF 0 "register_operand" "")
+ (abs:TF (match_operand:TF 1 "reg_or_0_operand" "")))
+ (use (match_dup 2))])]
+ "TARGET_HAS_XFLOATING_LIBS"
+{
+#if HOST_BITS_PER_WIDE_INT >= 64
+ operands[2] = force_reg (DImode, GEN_INT ((HOST_WIDE_INT) 1 << 63));
+#else
+ operands[2] = force_reg (DImode, immed_double_const (0, 0x80000000, DImode));
+#endif
+})
+
+(define_insn_and_split "*abstf_internal"
+ [(set (match_operand:TF 0 "register_operand" "=r")
+ (abs:TF (match_operand:TF 1 "reg_or_0_operand" "rG")))
+ (use (match_operand:DI 2 "register_operand" "r"))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ "alpha_split_tfmode_frobsign (operands, gen_andnotdi3); DONE;")
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "cpysn %R1,%R1,%0"
+ [(set_attr "type" "fadd")])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (neg:DF (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "cpysn %R1,%R1,%0"
+ [(set_attr "type" "fadd")])
+
+(define_expand "negtf2"
+ [(parallel [(set (match_operand:TF 0 "register_operand" "")
+ (neg:TF (match_operand:TF 1 "reg_or_0_operand" "")))
+ (use (match_dup 2))])]
+ "TARGET_HAS_XFLOATING_LIBS"
+{
+#if HOST_BITS_PER_WIDE_INT >= 64
+ operands[2] = force_reg (DImode, GEN_INT ((HOST_WIDE_INT) 1 << 63));
+#else
+ operands[2] = force_reg (DImode, immed_double_const (0, 0x80000000, DImode));
+#endif
+})
+
+(define_insn_and_split "*negtf_internal"
+ [(set (match_operand:TF 0 "register_operand" "=r")
+ (neg:TF (match_operand:TF 1 "reg_or_0_operand" "rG")))
+ (use (match_operand:DI 2 "register_operand" "r"))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ "alpha_split_tfmode_frobsign (operands, gen_xordi3); DONE;")
+
+(define_insn "copysignsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "reg_or_0_operand" "fG")
+ (match_operand:SF 2 "reg_or_0_operand" "fG")]
+ UNSPEC_COPYSIGN))]
+ "TARGET_FP"
+ "cpys %R2,%R1,%0"
+ [(set_attr "type" "fadd")])
+
+(define_insn "*ncopysignsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (unspec:SF [(match_operand:SF 1 "reg_or_0_operand" "fG")
+ (match_operand:SF 2 "reg_or_0_operand" "fG")]
+ UNSPEC_COPYSIGN)))]
+ "TARGET_FP"
+ "cpysn %R2,%R1,%0"
+ [(set_attr "type" "fadd")])
+
+(define_insn "copysigndf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "reg_or_0_operand" "fG")
+ (match_operand:DF 2 "reg_or_0_operand" "fG")]
+ UNSPEC_COPYSIGN))]
+ "TARGET_FP"
+ "cpys %R2,%R1,%0"
+ [(set_attr "type" "fadd")])
+
+(define_insn "*ncopysigndf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (neg:DF (unspec:DF [(match_operand:DF 1 "reg_or_0_operand" "fG")
+ (match_operand:DF 2 "reg_or_0_operand" "fG")]
+ UNSPEC_COPYSIGN)))]
+ "TARGET_FP"
+ "cpysn %R2,%R1,%0"
+ [(set_attr "type" "fadd")])
+
+(define_insn "*addsf_ieee"
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (plus:SF (match_operand:SF 1 "reg_or_0_operand" "%fG")
+ (match_operand:SF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "add%,%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (plus:SF (match_operand:SF 1 "reg_or_0_operand" "%fG")
+ (match_operand:SF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "add%,%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*adddf_ieee"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (plus:DF (match_operand:DF 1 "reg_or_0_operand" "%fG")
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "add%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (plus:DF (match_operand:DF 1 "reg_or_0_operand" "%fG")
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "add%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*adddf_ext1"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_0_operand" "fG"))
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "add%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*adddf_ext2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_0_operand" "%fG"))
+ (float_extend:DF
+ (match_operand:SF 2 "reg_or_0_operand" "fG"))))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "add%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_expand "addtf3"
+ [(use (match_operand 0 "register_operand" ""))
+ (use (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "general_operand" ""))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "alpha_emit_xfloating_arith (PLUS, operands); DONE;")
+
+;; Define conversion operators between DFmode and SImode, using the cvtql
+;; instruction. To allow combine et al to do useful things, we keep the
+;; operation as a unit until after reload, at which point we split the
+;; instructions.
+;;
+;; Note that we (attempt to) only consider this optimization when the
+;; ultimate destination is memory. If we will be doing further integer
+;; processing, it is cheaper to do the truncation in the int regs.
+
+(define_insn "*cvtql"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unspec:SF [(match_operand:DI 1 "reg_or_0_operand" "fG")]
+ UNSPEC_CVTQL))]
+ "TARGET_FP"
+ "cvtql%/ %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "trap_suffix" "v_sv")])
+
+(define_insn_and_split "*fix_truncdfsi_ieee"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (subreg:SI
+ (match_operator:DI 4 "fix_operator"
+ [(match_operand:DF 1 "reg_or_0_operand" "fG")]) 0))
+ (clobber (match_scratch:DI 2 "=&f"))
+ (clobber (match_scratch:SF 3 "=&f"))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (match_op_dup 4 [(match_dup 1)]))
+ (set (match_dup 3) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL))
+ (set (match_dup 5) (match_dup 3))]
+{
+ operands[5] = adjust_address (operands[0], SFmode, 0);
+}
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn_and_split "*fix_truncdfsi_internal"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (subreg:SI
+ (match_operator:DI 3 "fix_operator"
+ [(match_operand:DF 1 "reg_or_0_operand" "fG")]) 0))
+ (clobber (match_scratch:DI 2 "=f"))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (match_op_dup 3 [(match_dup 1)]))
+ (set (match_dup 4) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL))
+ (set (match_dup 5) (match_dup 4))]
+{
+ operands[4] = gen_rtx_REG (SFmode, REGNO (operands[2]));
+ operands[5] = adjust_address (operands[0], SFmode, 0);
+}
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn "*fix_truncdfdi_ieee"
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=&f")
+ (match_operator:DI 2 "fix_operator"
+ [(match_operand:DF 1 "reg_or_0_operand" "fG")]))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "cvt%-q%/ %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "c")
+ (set_attr "trap_suffix" "v_sv_svi")])
+
+(define_insn "*fix_truncdfdi2"
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=f")
+ (match_operator:DI 2 "fix_operator"
+ [(match_operand:DF 1 "reg_or_0_operand" "fG")]))]
+ "TARGET_FP"
+ "cvt%-q%/ %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "c")
+ (set_attr "trap_suffix" "v_sv_svi")])
+
+(define_expand "fix_truncdfdi2"
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "")
+ (fix:DI (match_operand:DF 1 "reg_or_0_operand" "")))]
+ "TARGET_FP"
+ "")
+
+(define_expand "fixuns_truncdfdi2"
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "")
+ (unsigned_fix:DI (match_operand:DF 1 "reg_or_0_operand" "")))]
+ "TARGET_FP"
+ "")
+
+;; Likewise between SFmode and SImode.
+
+(define_insn_and_split "*fix_truncsfsi_ieee"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (subreg:SI
+ (match_operator:DI 4 "fix_operator"
+ [(float_extend:DF
+ (match_operand:SF 1 "reg_or_0_operand" "fG"))]) 0))
+ (clobber (match_scratch:DI 2 "=&f"))
+ (clobber (match_scratch:SF 3 "=&f"))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (match_op_dup 4 [(float_extend:DF (match_dup 1))]))
+ (set (match_dup 3) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL))
+ (set (match_dup 5) (match_dup 3))]
+{
+ operands[5] = adjust_address (operands[0], SFmode, 0);
+}
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn_and_split "*fix_truncsfsi_internal"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (subreg:SI
+ (match_operator:DI 3 "fix_operator"
+ [(float_extend:DF
+ (match_operand:SF 1 "reg_or_0_operand" "fG"))]) 0))
+ (clobber (match_scratch:DI 2 "=f"))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (match_op_dup 3 [(float_extend:DF (match_dup 1))]))
+ (set (match_dup 4) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL))
+ (set (match_dup 5) (match_dup 4))]
+{
+ operands[4] = gen_rtx_REG (SFmode, REGNO (operands[2]));
+ operands[5] = adjust_address (operands[0], SFmode, 0);
+}
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn "*fix_truncsfdi_ieee"
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=&f")
+ (match_operator:DI 2 "fix_operator"
+ [(float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG"))]))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "cvt%-q%/ %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "c")
+ (set_attr "trap_suffix" "v_sv_svi")])
+
+(define_insn "*fix_truncsfdi2"
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=f")
+ (match_operator:DI 2 "fix_operator"
+ [(float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG"))]))]
+ "TARGET_FP"
+ "cvt%-q%/ %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "c")
+ (set_attr "trap_suffix" "v_sv_svi")])
+
+(define_expand "fix_truncsfdi2"
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "")
+ (fix:DI (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" ""))))]
+ "TARGET_FP"
+ "")
+
+(define_expand "fixuns_truncsfdi2"
+ [(set (match_operand:DI 0 "reg_no_subreg_operand" "")
+ (unsigned_fix:DI
+ (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" ""))))]
+ "TARGET_FP"
+ "")
+
+(define_expand "fix_trunctfdi2"
+ [(use (match_operand:DI 0 "register_operand" ""))
+ (use (match_operand:TF 1 "general_operand" ""))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "alpha_emit_xfloating_cvt (FIX, operands); DONE;")
+
+(define_expand "fixuns_trunctfdi2"
+ [(use (match_operand:DI 0 "register_operand" ""))
+ (use (match_operand:TF 1 "general_operand" ""))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "alpha_emit_xfloating_cvt (UNSIGNED_FIX, operands); DONE;")
+
+(define_insn "*floatdisf_ieee"
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (float:SF (match_operand:DI 1 "reg_no_subreg_operand" "f")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "cvtq%,%/ %1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "sui")])
+
+(define_insn "floatdisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:DI 1 "reg_no_subreg_operand" "f")))]
+ "TARGET_FP"
+ "cvtq%,%/ %1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "sui")])
+
+(define_insn_and_split "*floatsisf2_ieee"
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (float:SF (match_operand:SI 1 "memory_operand" "m")))
+ (clobber (match_scratch:DI 2 "=&f"))
+ (clobber (match_scratch:SF 3 "=&f"))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 2) (unspec:DI [(match_dup 3)] UNSPEC_CVTLQ))
+ (set (match_dup 0) (float:SF (match_dup 2)))]
+{
+ operands[1] = adjust_address (operands[1], SFmode, 0);
+})
+
+(define_insn_and_split "*floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:SI 1 "memory_operand" "m")))]
+ "TARGET_FP"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (unspec:DI [(match_dup 0)] UNSPEC_CVTLQ))
+ (set (match_dup 0) (float:SF (match_dup 2)))]
+{
+ operands[1] = adjust_address (operands[1], SFmode, 0);
+ operands[2] = gen_rtx_REG (DImode, REGNO (operands[0]));
+})
+
+(define_insn "*floatdidf_ieee"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (float:DF (match_operand:DI 1 "reg_no_subreg_operand" "f")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "cvtq%-%/ %1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "sui")])
+
+(define_insn "floatdidf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float:DF (match_operand:DI 1 "reg_no_subreg_operand" "f")))]
+ "TARGET_FP"
+ "cvtq%-%/ %1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "sui")])
+
+(define_insn_and_split "*floatsidf2_ieee"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (float:DF (match_operand:SI 1 "memory_operand" "m")))
+ (clobber (match_scratch:DI 2 "=&f"))
+ (clobber (match_scratch:SF 3 "=&f"))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 2) (unspec:DI [(match_dup 3)] UNSPEC_CVTLQ))
+ (set (match_dup 0) (float:DF (match_dup 2)))]
+{
+ operands[1] = adjust_address (operands[1], SFmode, 0);
+})
+
+(define_insn_and_split "*floatsidf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float:DF (match_operand:SI 1 "memory_operand" "m")))]
+ "TARGET_FP"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 2) (unspec:DI [(match_dup 3)] UNSPEC_CVTLQ))
+ (set (match_dup 0) (float:DF (match_dup 2)))]
+{
+ operands[1] = adjust_address (operands[1], SFmode, 0);
+ operands[2] = gen_rtx_REG (DImode, REGNO (operands[0]));
+ operands[3] = gen_rtx_REG (SFmode, REGNO (operands[0]));
+})
+
+(define_expand "floatditf2"
+ [(use (match_operand:TF 0 "register_operand" ""))
+ (use (match_operand:DI 1 "general_operand" ""))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "alpha_emit_xfloating_cvt (FLOAT, operands); DONE;")
+
+(define_expand "floatunsdisf2"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:DI 1 "register_operand" ""))]
+ "TARGET_FP"
+ "alpha_emit_floatuns (operands); DONE;")
+
+(define_expand "floatunsdidf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DI 1 "register_operand" ""))]
+ "TARGET_FP"
+ "alpha_emit_floatuns (operands); DONE;")
+
+(define_expand "floatunsditf2"
+ [(use (match_operand:TF 0 "register_operand" ""))
+ (use (match_operand:DI 1 "general_operand" ""))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "alpha_emit_xfloating_cvt (UNSIGNED_FLOAT, operands); DONE;")
+
+(define_expand "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "")))]
+ "TARGET_FP"
+{
+ if (alpha_fptm >= ALPHA_FPTM_SU)
+ operands[1] = force_reg (SFmode, operands[1]);
+})
+
+;; The Unicos/Mk assembler doesn't support cvtst, but we've already
+;; asserted that alpha_fptm == ALPHA_FPTM_N.
+
+(define_insn "*extendsfdf2_ieee"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "cvtsts %1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn "*extendsfdf2_internal"
+ [(set (match_operand:DF 0 "register_operand" "=f,f,m")
+ (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "f,m,f")))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "@
+ cpys %1,%1,%0
+ ld%, %0,%1
+ st%- %1,%0"
+ [(set_attr "type" "fcpys,fld,fst")])
+
+;; Use register_operand for operand 1 to prevent compress_float_constant
+;; from doing something silly. When optimizing we'll put things back
+;; together anyway.
+(define_expand "extendsftf2"
+ [(use (match_operand:TF 0 "register_operand" ""))
+ (use (match_operand:SF 1 "register_operand" ""))]
+ "TARGET_HAS_XFLOATING_LIBS"
+{
+ rtx tmp = gen_reg_rtx (DFmode);
+ emit_insn (gen_extendsfdf2 (tmp, operands[1]));
+ emit_insn (gen_extenddftf2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "extenddftf2"
+ [(use (match_operand:TF 0 "register_operand" ""))
+ (use (match_operand:DF 1 "register_operand" ""))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "alpha_emit_xfloating_cvt (FLOAT_EXTEND, operands); DONE;")
+
+(define_insn "*truncdfsf2_ieee"
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (float_truncate:SF (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "cvt%-%,%/ %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "cvt%-%,%/ %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_expand "trunctfdf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:TF 1 "general_operand" ""))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "alpha_emit_xfloating_cvt (FLOAT_TRUNCATE, operands); DONE;")
+
+(define_expand "trunctfsf2"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:TF 1 "general_operand" ""))]
+ "TARGET_FP && TARGET_HAS_XFLOATING_LIBS"
+{
+ rtx tmpf, sticky, arg, lo, hi;
+
+ tmpf = gen_reg_rtx (DFmode);
+ sticky = gen_reg_rtx (DImode);
+ arg = copy_to_mode_reg (TFmode, operands[1]);
+ lo = gen_lowpart (DImode, arg);
+ hi = gen_highpart (DImode, arg);
+
+ /* Convert the low word of the TFmode value into a sticky rounding bit,
+ then or it into the low bit of the high word. This leaves the sticky
+ bit at bit 48 of the fraction, which is representable in DFmode,
+ which prevents rounding error in the final conversion to SFmode. */
+
+ emit_insn (gen_rtx_SET (VOIDmode, sticky,
+ gen_rtx_NE (DImode, lo, const0_rtx)));
+ emit_insn (gen_iordi3 (hi, hi, sticky));
+ emit_insn (gen_trunctfdf2 (tmpf, arg));
+ emit_insn (gen_truncdfsf2 (operands[0], tmpf));
+ DONE;
+})
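+
+;; (The point of the sticky bit is to avoid double rounding: SFmode keeps
+;; only 23 fraction bits, so a nonzero bit parked at fraction bit 48 --
+;; exactly representable within DFmode's 52 -- cannot change the final
+;; SFmode result except to break a round-to-nearest tie the right way.)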
+
+(define_insn "*divsf3_ieee"
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (div:SF (match_operand:SF 1 "reg_or_0_operand" "fG")
+ (match_operand:SF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "div%,%/ %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "opsize" "si")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (div:SF (match_operand:SF 1 "reg_or_0_operand" "fG")
+ (match_operand:SF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "div%,%/ %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "opsize" "si")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*divdf3_ieee"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (div:DF (match_operand:DF 1 "reg_or_0_operand" "fG")
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "div%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (div:DF (match_operand:DF 1 "reg_or_0_operand" "fG")
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "div%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*divdf_ext1"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (div:DF (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG"))
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "div%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*divdf_ext2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (div:DF (match_operand:DF 1 "reg_or_0_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 2 "reg_or_0_operand" "fG"))))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "div%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*divdf_ext3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (div:DF (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG"))
+ (float_extend:DF (match_operand:SF 2 "reg_or_0_operand" "fG"))))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "div%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_expand "divtf3"
+ [(use (match_operand 0 "register_operand" ""))
+ (use (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "general_operand" ""))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "alpha_emit_xfloating_arith (DIV, operands); DONE;")
+
+(define_insn "*mulsf3_ieee"
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (mult:SF (match_operand:SF 1 "reg_or_0_operand" "%fG")
+ (match_operand:SF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "mul%,%/ %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "reg_or_0_operand" "%fG")
+ (match_operand:SF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "mul%,%/ %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*muldf3_ieee"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (mult:DF (match_operand:DF 1 "reg_or_0_operand" "%fG")
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "mul%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "reg_or_0_operand" "%fG")
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "mul%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*muldf_ext1"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_0_operand" "fG"))
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "mul%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*muldf_ext2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_0_operand" "%fG"))
+ (float_extend:DF
+ (match_operand:SF 2 "reg_or_0_operand" "fG"))))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "mul%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_expand "multf3"
+ [(use (match_operand 0 "register_operand" ""))
+ (use (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "general_operand" ""))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "alpha_emit_xfloating_arith (MULT, operands); DONE;")
+
+(define_insn "*subsf3_ieee"
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (minus:SF (match_operand:SF 1 "reg_or_0_operand" "fG")
+ (match_operand:SF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "sub%,%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (minus:SF (match_operand:SF 1 "reg_or_0_operand" "fG")
+ (match_operand:SF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "sub%,%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*subdf3_ieee"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (minus:DF (match_operand:DF 1 "reg_or_0_operand" "fG")
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "sub%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (minus:DF (match_operand:DF 1 "reg_or_0_operand" "fG")
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP"
+ "sub%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*subdf_ext1"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_0_operand" "fG"))
+ (match_operand:DF 2 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "sub%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*subdf_ext2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (minus:DF (match_operand:DF 1 "reg_or_0_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 2 "reg_or_0_operand" "fG"))))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "sub%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*subdf_ext3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "reg_or_0_operand" "fG"))
+ (float_extend:DF
+ (match_operand:SF 2 "reg_or_0_operand" "fG"))))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "sub%-%/ %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_expand "subtf3"
+ [(use (match_operand 0 "register_operand" ""))
+ (use (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "general_operand" ""))]
+ "TARGET_HAS_XFLOATING_LIBS"
+ "alpha_emit_xfloating_arith (MINUS, operands); DONE;")
+
+(define_insn "*sqrtsf2_ieee"
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (sqrt:SF (match_operand:SF 1 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && TARGET_FIX && alpha_fptm >= ALPHA_FPTM_SU"
+ "sqrt%,%/ %R1,%0"
+ [(set_attr "type" "fsqrt")
+ (set_attr "opsize" "si")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && TARGET_FIX"
+ "sqrt%,%/ %R1,%0"
+ [(set_attr "type" "fsqrt")
+ (set_attr "opsize" "si")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "*sqrtdf2_ieee"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (sqrt:DF (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && TARGET_FIX && alpha_fptm >= ALPHA_FPTM_SU"
+ "sqrt%-%/ %R1,%0"
+ [(set_attr "type" "fsqrt")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+ "TARGET_FP && TARGET_FIX"
+ "sqrt%-%/ %R1,%0"
+ [(set_attr "type" "fsqrt")
+ (set_attr "trap" "yes")
+ (set_attr "round_suffix" "normal")
+ (set_attr "trap_suffix" "u_su_sui")])
+
+;; Next are all the integer comparisons, and conditional moves and branches
+;; and some of the related define_expand's and define_split's.
+
+(define_insn "*setcc_internal"
+ [(set (match_operand 0 "register_operand" "=r")
+ (match_operator 1 "alpha_comparison_operator"
+ [(match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")]))]
+ "GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= 8
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])"
+ "cmp%C1 %2,%3,%0"
+ [(set_attr "type" "icmp")])
+
+;; Yes, we can technically support reg_or_8bit_operand in operand 2,
+;; but that's non-canonical rtl and allowing that causes inefficiencies
+;; from cse on.
+(define_insn "*setcc_swapped_internal"
+ [(set (match_operand 0 "register_operand" "=r")
+ (match_operator 1 "alpha_swapped_comparison_operator"
+ [(match_operand:DI 2 "register_operand" "r")
+ (match_operand:DI 3 "reg_or_0_operand" "rJ")]))]
+ "GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= 8
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])"
+ "cmp%c1 %r3,%2,%0"
+ [(set_attr "type" "icmp")])
+
+;; Use match_operator rather than ne directly so that we can match
+;; multiple integer modes.
+(define_insn "*setne_internal"
+ [(set (match_operand 0 "register_operand" "=r")
+ (match_operator 1 "signed_comparison_operator"
+ [(match_operand:DI 2 "register_operand" "r")
+ (const_int 0)]))]
+ "GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= 8
+ && GET_CODE (operands[1]) == NE
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])"
+ "cmpult $31,%2,%0"
+ [(set_attr "type" "icmp")])
+
+;; The mode folding trick can't be used with const_int operands, since
+;; reload needs to know the proper mode.
+;;
+;; Use add_operand instead of the seemingly more natural reg_or_8bit_operand
+;; in order to create more pairs of constants.  As long as we're allowing
+;; two constants at the same time, we will have to reload one of them anyway.
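+;;
+;; For example, r0 = (r3 == 0 ? 1 : 4) can come out as (registers
+;; arbitrary):
+;;
+;;	lda	$0,4($31)	# materialize the "else" arm in the destination
+;;	cmoveq	$3,1,$0		# overwrite with the "then" arm when r3 == 0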
+
+(define_insn "*movqicc_internal"
+ [(set (match_operand:QI 0 "register_operand" "=r,r,r,r")
+ (if_then_else:QI
+ (match_operator 2 "signed_comparison_operator"
+ [(match_operand:DI 3 "reg_or_0_operand" "rJ,rJ,J,J")
+ (match_operand:DI 4 "reg_or_0_operand" "J,J,rJ,rJ")])
+ (match_operand:QI 1 "add_operand" "rI,0,rI,0")
+ (match_operand:QI 5 "add_operand" "0,rI,0,rI")))]
+ "(operands[3] == const0_rtx) ^ (operands[4] == const0_rtx)"
+ "@
+ cmov%C2 %r3,%1,%0
+ cmov%D2 %r3,%5,%0
+ cmov%c2 %r4,%1,%0
+ cmov%d2 %r4,%5,%0"
+ [(set_attr "type" "icmov")])
+
+(define_insn "*movhicc_internal"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
+ (if_then_else:HI
+ (match_operator 2 "signed_comparison_operator"
+ [(match_operand:DI 3 "reg_or_0_operand" "rJ,rJ,J,J")
+ (match_operand:DI 4 "reg_or_0_operand" "J,J,rJ,rJ")])
+ (match_operand:HI 1 "add_operand" "rI,0,rI,0")
+ (match_operand:HI 5 "add_operand" "0,rI,0,rI")))]
+ "(operands[3] == const0_rtx) ^ (operands[4] == const0_rtx)"
+ "@
+ cmov%C2 %r3,%1,%0
+ cmov%D2 %r3,%5,%0
+ cmov%c2 %r4,%1,%0
+ cmov%d2 %r4,%5,%0"
+ [(set_attr "type" "icmov")])
+
+(define_insn "*movsicc_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (if_then_else:SI
+ (match_operator 2 "signed_comparison_operator"
+ [(match_operand:DI 3 "reg_or_0_operand" "rJ,rJ,J,J")
+ (match_operand:DI 4 "reg_or_0_operand" "J,J,rJ,rJ")])
+ (match_operand:SI 1 "add_operand" "rI,0,rI,0")
+ (match_operand:SI 5 "add_operand" "0,rI,0,rI")))]
+ "(operands[3] == const0_rtx) ^ (operands[4] == const0_rtx)"
+ "@
+ cmov%C2 %r3,%1,%0
+ cmov%D2 %r3,%5,%0
+ cmov%c2 %r4,%1,%0
+ cmov%d2 %r4,%5,%0"
+ [(set_attr "type" "icmov")])
+
+(define_insn "*movdicc_internal"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r")
+ (if_then_else:DI
+ (match_operator 2 "signed_comparison_operator"
+ [(match_operand:DI 3 "reg_or_0_operand" "rJ,rJ,J,J")
+ (match_operand:DI 4 "reg_or_0_operand" "J,J,rJ,rJ")])
+ (match_operand:DI 1 "add_operand" "rI,0,rI,0")
+ (match_operand:DI 5 "add_operand" "0,rI,0,rI")))]
+ "(operands[3] == const0_rtx) ^ (operands[4] == const0_rtx)"
+ "@
+ cmov%C2 %r3,%1,%0
+ cmov%D2 %r3,%5,%0
+ cmov%c2 %r4,%1,%0
+ cmov%d2 %r4,%5,%0"
+ [(set_attr "type" "icmov")])
+
+(define_insn "*movqicc_lbc"
+ [(set (match_operand:QI 0 "register_operand" "=r,r")
+ (if_then_else:QI
+ (eq (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (match_operand:QI 1 "reg_or_8bit_operand" "rI,0")
+ (match_operand:QI 3 "reg_or_8bit_operand" "0,rI")))]
+ ""
+ "@
+ cmovlbc %r2,%1,%0
+ cmovlbs %r2,%3,%0"
+ [(set_attr "type" "icmov")])
+
+(define_insn "*movhicc_lbc"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (if_then_else:HI
+ (eq (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (match_operand:HI 1 "reg_or_8bit_operand" "rI,0")
+ (match_operand:HI 3 "reg_or_8bit_operand" "0,rI")))]
+ ""
+ "@
+ cmovlbc %r2,%1,%0
+ cmovlbs %r2,%3,%0"
+ [(set_attr "type" "icmov")])
+
+(define_insn "*movsicc_lbc"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI
+ (eq (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (match_operand:SI 1 "reg_or_8bit_operand" "rI,0")
+ (match_operand:SI 3 "reg_or_8bit_operand" "0,rI")))]
+ ""
+ "@
+ cmovlbc %r2,%1,%0
+ cmovlbs %r2,%3,%0"
+ [(set_attr "type" "icmov")])
+
+(define_insn "*movdicc_lbc"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (if_then_else:DI
+ (eq (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (match_operand:DI 1 "reg_or_8bit_operand" "rI,0")
+ (match_operand:DI 3 "reg_or_8bit_operand" "0,rI")))]
+ ""
+ "@
+ cmovlbc %r2,%1,%0
+ cmovlbs %r2,%3,%0"
+ [(set_attr "type" "icmov")])
+
+(define_insn "*movqicc_lbs"
+ [(set (match_operand:QI 0 "register_operand" "=r,r")
+ (if_then_else:QI
+ (ne (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (match_operand:QI 1 "reg_or_8bit_operand" "rI,0")
+ (match_operand:QI 3 "reg_or_8bit_operand" "0,rI")))]
+ ""
+ "@
+ cmovlbs %r2,%1,%0
+ cmovlbc %r2,%3,%0"
+ [(set_attr "type" "icmov")])
+
+(define_insn "*movhicc_lbs"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (if_then_else:HI
+ (ne (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (match_operand:HI 1 "reg_or_8bit_operand" "rI,0")
+ (match_operand:HI 3 "reg_or_8bit_operand" "0,rI")))]
+ ""
+ "@
+ cmovlbs %r2,%1,%0
+ cmovlbc %r2,%3,%0"
+ [(set_attr "type" "icmov")])
+
+(define_insn "*movsicc_lbs"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI
+ (ne (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (match_operand:SI 1 "reg_or_8bit_operand" "rI,0")
+ (match_operand:SI 3 "reg_or_8bit_operand" "0,rI")))]
+ ""
+ "@
+ cmovlbs %r2,%1,%0
+ cmovlbc %r2,%3,%0"
+ [(set_attr "type" "icmov")])
+
+(define_insn "*movdicc_lbs"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (if_then_else:DI
+ (ne (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (match_operand:DI 1 "reg_or_8bit_operand" "rI,0")
+ (match_operand:DI 3 "reg_or_8bit_operand" "0,rI")))]
+ ""
+ "@
+ cmovlbs %r2,%1,%0
+ cmovlbc %r2,%3,%0"
+ [(set_attr "type" "icmov")])
+
+;; For ABS, we have two choices, depending on whether the input and output
+;; registers are the same or not.
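+;; As a rough illustration (register numbers are arbitrary), the two
+;; sequences are, for input and output in the same register,
+;;   subq   $31,$16,$28    ; t = -x
+;;   cmovlt $16,$28,$16    ; if (x < 0) x = t
+;; and, for distinct registers,
+;;   subq   $31,$16,$0     ; r = -x
+;;   cmovge $16,$16,$0     ; if (x >= 0) r = x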
+(define_expand "absdi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (abs:DI (match_operand:DI 1 "register_operand" "")))]
+ ""
+{
+ if (rtx_equal_p (operands[0], operands[1]))
+ emit_insn (gen_absdi2_same (operands[0], gen_reg_rtx (DImode)));
+ else
+ emit_insn (gen_absdi2_diff (operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "absdi2_same"
+ [(set (match_operand:DI 1 "register_operand" "")
+ (neg:DI (match_operand:DI 0 "register_operand" "")))
+ (set (match_dup 0)
+ (if_then_else:DI (ge (match_dup 0) (const_int 0))
+ (match_dup 0)
+ (match_dup 1)))]
+ ""
+ "")
+
+(define_expand "absdi2_diff"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (neg:DI (match_operand:DI 1 "register_operand" "")))
+ (set (match_dup 0)
+ (if_then_else:DI (lt (match_dup 1) (const_int 0))
+ (match_dup 0)
+ (match_dup 1)))]
+ ""
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (abs:DI (match_dup 0)))
+ (clobber (match_operand:DI 1 "register_operand" ""))]
+ ""
+ [(set (match_dup 1) (neg:DI (match_dup 0)))
+ (set (match_dup 0) (if_then_else:DI (ge (match_dup 0) (const_int 0))
+ (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (abs:DI (match_operand:DI 1 "register_operand" "")))]
+ "! rtx_equal_p (operands[0], operands[1])"
+ [(set (match_dup 0) (neg:DI (match_dup 1)))
+ (set (match_dup 0) (if_then_else:DI (lt (match_dup 1) (const_int 0))
+ (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (neg:DI (abs:DI (match_dup 0))))
+ (clobber (match_operand:DI 1 "register_operand" ""))]
+ ""
+ [(set (match_dup 1) (neg:DI (match_dup 0)))
+ (set (match_dup 0) (if_then_else:DI (le (match_dup 0) (const_int 0))
+ (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (neg:DI (abs:DI (match_operand:DI 1 "register_operand" ""))))]
+ "! rtx_equal_p (operands[0], operands[1])"
+ [(set (match_dup 0) (neg:DI (match_dup 1)))
+ (set (match_dup 0) (if_then_else:DI (gt (match_dup 1) (const_int 0))
+ (match_dup 0) (match_dup 1)))]
+ "")
+
+(define_insn "sminqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (smin:QI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "minsb8 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "uminqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (umin:QI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "minub8 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "smaxqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (smax:QI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "maxsb8 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "umaxqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (umax:QI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "maxub8 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "sminhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (smin:HI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "minsw4 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "uminhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (umin:HI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "minuw4 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "smaxhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (smax:HI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "maxsw4 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "umaxhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (umax:HI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "maxuw4 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_expand "smaxdi3"
+ [(set (match_dup 3)
+ (le:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (eq (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ ""
+ { operands[3] = gen_reg_rtx (DImode); })
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (smax:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (clobber (match_operand:DI 3 "register_operand" ""))]
+ "operands[2] != const0_rtx"
+ [(set (match_dup 3) (le:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (if_then_else:DI (eq (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ "")
+
+(define_insn "*smax_const0"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (smax:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 0)))]
+ ""
+ "cmovlt %0,0,%0"
+ [(set_attr "type" "icmov")])
+
+(define_expand "smindi3"
+ [(set (match_dup 3)
+ (lt:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (ne (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ ""
+ { operands[3] = gen_reg_rtx (DImode); })
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (smin:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (clobber (match_operand:DI 3 "register_operand" ""))]
+ "operands[2] != const0_rtx"
+ [(set (match_dup 3) (lt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (if_then_else:DI (ne (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ "")
+
+(define_insn "*smin_const0"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (smin:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 0)))]
+ ""
+ "cmovgt %0,0,%0"
+ [(set_attr "type" "icmov")])
+
+(define_expand "umaxdi3"
+ [(set (match_dup 3)
+ (leu:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (eq (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ ""
+ "operands[3] = gen_reg_rtx (DImode);")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (umax:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (clobber (match_operand:DI 3 "register_operand" ""))]
+ "operands[2] != const0_rtx"
+ [(set (match_dup 3) (leu:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (if_then_else:DI (eq (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ "")
+
+(define_expand "umindi3"
+ [(set (match_dup 3)
+ (ltu:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (ne (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ ""
+ "operands[3] = gen_reg_rtx (DImode);")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (umin:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")))
+ (clobber (match_operand:DI 3 "register_operand" ""))]
+ "operands[2] != const0_rtx"
+ [(set (match_dup 3) (ltu:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (if_then_else:DI (ne (match_dup 3) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ "")
+
+(define_insn "*bcc_normal"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "signed_comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "b%C1 %r2,%0"
+ [(set_attr "type" "ibr")])
+
+(define_insn "*bcc_reverse"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "signed_comparison_operator"
+ [(match_operand:DI 2 "register_operand" "r")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "b%c1 %2,%0"
+ [(set_attr "type" "ibr")])
+
+(define_insn "*blbs_normal"
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "blbs %r1,%0"
+ [(set_attr "type" "ibr")])
+
+(define_insn "*blbc_normal"
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 1)
+ (const_int 0))
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "blbc %r1,%0"
+ [(set_attr "type" "ibr")])
+
+(define_split
+ [(parallel
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "comparison_operator"
+ [(zero_extract:DI (match_operand:DI 2 "register_operand" "")
+ (const_int 1)
+ (match_operand:DI 3 "const_int_operand" ""))
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (clobber (match_operand:DI 4 "register_operand" ""))])]
+ "INTVAL (operands[3]) != 0"
+ [(set (match_dup 4)
+ (lshiftrt:DI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (match_op_dup 1
+ [(zero_extract:DI (match_dup 4)
+ (const_int 1)
+ (const_int 0))
+ (const_int 0)])
+ (label_ref (match_dup 0))
+ (pc)))]
+ "")
+
+;; The following are the corresponding floating-point insns. Recall
+;; we need to have variants that expand the arguments from SFmode
+;; to DFmode.
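+;;
+;; For example, for "float a; double b; ... a < b" combine produces
+;; (lt:DF (float_extend:DF (reg:SF a)) (reg:DF b)); the *_ext patterns
+;; below match that directly, which is safe because S-float values are
+;; kept in the floating registers in T-float (register) format, so the
+;; extension costs no instruction.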
+
+(define_insn "*cmpdf_ieee"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (match_operator:DF 1 "alpha_fp_comparison_operator"
+ [(match_operand:DF 2 "reg_or_0_operand" "fG")
+ (match_operand:DF 3 "reg_or_0_operand" "fG")]))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "cmp%-%C1%/ %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "trap_suffix" "su")])
+
+(define_insn "*cmpdf_internal"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (match_operator:DF 1 "alpha_fp_comparison_operator"
+ [(match_operand:DF 2 "reg_or_0_operand" "fG")
+ (match_operand:DF 3 "reg_or_0_operand" "fG")]))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "cmp%-%C1%/ %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "trap_suffix" "su")])
+
+(define_insn "*cmpdf_ieee_ext1"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (match_operator:DF 1 "alpha_fp_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 2 "reg_or_0_operand" "fG"))
+ (match_operand:DF 3 "reg_or_0_operand" "fG")]))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "cmp%-%C1%/ %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "trap_suffix" "su")])
+
+(define_insn "*cmpdf_ext1"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (match_operator:DF 1 "alpha_fp_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 2 "reg_or_0_operand" "fG"))
+ (match_operand:DF 3 "reg_or_0_operand" "fG")]))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "cmp%-%C1%/ %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "trap_suffix" "su")])
+
+(define_insn "*cmpdf_ieee_ext2"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (match_operator:DF 1 "alpha_fp_comparison_operator"
+ [(match_operand:DF 2 "reg_or_0_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 3 "reg_or_0_operand" "fG"))]))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "cmp%-%C1%/ %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "trap_suffix" "su")])
+
+(define_insn "*cmpdf_ext2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (match_operator:DF 1 "alpha_fp_comparison_operator"
+ [(match_operand:DF 2 "reg_or_0_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 3 "reg_or_0_operand" "fG"))]))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "cmp%-%C1%/ %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "trap_suffix" "su")])
+
+(define_insn "*cmpdf_ieee_ext3"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (match_operator:DF 1 "alpha_fp_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 2 "reg_or_0_operand" "fG"))
+ (float_extend:DF
+ (match_operand:SF 3 "reg_or_0_operand" "fG"))]))]
+ "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU"
+ "cmp%-%C1%/ %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "trap_suffix" "su")])
+
+(define_insn "*cmpdf_ext3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (match_operator:DF 1 "alpha_fp_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 2 "reg_or_0_operand" "fG"))
+ (float_extend:DF
+ (match_operand:SF 3 "reg_or_0_operand" "fG"))]))]
+ "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU"
+ "cmp%-%C1%/ %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")
+ (set_attr "trap_suffix" "su")])
+
+(define_insn "*movdfcc_internal"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (if_then_else:DF
+ (match_operator 3 "signed_comparison_operator"
+ [(match_operand:DF 4 "reg_or_0_operand" "fG,fG")
+ (match_operand:DF 2 "const0_operand" "G,G")])
+ (match_operand:DF 1 "reg_or_0_operand" "fG,0")
+ (match_operand:DF 5 "reg_or_0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fcmov")])
+
+(define_insn "*movsfcc_internal"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (if_then_else:SF
+ (match_operator 3 "signed_comparison_operator"
+ [(match_operand:DF 4 "reg_or_0_operand" "fG,fG")
+ (match_operand:DF 2 "const0_operand" "G,G")])
+ (match_operand:SF 1 "reg_or_0_operand" "fG,0")
+ (match_operand:SF 5 "reg_or_0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fcmov")])
+
+(define_insn "*movdfcc_ext1"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (if_then_else:DF
+ (match_operator 3 "signed_comparison_operator"
+ [(match_operand:DF 4 "reg_or_0_operand" "fG,fG")
+ (match_operand:DF 2 "const0_operand" "G,G")])
+ (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG,0"))
+ (match_operand:DF 5 "reg_or_0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fcmov")])
+
+(define_insn "*movdfcc_ext2"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (if_then_else:DF
+ (match_operator 3 "signed_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 4 "reg_or_0_operand" "fG,fG"))
+ (match_operand:DF 2 "const0_operand" "G,G")])
+ (match_operand:DF 1 "reg_or_0_operand" "fG,0")
+ (match_operand:DF 5 "reg_or_0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fcmov")])
+
+(define_insn "*movdfcc_ext3"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (if_then_else:SF
+ (match_operator 3 "signed_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 4 "reg_or_0_operand" "fG,fG"))
+ (match_operand:DF 2 "const0_operand" "G,G")])
+ (match_operand:SF 1 "reg_or_0_operand" "fG,0")
+ (match_operand:SF 5 "reg_or_0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fcmov")])
+
+(define_insn "*movdfcc_ext4"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (if_then_else:DF
+ (match_operator 3 "signed_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 4 "reg_or_0_operand" "fG,fG"))
+ (match_operand:DF 2 "const0_operand" "G,G")])
+ (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG,0"))
+ (match_operand:DF 5 "reg_or_0_operand" "0,fG")))]
+ "TARGET_FP"
+ "@
+ fcmov%C3 %R4,%R1,%0
+ fcmov%D3 %R4,%R5,%0"
+ [(set_attr "type" "fcmov")])
+
+(define_expand "smaxdf3"
+ [(set (match_dup 3)
+ (le:DF (match_operand:DF 1 "reg_or_0_operand" "")
+ (match_operand:DF 2 "reg_or_0_operand" "")))
+ (set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (eq (match_dup 3) (match_dup 4))
+ (match_dup 1) (match_dup 2)))]
+ "TARGET_FP"
+{
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = CONST0_RTX (DFmode);
+})
+
+(define_expand "smindf3"
+ [(set (match_dup 3)
+ (lt:DF (match_operand:DF 1 "reg_or_0_operand" "")
+ (match_operand:DF 2 "reg_or_0_operand" "")))
+ (set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (ne (match_dup 3) (match_dup 4))
+ (match_dup 1) (match_dup 2)))]
+ "TARGET_FP"
+{
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = CONST0_RTX (DFmode);
+})
+
+(define_expand "smaxsf3"
+ [(set (match_dup 3)
+ (le:DF (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" ""))
+ (float_extend:DF (match_operand:SF 2 "reg_or_0_operand" ""))))
+ (set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (eq (match_dup 3) (match_dup 4))
+ (match_dup 1) (match_dup 2)))]
+ "TARGET_FP"
+{
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = CONST0_RTX (DFmode);
+})
+
+(define_expand "sminsf3"
+ [(set (match_dup 3)
+ (lt:DF (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" ""))
+ (float_extend:DF (match_operand:SF 2 "reg_or_0_operand" ""))))
+ (set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (ne (match_dup 3) (match_dup 4))
+ (match_dup 1) (match_dup 2)))]
+ "TARGET_FP"
+{
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = CONST0_RTX (DFmode);
+})
+
+(define_insn "*fbcc_normal"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "signed_comparison_operator"
+ [(match_operand:DF 2 "reg_or_0_operand" "fG")
+ (match_operand:DF 3 "const0_operand" "G")])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_FP"
+ "fb%C1 %R2,%0"
+ [(set_attr "type" "fbr")])
+
+(define_insn "*fbcc_ext_normal"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "signed_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 2 "reg_or_0_operand" "fG"))
+ (match_operand:DF 3 "const0_operand" "G")])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_FP"
+ "fb%C1 %R2,%0"
+ [(set_attr "type" "fbr")])
+
+;; These are the main define_expand's used to make conditional branches
+;; and compares.
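+;;
+;; For illustration, a C-level "if (x < y)" first runs the cmpdi expander
+;; below, which only records its operands in alpha_compare; the following
+;; blt expander then emits the real code, typically something like
+;;   cmplt $16,$17,$28     ; t = (x < y)
+;;   bne   $28,$L2         ; branch if t != 0
+;; (registers and label are illustrative).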
+
+(define_expand "cmpdf"
+ [(set (cc0) (compare (match_operand:DF 0 "reg_or_0_operand" "")
+ (match_operand:DF 1 "reg_or_0_operand" "")))]
+ "TARGET_FP"
+{
+ alpha_compare.op0 = operands[0];
+ alpha_compare.op1 = operands[1];
+ alpha_compare.fp_p = 1;
+ DONE;
+})
+
+(define_expand "cmptf"
+ [(set (cc0) (compare (match_operand:TF 0 "general_operand" "")
+ (match_operand:TF 1 "general_operand" "")))]
+ "TARGET_HAS_XFLOATING_LIBS"
+{
+ alpha_compare.op0 = operands[0];
+ alpha_compare.op1 = operands[1];
+ alpha_compare.fp_p = 1;
+ DONE;
+})
+
+(define_expand "cmpdi"
+ [(set (cc0) (compare (match_operand:DI 0 "some_operand" "")
+ (match_operand:DI 1 "some_operand" "")))]
+ ""
+{
+ alpha_compare.op0 = operands[0];
+ alpha_compare.op1 = operands[1];
+ alpha_compare.fp_p = 0;
+ DONE;
+})
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (EQ); }")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (NE); }")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (LT); }")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (LE); }")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (GT); }")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (GE); }")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (LTU); }")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (LEU); }")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (GTU); }")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (GEU); }")
+
+(define_expand "bunordered"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (UNORDERED); }")
+
+(define_expand "bordered"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "{ operands[1] = alpha_emit_conditional_branch (ORDERED); }")
+
+(define_expand "seq"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (EQ)) == NULL_RTX) FAIL; }")
+
+(define_expand "sne"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (NE)) == NULL_RTX) FAIL; }")
+
+(define_expand "slt"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (LT)) == NULL_RTX) FAIL; }")
+
+(define_expand "sle"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (LE)) == NULL_RTX) FAIL; }")
+
+(define_expand "sgt"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (GT)) == NULL_RTX) FAIL; }")
+
+(define_expand "sge"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (GE)) == NULL_RTX) FAIL; }")
+
+(define_expand "sltu"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (LTU)) == NULL_RTX) FAIL; }")
+
+(define_expand "sleu"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (LEU)) == NULL_RTX) FAIL; }")
+
+(define_expand "sgtu"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (GTU)) == NULL_RTX) FAIL; }")
+
+(define_expand "sgeu"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (GEU)) == NULL_RTX) FAIL; }")
+
+(define_expand "sunordered"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (UNORDERED)) == NULL_RTX) FAIL; }")
+
+(define_expand "sordered"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "{ if ((operands[1] = alpha_emit_setcc (ORDERED)) == NULL_RTX) FAIL; }")
+
+;; These are the main define_expand's used to make conditional moves.
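+;;
+;; For illustration (register assignments arbitrary), "r = (x == 0) ? a : b"
+;; should end up as a single conditional move once
+;; alpha_emit_conditional_move has canonicalized the test against zero:
+;;   mov    $18,$0         ; r = b
+;;   cmoveq $16,$17,$0     ; if (x == 0) r = a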
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "reg_or_8bit_operand" "")
+ (match_operand:SI 3 "reg_or_8bit_operand" "")))]
+ ""
+{
+ if ((operands[1] = alpha_emit_conditional_move (operands[1], SImode)) == 0)
+ FAIL;
+})
+
+(define_expand "movdicc"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (match_operand 1 "comparison_operator" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")
+ (match_operand:DI 3 "reg_or_8bit_operand" "")))]
+ ""
+{
+ if ((operands[1] = alpha_emit_conditional_move (operands[1], DImode)) == 0)
+ FAIL;
+})
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "reg_or_8bit_operand" "")
+ (match_operand:SF 3 "reg_or_8bit_operand" "")))]
+ ""
+{
+ if ((operands[1] = alpha_emit_conditional_move (operands[1], SFmode)) == 0)
+ FAIL;
+})
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "reg_or_8bit_operand" "")
+ (match_operand:DF 3 "reg_or_8bit_operand" "")))]
+ ""
+{
+ if ((operands[1] = alpha_emit_conditional_move (operands[1], DFmode)) == 0)
+ FAIL;
+})
+
+;; These define_split definitions are used in cases where comparisons have
+;; not been stated in the correct way and we need to reverse the second
+;; comparison.  For example, x >= 7 has to be done as x < 7 with the
+;; comparison that tests the result being reversed. We have one define_split
+;; for each use of a comparison. They do not match valid insns and need
+;; not generate valid insns.
+;;
+;; We can also handle equality comparisons (and inequality comparisons in
+;; cases where the resulting add cannot overflow) by doing an add followed by
+;; a comparison with zero. This is faster since the addition takes one
+;; less cycle than a compare when feeding into a conditional move.
+;; For this case, we also have an SImode pattern since we can merge the add
+;; and sign extend and the order doesn't matter.
+;;
+;; We do not do this for floating-point, since it isn't clear how the "wrong"
+;; operation could have been generated.
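+;;
+;; As a sketch of the add-and-test form (values and registers arbitrary),
+;; "r = (x == 7) ? a : b" becomes
+;;   lda    $28,-7($16)    ; t = x - 7
+;;   mov    $18,$0         ; r = b
+;;   cmoveq $28,$17,$0     ; if (t == 0) r = a
+;; rather than a cmpeq feeding the cmov.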
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "")
+ (match_operand:DI 3 "reg_or_cint_operand" "")])
+ (match_operand:DI 4 "reg_or_cint_operand" "")
+ (match_operand:DI 5 "reg_or_cint_operand" "")))
+ (clobber (match_operand:DI 6 "register_operand" ""))]
+ "operands[3] != const0_rtx"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:DI (match_dup 8) (match_dup 4) (match_dup 5)))]
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ int unsignedp = (code == GEU || code == LEU || code == GTU || code == LTU);
+
+ /* If we are comparing for equality with a constant and that constant
+ appears in the arm when the register equals the constant, use the
+ register since that is more likely to match (and to produce better code
+ if both would). */
+
+ if (code == EQ && GET_CODE (operands[3]) == CONST_INT
+ && rtx_equal_p (operands[4], operands[3]))
+ operands[4] = operands[2];
+
+ else if (code == NE && GET_CODE (operands[3]) == CONST_INT
+ && rtx_equal_p (operands[5], operands[3]))
+ operands[5] = operands[2];
+
+ if (code == NE || code == EQ
+ || (extended_count (operands[2], DImode, unsignedp) >= 1
+ && extended_count (operands[3], DImode, unsignedp) >= 1))
+ {
+ if (GET_CODE (operands[3]) == CONST_INT)
+ operands[7] = gen_rtx_PLUS (DImode, operands[2],
+ GEN_INT (- INTVAL (operands[3])));
+ else
+ operands[7] = gen_rtx_MINUS (DImode, operands[2], operands[3]);
+
+ operands[8] = gen_rtx_fmt_ee (code, VOIDmode, operands[6], const0_rtx);
+ }
+
+ else if (code == EQ || code == LE || code == LT
+ || code == LEU || code == LTU)
+ {
+ operands[7] = gen_rtx_fmt_ee (code, DImode, operands[2], operands[3]);
+ operands[8] = gen_rtx_NE (VOIDmode, operands[6], const0_rtx);
+ }
+ else
+ {
+ operands[7] = gen_rtx_fmt_ee (reverse_condition (code), DImode,
+ operands[2], operands[3]);
+ operands[8] = gen_rtx_EQ (VOIDmode, operands[6], const0_rtx);
+ }
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "reg_or_0_operand" "")
+ (match_operand:SI 3 "reg_or_cint_operand" "")])
+ (match_operand:DI 4 "reg_or_8bit_operand" "")
+ (match_operand:DI 5 "reg_or_8bit_operand" "")))
+ (clobber (match_operand:DI 6 "register_operand" ""))]
+ "operands[3] != const0_rtx
+ && (GET_CODE (operands[1]) == EQ || GET_CODE (operands[1]) == NE)"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:DI (match_dup 8) (match_dup 4) (match_dup 5)))]
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ int unsignedp = (code == GEU || code == LEU || code == GTU || code == LTU);
+ rtx tem;
+
+ if ((code != NE && code != EQ
+ && ! (extended_count (operands[2], DImode, unsignedp) >= 1
+ && extended_count (operands[3], DImode, unsignedp) >= 1)))
+ FAIL;
+
+ if (GET_CODE (operands[3]) == CONST_INT)
+ tem = gen_rtx_PLUS (SImode, operands[2],
+ GEN_INT (- INTVAL (operands[3])));
+ else
+ tem = gen_rtx_MINUS (SImode, operands[2], operands[3]);
+
+ operands[7] = gen_rtx_SIGN_EXTEND (DImode, tem);
+ operands[8] = gen_rtx_fmt_ee (GET_CODE (operands[1]), VOIDmode,
+ operands[6], const0_rtx);
+})
+
+;; Prefer to use cmp and arithmetic when possible instead of a cmove.
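+;; For instance (registers arbitrary), "r = (x < 0) ? 8 : 0" can become
+;;   cmplt $16,0,$28       ; t = (x < 0)
+;;   sll   $28,3,$0        ; r = t << 3
+;; via alpha_split_conditional_move, avoiding the cmov entirely.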
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (if_then_else (match_operator 1 "signed_comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "")
+ (const_int 0)])
+ (match_operand 3 "const_int_operand" "")
+ (match_operand 4 "const_int_operand" "")))]
+ ""
+ [(const_int 0)]
+{
+ if (alpha_split_conditional_move (GET_CODE (operands[1]), operands[0],
+ operands[2], operands[3], operands[4]))
+ DONE;
+ else
+ FAIL;
+})
+
+;; ??? I don't know why combine is allowed to create such non-canonical rtl.
+;; Oh well, we match it in movcc, so it must be partially our fault.
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (if_then_else (match_operator 1 "signed_comparison_operator"
+ [(const_int 0)
+ (match_operand:DI 2 "reg_or_0_operand" "")])
+ (match_operand 3 "const_int_operand" "")
+ (match_operand 4 "const_int_operand" "")))]
+ ""
+ [(const_int 0)]
+{
+ if (alpha_split_conditional_move (swap_condition (GET_CODE (operands[1])),
+ operands[0], operands[2], operands[3],
+ operands[4]))
+ DONE;
+ else
+ FAIL;
+})
+
+(define_insn_and_split "*cmp_sadd_di"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (if_then_else:DI
+ (match_operator 1 "alpha_zero_comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+ (const_int 0)])
+ (match_operand:DI 3 "const48_operand" "I")
+ (const_int 0))
+ (match_operand:DI 4 "sext_add_operand" "rIO")))
+ (clobber (match_scratch:DI 5 "=r"))]
+ ""
+ "#"
+ "! no_new_pseudos || reload_completed"
+ [(set (match_dup 5)
+ (match_op_dup:DI 1 [(match_dup 2) (const_int 0)]))
+ (set (match_dup 0)
+ (plus:DI (mult:DI (match_dup 5) (match_dup 3))
+ (match_dup 4)))]
+{
+ if (! no_new_pseudos)
+ operands[5] = gen_reg_rtx (DImode);
+ else if (reg_overlap_mentioned_p (operands[5], operands[4]))
+ operands[5] = operands[0];
+})
+
+(define_insn_and_split "*cmp_sadd_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (if_then_else:SI
+ (match_operator 1 "alpha_zero_comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+ (const_int 0)])
+ (match_operand:SI 3 "const48_operand" "I")
+ (const_int 0))
+ (match_operand:SI 4 "sext_add_operand" "rIO")))
+ (clobber (match_scratch:SI 5 "=r"))]
+ ""
+ "#"
+ "! no_new_pseudos || reload_completed"
+ [(set (match_dup 5)
+ (match_op_dup:SI 1 [(match_dup 2) (const_int 0)]))
+ (set (match_dup 0)
+ (plus:SI (mult:SI (match_dup 5) (match_dup 3))
+ (match_dup 4)))]
+{
+ if (! no_new_pseudos)
+    operands[5] = gen_reg_rtx (SImode);
+ else if (reg_overlap_mentioned_p (operands[5], operands[4]))
+ operands[5] = operands[0];
+})
+
+(define_insn_and_split "*cmp_sadd_sidi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (plus:SI (if_then_else:SI
+ (match_operator 1 "alpha_zero_comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+ (const_int 0)])
+ (match_operand:SI 3 "const48_operand" "I")
+ (const_int 0))
+ (match_operand:SI 4 "sext_add_operand" "rIO"))))
+ (clobber (match_scratch:SI 5 "=r"))]
+ ""
+ "#"
+ "! no_new_pseudos || reload_completed"
+ [(set (match_dup 5)
+ (match_op_dup:SI 1 [(match_dup 2) (const_int 0)]))
+ (set (match_dup 0)
+ (sign_extend:DI (plus:SI (mult:SI (match_dup 5) (match_dup 3))
+ (match_dup 4))))]
+{
+ if (! no_new_pseudos)
+    operands[5] = gen_reg_rtx (SImode);
+ else if (reg_overlap_mentioned_p (operands[5], operands[4]))
+ operands[5] = operands[0];
+})
+
+(define_insn_and_split "*cmp_ssub_di"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (if_then_else:DI
+ (match_operator 1 "alpha_zero_comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+ (const_int 0)])
+ (match_operand:DI 3 "const48_operand" "I")
+ (const_int 0))
+ (match_operand:DI 4 "reg_or_8bit_operand" "rI")))
+ (clobber (match_scratch:DI 5 "=r"))]
+ ""
+ "#"
+ "! no_new_pseudos || reload_completed"
+ [(set (match_dup 5)
+ (match_op_dup:DI 1 [(match_dup 2) (const_int 0)]))
+ (set (match_dup 0)
+ (minus:DI (mult:DI (match_dup 5) (match_dup 3))
+ (match_dup 4)))]
+{
+ if (! no_new_pseudos)
+ operands[5] = gen_reg_rtx (DImode);
+ else if (reg_overlap_mentioned_p (operands[5], operands[4]))
+ operands[5] = operands[0];
+})
+
+(define_insn_and_split "*cmp_ssub_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (if_then_else:SI
+ (match_operator 1 "alpha_zero_comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+ (const_int 0)])
+ (match_operand:SI 3 "const48_operand" "I")
+ (const_int 0))
+ (match_operand:SI 4 "reg_or_8bit_operand" "rI")))
+ (clobber (match_scratch:SI 5 "=r"))]
+ ""
+ "#"
+ "! no_new_pseudos || reload_completed"
+ [(set (match_dup 5)
+ (match_op_dup:SI 1 [(match_dup 2) (const_int 0)]))
+ (set (match_dup 0)
+ (minus:SI (mult:SI (match_dup 5) (match_dup 3))
+ (match_dup 4)))]
+{
+ if (! no_new_pseudos)
+    operands[5] = gen_reg_rtx (SImode);
+ else if (reg_overlap_mentioned_p (operands[5], operands[4]))
+ operands[5] = operands[0];
+})
+
+(define_insn_and_split "*cmp_ssub_sidi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (minus:SI (if_then_else:SI
+ (match_operator 1 "alpha_zero_comparison_operator"
+ [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+ (const_int 0)])
+ (match_operand:SI 3 "const48_operand" "I")
+ (const_int 0))
+ (match_operand:SI 4 "reg_or_8bit_operand" "rI"))))
+ (clobber (match_scratch:SI 5 "=r"))]
+ ""
+ "#"
+ "! no_new_pseudos || reload_completed"
+ [(set (match_dup 5)
+ (match_op_dup:SI 1 [(match_dup 2) (const_int 0)]))
+ (set (match_dup 0)
+ (sign_extend:DI (minus:SI (mult:SI (match_dup 5) (match_dup 3))
+ (match_dup 4))))]
+{
+ if (! no_new_pseudos)
+    operands[5] = gen_reg_rtx (SImode);
+ else if (reg_overlap_mentioned_p (operands[5], operands[4]))
+ operands[5] = operands[0];
+})
+
+;; Here are the CALL and unconditional branch insns. Calls on NT and OSF
+;; work differently, so we have different patterns for each.
+
+;; On Unicos/Mk a call information word (CIW) must be generated for each
+;; call. The CIW contains information about arguments passed in registers
+;; and is stored in the caller's SSIB. Its offset relative to the beginning
+;; of the SSIB is passed in $25. Handling this properly is quite complicated
+;; in the presence of inlining since the CIWs for calls performed by the
+;; inlined function must be stored in the SSIB of the function it is inlined
+;; into as well. We encode the CIW in an unspec and append it to the list
+;; of the CIWs for the current function only when the instruction for loading
+;; $25 is generated.
+
+(define_expand "call"
+ [(use (match_operand:DI 0 "" ""))
+ (use (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))]
+ ""
+{
+ if (TARGET_ABI_WINDOWS_NT)
+ emit_call_insn (gen_call_nt (operands[0], operands[1]));
+ else if (TARGET_ABI_OPEN_VMS)
+ emit_call_insn (gen_call_vms (operands[0], operands[2]));
+ else if (TARGET_ABI_UNICOSMK)
+ emit_call_insn (gen_call_umk (operands[0], operands[2]));
+ else
+ emit_call_insn (gen_call_osf (operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "sibcall"
+ [(parallel [(call (mem:DI (match_operand 0 "" ""))
+ (match_operand 1 "" ""))
+ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)])]
+ "TARGET_ABI_OSF"
+{
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+ operands[0] = XEXP (operands[0], 0);
+})
+
+(define_expand "call_osf"
+ [(parallel [(call (mem:DI (match_operand 0 "" ""))
+ (match_operand 1 "" ""))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))])]
+ ""
+{
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+
+ operands[0] = XEXP (operands[0], 0);
+ if (! call_operand (operands[0], Pmode))
+ operands[0] = copy_to_mode_reg (Pmode, operands[0]);
+})
+
+(define_expand "call_nt"
+ [(parallel [(call (mem:DI (match_operand 0 "" ""))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 26))])]
+ ""
+{
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+
+ operands[0] = XEXP (operands[0], 0);
+ if (GET_CODE (operands[0]) != SYMBOL_REF && GET_CODE (operands[0]) != REG)
+ operands[0] = force_reg (DImode, operands[0]);
+})
+
+;; Calls on Unicos/Mk are always indirect.
+;; op 0: symbol ref for called function
+;; op 1: CIW for $25 represented by an unspec
+
+(define_expand "call_umk"
+ [(parallel [(call (mem:DI (match_operand 0 "" ""))
+ (match_operand 1 "" ""))
+ (use (reg:DI 25))
+ (clobber (reg:DI 26))])]
+ ""
+{
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+
+ /* Always load the address of the called function into a register;
+ load the CIW in $25. */
+
+ operands[0] = XEXP (operands[0], 0);
+ if (GET_CODE (operands[0]) != REG)
+ operands[0] = force_reg (DImode, operands[0]);
+
+ emit_move_insn (gen_rtx_REG (DImode, 25), operands[1]);
+})
+
+;;
+;; call openvms/alpha
+;; op 0: symbol ref for called function
+;; op 1: next_arg_reg (argument information value for R25)
+;;
+(define_expand "call_vms"
+ [(parallel [(call (mem:DI (match_operand 0 "" ""))
+ (match_operand 1 "" ""))
+ (use (match_dup 2))
+ (use (reg:DI 25))
+ (use (reg:DI 26))
+ (clobber (reg:DI 27))])]
+ ""
+{
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+
+ operands[0] = XEXP (operands[0], 0);
+
+ /* Always load AI with argument information, then handle symbolic and
+     indirect calls differently.  Load RA and set operands[2] to PV in
+ both cases. */
+
+ emit_move_insn (gen_rtx_REG (DImode, 25), operands[1]);
+ if (GET_CODE (operands[0]) == SYMBOL_REF)
+ {
+ alpha_need_linkage (XSTR (operands[0], 0), 0);
+
+ operands[2] = const0_rtx;
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (Pmode, 26),
+ gen_rtx_MEM (Pmode, plus_constant (operands[0], 8)));
+ operands[2] = operands[0];
+ }
+})
+
+(define_expand "call_value"
+ [(use (match_operand 0 "" ""))
+ (use (match_operand:DI 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (use (match_operand 4 "" ""))]
+ ""
+{
+ if (TARGET_ABI_WINDOWS_NT)
+ emit_call_insn (gen_call_value_nt (operands[0], operands[1], operands[2]));
+ else if (TARGET_ABI_OPEN_VMS)
+ emit_call_insn (gen_call_value_vms (operands[0], operands[1],
+ operands[3]));
+ else if (TARGET_ABI_UNICOSMK)
+ emit_call_insn (gen_call_value_umk (operands[0], operands[1],
+ operands[3]));
+ else
+ emit_call_insn (gen_call_value_osf (operands[0], operands[1],
+ operands[2]));
+ DONE;
+})
+
+(define_expand "sibcall_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand 1 "" ""))
+ (match_operand 2 "" "")))
+ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)])]
+ "TARGET_ABI_OSF"
+{
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+ operands[1] = XEXP (operands[1], 0);
+})
+
+(define_expand "call_value_osf"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand 1 "" ""))
+ (match_operand 2 "" "")))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))])]
+ ""
+{
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+
+ operands[1] = XEXP (operands[1], 0);
+ if (! call_operand (operands[1], Pmode))
+ operands[1] = copy_to_mode_reg (Pmode, operands[1]);
+})
+
+(define_expand "call_value_nt"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand 1 "" ""))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 26))])]
+ ""
+{
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) != SYMBOL_REF && GET_CODE (operands[1]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+})
+
+(define_expand "call_value_vms"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "" ""))
+ (match_operand 2 "" "")))
+ (use (match_dup 3))
+ (use (reg:DI 25))
+ (use (reg:DI 26))
+ (clobber (reg:DI 27))])]
+ ""
+{
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+
+ operands[1] = XEXP (operands[1], 0);
+
+ /* Always load AI with argument information, then handle symbolic and
+     indirect calls differently.  Load RA and set operands[3] to PV in
+ both cases. */
+
+ emit_move_insn (gen_rtx_REG (DImode, 25), operands[2]);
+ if (GET_CODE (operands[1]) == SYMBOL_REF)
+ {
+ alpha_need_linkage (XSTR (operands[1], 0), 0);
+
+ operands[3] = const0_rtx;
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (Pmode, 26),
+ gen_rtx_MEM (Pmode, plus_constant (operands[1], 8)));
+ operands[3] = operands[1];
+ }
+})
+
+(define_expand "call_value_umk"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand 1 "" ""))
+ (match_operand 2 "" "")))
+ (use (reg:DI 25))
+ (clobber (reg:DI 26))])]
+ ""
+{
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+
+ emit_move_insn (gen_rtx_REG (DImode, 25), operands[2]);
+})
+
+(define_insn "*call_osf_1_er"
+ [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s"))
+ (match_operand 1 "" ""))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "@
+ jsr $26,(%0),0\;ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%*
+ bsr $26,%0\t\t!samegp
+ ldq $27,%0($29)\t\t!literal!%#\;jsr $26,($27),%0\t\t!lituse_jsr!%#\;ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%*"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "12,*,16")])
+
+;; We must use peep2 instead of a split because we need accurate life
+;; information for $gp. Consider the case of { bar(); while (1); }.
+(define_peephole2
+ [(parallel [(call (mem:DI (match_operand:DI 0 "call_operand" ""))
+ (match_operand 1 "" ""))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))])]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF && reload_completed
+ && ! samegp_function_operand (operands[0], Pmode)
+ && (peep2_regno_dead_p (1, 29)
+ || find_reg_note (insn, REG_NORETURN, NULL_RTX))"
+ [(parallel [(call (mem:DI (match_dup 2))
+ (match_dup 1))
+ (set (reg:DI 26) (plus:DI (pc) (const_int 4)))
+ (unspec_volatile [(reg:DI 29)] UNSPECV_BLOCKAGE)
+ (use (match_dup 0))
+ (use (match_dup 3))])]
+{
+ if (CONSTANT_P (operands[0]))
+ {
+ operands[2] = gen_rtx_REG (Pmode, 27);
+ operands[3] = GEN_INT (alpha_next_sequence_number++);
+ emit_insn (gen_movdi_er_high_g (operands[2], pic_offset_table_rtx,
+ operands[0], operands[3]));
+ }
+ else
+ {
+ operands[2] = operands[0];
+ operands[0] = const0_rtx;
+ operands[3] = const0_rtx;
+ }
+})
+
+(define_peephole2
+ [(parallel [(call (mem:DI (match_operand:DI 0 "call_operand" ""))
+ (match_operand 1 "" ""))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))])]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF && reload_completed
+ && ! samegp_function_operand (operands[0], Pmode)
+ && ! (peep2_regno_dead_p (1, 29)
+ || find_reg_note (insn, REG_NORETURN, NULL_RTX))"
+ [(parallel [(call (mem:DI (match_dup 2))
+ (match_dup 1))
+ (set (reg:DI 26) (plus:DI (pc) (const_int 4)))
+ (unspec_volatile [(reg:DI 29)] UNSPECV_BLOCKAGE)
+ (use (match_dup 0))
+ (use (match_dup 4))])
+ (set (reg:DI 29)
+ (unspec_volatile:DI [(reg:DI 26) (match_dup 3)] UNSPECV_LDGP1))
+ (set (reg:DI 29)
+ (unspec:DI [(reg:DI 29) (match_dup 3)] UNSPEC_LDGP2))]
+{
+ if (CONSTANT_P (operands[0]))
+ {
+ operands[2] = gen_rtx_REG (Pmode, 27);
+ operands[4] = GEN_INT (alpha_next_sequence_number++);
+ emit_insn (gen_movdi_er_high_g (operands[2], pic_offset_table_rtx,
+ operands[0], operands[4]));
+ }
+ else
+ {
+ operands[2] = operands[0];
+ operands[0] = const0_rtx;
+ operands[4] = const0_rtx;
+ }
+ operands[3] = GEN_INT (alpha_next_sequence_number++);
+})
+
+;; We add a blockage unspec_volatile to prevent insns from moving down
+;; from above the call to in between the call and the ldah gpdisp.
+
+(define_insn "*call_osf_2_er"
+ [(call (mem:DI (match_operand:DI 0 "register_operand" "c"))
+ (match_operand 1 "" ""))
+ (set (reg:DI 26) (plus:DI (pc) (const_int 4)))
+ (unspec_volatile [(reg:DI 29)] UNSPECV_BLOCKAGE)
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "const_int_operand" ""))]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "jsr $26,(%0),%2%J3"
+ [(set_attr "type" "jsr")
+ (set_attr "cannot_copy" "true")])
+
+;; We output a nop after noreturn calls at the very end of the function to
+;; ensure that the return address always remains in the caller's code range,
+;; as not doing so might confuse unwinding engines.
+;;
+;; The potential change in insn length is not reflected in the length
+;; attributes at this stage. Since the extra space is only actually added at
+;; the very end of the compilation process (via final/print_operand), it
+;; really seems harmless and not worth the trouble of some extra computation
+;; cost and complexity.
+
+(define_insn "*call_osf_1_noreturn"
+ [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s"))
+ (match_operand 1 "" ""))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))]
+ "! TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ "@
+ jsr $26,($27),0%+
+ bsr $26,$%0..ng%+
+ jsr $26,%0%+"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "*,*,8")])
+
+(define_insn "*call_osf_1"
+ [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s"))
+ (match_operand 1 "" ""))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))]
+ "! TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "@
+ jsr $26,($27),0\;ldgp $29,0($26)
+ bsr $26,$%0..ng
+ jsr $26,%0\;ldgp $29,0($26)"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "12,*,16")])
+
+;; Note that the DEC assembler expands "jmp foo" with $at, which
+;; doesn't do what we want.
+(define_insn "*sibcall_osf_1_er"
+ [(call (mem:DI (match_operand:DI 0 "symbolic_operand" "R,s"))
+ (match_operand 1 "" ""))
+ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "@
+ br $31,%0\t\t!samegp
+ ldq $27,%0($29)\t\t!literal!%#\;jmp $31,($27),%0\t\t!lituse_jsr!%#"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "*,8")])
+
+(define_insn "*sibcall_osf_1"
+ [(call (mem:DI (match_operand:DI 0 "symbolic_operand" "R,s"))
+ (match_operand 1 "" ""))
+ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)]
+ "! TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "@
+ br $31,$%0..ng
+ lda $27,%0\;jmp $31,($27),%0"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "*,8")])
+
+(define_insn "*call_nt_1"
+ [(call (mem:DI (match_operand:DI 0 "call_operand" "r,R,s"))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 26))]
+ "TARGET_ABI_WINDOWS_NT"
+ "@
+ jsr $26,(%0)
+ bsr $26,%0
+ jsr $26,%0"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "*,*,12")])
+
+; GAS relies on the order and position of the instructions output below to
+; generate relocs that let the VMS linker potentially optimize the call.
+; Please do not molest.
+(define_insn "*call_vms_1"
+ [(call (mem:DI (match_operand:DI 0 "call_operand" "r,s"))
+ (match_operand 1 "" ""))
+ (use (match_operand:DI 2 "nonmemory_operand" "r,n"))
+ (use (reg:DI 25))
+ (use (reg:DI 26))
+ (clobber (reg:DI 27))]
+ "TARGET_ABI_OPEN_VMS"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return "mov %2,$27\;jsr $26,0\;ldq $27,0($29)";
+ case 1:
+ operands [2] = alpha_use_linkage (operands [0], cfun->decl, 1, 0);
+ operands [3] = alpha_use_linkage (operands [0], cfun->decl, 0, 0);
+ return "ldq $26,%3\;ldq $27,%2\;jsr $26,%0\;ldq $27,0($29)";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "jsr")
+ (set_attr "length" "12,16")])
+
+(define_insn "*call_umk_1"
+ [(call (mem:DI (match_operand:DI 0 "call_operand" "r"))
+ (match_operand 1 "" ""))
+ (use (reg:DI 25))
+ (clobber (reg:DI 26))]
+ "TARGET_ABI_UNICOSMK"
+ "jsr $26,(%0)"
+ [(set_attr "type" "jsr")])
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+{
+ int i;
+
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+})
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
+ ""
+ ""
+ [(set_attr "length" "0")
+ (set_attr "type" "none")])
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "br $31,%l0"
+ [(set_attr "type" "ibr")])
+
+(define_expand "return"
+ [(return)]
+ "direct_return ()"
+ "")
+
+(define_insn "*return_internal"
+ [(return)]
+ "reload_completed"
+ "ret $31,($26),1"
+ [(set_attr "type" "ibr")])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:DI 0 "register_operand" "r"))]
+ ""
+ "jmp $31,(%0),0"
+ [(set_attr "type" "ibr")])
+
+(define_expand "tablejump"
+ [(parallel [(set (pc)
+ (match_operand 0 "register_operand" ""))
+ (use (label_ref:DI (match_operand 1 "" "")))])]
+ ""
+{
+ if (TARGET_ABI_WINDOWS_NT)
+ {
+ rtx dest = gen_reg_rtx (DImode);
+ emit_insn (gen_extendsidi2 (dest, operands[0]));
+ operands[0] = dest;
+ }
+ else if (TARGET_ABI_OSF)
+ {
+ rtx dest = gen_reg_rtx (DImode);
+ emit_insn (gen_extendsidi2 (dest, operands[0]));
+ emit_insn (gen_adddi3 (dest, pic_offset_table_rtx, dest));
+ operands[0] = dest;
+ }
+})
+
+(define_insn "*tablejump_osf_nt_internal"
+ [(set (pc)
+ (match_operand:DI 0 "register_operand" "r"))
+ (use (label_ref:DI (match_operand 1 "" "")))]
+ "(TARGET_ABI_OSF || TARGET_ABI_WINDOWS_NT)
+ && alpha_tablejump_addr_vec (insn)"
+{
+ operands[2] = alpha_tablejump_best_label (insn);
+ return "jmp $31,(%0),%2";
+}
+ [(set_attr "type" "ibr")])
+
+(define_insn "*tablejump_internal"
+ [(set (pc)
+ (match_operand:DI 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jmp $31,(%0),0"
+ [(set_attr "type" "ibr")])
+
+;; Cache flush. Used by INITIALIZE_TRAMPOLINE. 0x86 is PAL_imb, but we don't
+;; want to have to include pal.h in our .s file.
+;;
+;; Technically the type for call_pal is jsr, but we use that for determining
+;; if we need a GP. Use ibr instead since it has the same EV5 scheduling
+;; characteristics.
+(define_insn "imb"
+ [(unspec_volatile [(const_int 0)] UNSPECV_IMB)]
+ ""
+ "call_pal 0x86"
+ [(set_attr "type" "callpal")])
+
+;; BUGCHK is documented common to OSF/1 and VMS PALcode.
+;; NT does not document anything at 0x81 -- presumably it would generate
+;; the equivalent of SIGILL, but this isn't that important.
+;; ??? Presuming unicosmk uses either OSF/1 or VMS PALcode.
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 0))]
+ "!TARGET_ABI_WINDOWS_NT"
+ "call_pal 0x81"
+ [(set_attr "type" "callpal")])
+
+;; For userland, we load the thread pointer from the TCB.
+;; For the kernel, we load the per-cpu private value.
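+;; (The PALcode entries used below are the standard OSF/1 ones: rduniq at
+;; 0x9e and wruniq at 0x9f for the per-thread unique value, rdval at 0x32
+;; and wrval at 0x31 for the kernel per-cpu value.)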
+
+(define_insn "load_tp"
+ [(set (match_operand:DI 0 "register_operand" "=v")
+ (unspec:DI [(const_int 0)] UNSPEC_TP))]
+ "TARGET_ABI_OSF"
+{
+ if (TARGET_TLS_KERNEL)
+ return "call_pal 0x32";
+ else
+ return "call_pal 0x9e";
+}
+ [(set_attr "type" "callpal")])
+
+;; For completeness, and possibly a __builtin function, here's how to
+;; set the thread pointer. Since we don't describe enough of this
+;; quantity for CSE, we have to use a volatile unspec, and then there's
+;; not much point in creating an R16_REG register class.
+
+(define_expand "set_tp"
+ [(set (reg:DI 16) (match_operand:DI 0 "input_operand" ""))
+ (unspec_volatile [(reg:DI 16)] UNSPECV_SET_TP)]
+ "TARGET_ABI_OSF"
+ "")
+
+(define_insn "*set_tp"
+ [(unspec_volatile [(reg:DI 16)] UNSPECV_SET_TP)]
+ "TARGET_ABI_OSF"
+{
+ if (TARGET_TLS_KERNEL)
+ return "call_pal 0x31";
+ else
+ return "call_pal 0x9f";
+}
+ [(set_attr "type" "callpal")])
+
+;; Finally, we have the basic data motion insns. The byte and word insns
+;; are done via define_expand. Start with the floating-point insns, since
+;; they are simpler.
+
+(define_insn "*movsf_nofix"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,*r,*r,m,m")
+ (match_operand:SF 1 "input_operand" "fG,m,*rG,m,fG,*r"))]
+ "TARGET_FPREGS && ! TARGET_FIX
+ && (register_operand (operands[0], SFmode)
+ || reg_or_0_operand (operands[1], SFmode))"
+ "@
+ cpys %R1,%R1,%0
+ ld%, %0,%1
+ bis $31,%r1,%0
+ ldl %0,%1
+ st%, %R1,%0
+ stl %r1,%0"
+ [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist")])
+
+(define_insn "*movsf_fix"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,*r,*r,m,m,f,*r")
+ (match_operand:SF 1 "input_operand" "fG,m,*rG,m,fG,*r,*r,f"))]
+ "TARGET_FPREGS && TARGET_FIX
+ && (register_operand (operands[0], SFmode)
+ || reg_or_0_operand (operands[1], SFmode))"
+ "@
+ cpys %R1,%R1,%0
+ ld%, %0,%1
+ bis $31,%r1,%0
+ ldl %0,%1
+ st%, %R1,%0
+ stl %r1,%0
+ itofs %1,%0
+ ftois %1,%0"
+ [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist,itof,ftoi")])
+
+(define_insn "*movsf_nofp"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:SF 1 "input_operand" "rG,m,r"))]
+ "! TARGET_FPREGS
+ && (register_operand (operands[0], SFmode)
+ || reg_or_0_operand (operands[1], SFmode))"
+ "@
+ bis $31,%r1,%0
+ ldl %0,%1
+ stl %r1,%0"
+ [(set_attr "type" "ilog,ild,ist")])
+
+(define_insn "*movdf_nofix"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,*r,*r,m,m")
+ (match_operand:DF 1 "input_operand" "fG,m,*rG,m,fG,*r"))]
+ "TARGET_FPREGS && ! TARGET_FIX
+ && (register_operand (operands[0], DFmode)
+ || reg_or_0_operand (operands[1], DFmode))"
+ "@
+ cpys %R1,%R1,%0
+ ld%- %0,%1
+ bis $31,%r1,%0
+ ldq %0,%1
+ st%- %R1,%0
+ stq %r1,%0"
+ [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist")])
+
+(define_insn "*movdf_fix"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,*r,*r,m,m,f,*r")
+ (match_operand:DF 1 "input_operand" "fG,m,*rG,m,fG,*r,*r,f"))]
+ "TARGET_FPREGS && TARGET_FIX
+ && (register_operand (operands[0], DFmode)
+ || reg_or_0_operand (operands[1], DFmode))"
+ "@
+ cpys %R1,%R1,%0
+ ld%- %0,%1
+ bis $31,%r1,%0
+ ldq %0,%1
+ st%- %R1,%0
+ stq %r1,%0
+ itoft %1,%0
+ ftoit %1,%0"
+ [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist,itof,ftoi")])
+
+(define_insn "*movdf_nofp"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:DF 1 "input_operand" "rG,m,r"))]
+ "! TARGET_FPREGS
+ && (register_operand (operands[0], DFmode)
+ || reg_or_0_operand (operands[1], DFmode))"
+ "@
+ bis $31,%r1,%0
+ ldq %0,%1
+ stq %r1,%0"
+ [(set_attr "type" "ilog,ild,ist")])
+
+;; Subregs suck for register allocation. Pretend we can move TFmode
+;; data between general registers until after reload.
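+;;
+;; After reload the split below turns a register-to-register TFmode copy
+;; into two plain DImode moves, illustratively
+;;   bis $31,$16,$0        ; low half
+;;   bis $31,$17,$1        ; high half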
+
+(define_insn_and_split "*movtf_internal"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=r,o")
+ (match_operand:TF 1 "input_operand" "roG,rG"))]
+ "register_operand (operands[0], TFmode)
+ || reg_or_0_operand (operands[1], TFmode)"
+ "#"
+ "reload_completed"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 1) (match_dup 3))]
+{
+ alpha_split_tmode_pair (operands, TFmode, true);
+})
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! reg_or_0_operand (operands[1], SFmode))
+ operands[1] = force_reg (SFmode, operands[1]);
+})
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! reg_or_0_operand (operands[1], DFmode))
+ operands[1] = force_reg (DFmode, operands[1]);
+})
+
+(define_expand "movtf"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (match_operand:TF 1 "general_operand" ""))]
+ ""
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! reg_or_0_operand (operands[1], TFmode))
+ operands[1] = force_reg (TFmode, operands[1]);
+})
+
+(define_insn "*movsi"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,m")
+ (match_operand:SI 1 "input_operand" "rJ,K,L,n,m,rJ"))]
+ "(TARGET_ABI_OSF || TARGET_ABI_UNICOSMK)
+ && (register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))"
+ "@
+ bis $31,%r1,%0
+ lda %0,%1($31)
+ ldah %0,%h1($31)
+ #
+ ldl %0,%1
+ stl %r1,%0"
+ [(set_attr "type" "ilog,iadd,iadd,multi,ild,ist")])
+
+(define_insn "*movsi_nt_vms"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,r,m")
+ (match_operand:SI 1 "input_operand" "rJ,K,L,s,n,m,rJ"))]
+ "(TARGET_ABI_WINDOWS_NT || TARGET_ABI_OPEN_VMS)
+ && (register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))"
+ "@
+ bis $31,%1,%0
+ lda %0,%1
+ ldah %0,%h1
+ lda %0,%1
+ #
+ ldl %0,%1
+ stl %r1,%0"
+ [(set_attr "type" "ilog,iadd,iadd,ldsym,multi,ild,ist")])
+
+(define_insn "*movhi_nobwx"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (match_operand:HI 1 "input_operand" "rJ,n"))]
+ "! TARGET_BWX
+ && (register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode))"
+ "@
+ bis $31,%r1,%0
+ lda %0,%L1($31)"
+ [(set_attr "type" "ilog,iadd")])
+
+(define_insn "*movhi_bwx"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m")
+ (match_operand:HI 1 "input_operand" "rJ,n,m,rJ"))]
+ "TARGET_BWX
+ && (register_operand (operands[0], HImode)
+ || reg_or_0_operand (operands[1], HImode))"
+ "@
+ bis $31,%r1,%0
+ lda %0,%L1($31)
+ ldwu %0,%1
+ stw %r1,%0"
+ [(set_attr "type" "ilog,iadd,ild,ist")])
+
+(define_insn "*movqi_nobwx"
+ [(set (match_operand:QI 0 "register_operand" "=r,r")
+ (match_operand:QI 1 "input_operand" "rJ,n"))]
+ "! TARGET_BWX
+ && (register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode))"
+ "@
+ bis $31,%r1,%0
+ lda %0,%L1($31)"
+ [(set_attr "type" "ilog,iadd")])
+
+(define_insn "*movqi_bwx"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m")
+ (match_operand:QI 1 "input_operand" "rJ,n,m,rJ"))]
+ "TARGET_BWX
+ && (register_operand (operands[0], QImode)
+ || reg_or_0_operand (operands[1], QImode))"
+ "@
+ bis $31,%r1,%0
+ lda %0,%L1($31)
+ ldbu %0,%1
+ stb %r1,%0"
+ [(set_attr "type" "ilog,iadd,ild,ist")])
+
+;; We do two major things here: handle mem->mem and construct long
+;; constants.
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+{
+ if (alpha_expand_mov (SImode, operands))
+ DONE;
+})
+
+;; Split a load of a large constant into the appropriate two-insn
+;; sequence.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "non_add_const_operand" ""))]
+ ""
+ [(const_int 0)]
+{
+ if (alpha_split_const_mov (SImode, operands))
+ DONE;
+ else
+ FAIL;
+})
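+
+;; As an illustrative sketch (register number hypothetical), a load of
+;; the 32-bit constant 0x12345678 becomes
+;;
+;;	ldah $1,0x1234($31)	# $1 = 0x12340000
+;;	lda $1,0x5678($1)	# add the low 16 bits
+;;
+;; When the low half has its sign bit set, lda sign-extends it, so the
+;; ldah half is biased upward by one to compensate.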
+
+;; Split the load of an address into a four-insn sequence on Unicos/Mk.
+;; Always generate a REG_EQUAL note for the last instruction to facilitate
+;; optimizations. If the symbolic operand is a label_ref, generate REG_LABEL
+;; notes and update LABEL_NUSES because this is not done automatically.
+;; Labels may be incorrectly deleted if we don't do this.
+;;
+;; Describing correctly what the individual instructions do is too
+;; complicated, so we use UNSPECs for each of the three parts of an address.
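+;;
+;; Schematically (register choice hypothetical), the address of SYM is
+;; built as
+;;
+;;	laum $1,SYM($31)	# upper 16 bits of the 48-bit address
+;;	sll $1,32,$1
+;;	lalm $1,SYM($1)		# middle 16 bits
+;;	lal $1,SYM($1)		# low 16 bits
+;;
+;; matching the single-insn form used by *movdi_nofix below.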
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "symbolic_operand" ""))]
+ "TARGET_ABI_UNICOSMK && reload_completed"
+ [(const_int 0)]
+{
+ rtx insn1, insn2, insn3;
+
+ insn1 = emit_insn (gen_umk_laum (operands[0], operands[1]));
+ emit_insn (gen_ashldi3 (operands[0], operands[0], GEN_INT (32)));
+ insn2 = emit_insn (gen_umk_lalm (operands[0], operands[0], operands[1]));
+ insn3 = emit_insn (gen_umk_lal (operands[0], operands[0], operands[1]));
+ REG_NOTES (insn3) = gen_rtx_EXPR_LIST (REG_EQUAL, operands[1],
+ REG_NOTES (insn3));
+ if (GET_CODE (operands[1]) == LABEL_REF)
+ {
+ rtx label;
+
+ label = XEXP (operands[1], 0);
+ REG_NOTES (insn1) = gen_rtx_EXPR_LIST (REG_LABEL, label,
+ REG_NOTES (insn1));
+ REG_NOTES (insn2) = gen_rtx_EXPR_LIST (REG_LABEL, label,
+ REG_NOTES (insn2));
+ REG_NOTES (insn3) = gen_rtx_EXPR_LIST (REG_LABEL, label,
+ REG_NOTES (insn3));
+ LABEL_NUSES (label) += 3;
+ }
+ DONE;
+})
+
+;; Instructions for loading the three parts of an address on Unicos/Mk.
+
+(define_insn "umk_laum"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "symbolic_operand" "")]
+ UNSPEC_UMK_LAUM))]
+ "TARGET_ABI_UNICOSMK"
+ "laum %r0,%t1($31)"
+ [(set_attr "type" "iadd")])
+
+(define_insn "umk_lalm"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "register_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "symbolic_operand" "")]
+ UNSPEC_UMK_LALM)))]
+ "TARGET_ABI_UNICOSMK"
+ "lalm %r0,%t2(%r1)"
+ [(set_attr "type" "iadd")])
+
+(define_insn "umk_lal"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "register_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "symbolic_operand" "")]
+ UNSPEC_UMK_LAL)))]
+ "TARGET_ABI_UNICOSMK"
+ "lal %r0,%t2(%r1)"
+ [(set_attr "type" "iadd")])
+
+;; Add a new call information word to the current function's list of CIWs
+;; and load its index into $25. Doing it here ensures that the CIW will be
+;; associated with the correct function even in the presence of inlining.
+
+(define_insn "*umk_load_ciw"
+ [(set (reg:DI 25)
+ (unspec:DI [(match_operand 0 "" "")] UNSPEC_UMK_LOAD_CIW))]
+ "TARGET_ABI_UNICOSMK"
+{
+ operands[0] = unicosmk_add_call_info_word (operands[0]);
+ return "lda $25,%0";
+}
+ [(set_attr "type" "iadd")])
+
+(define_insn "*movdi_er_low_l"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "local_symbolic_operand" "")))]
+ "TARGET_EXPLICIT_RELOCS"
+{
+ if (true_regnum (operands[1]) == 29)
+ return "lda %0,%2(%1)\t\t!gprel";
+ else
+ return "lda %0,%2(%1)\t\t!gprellow";
+}
+ [(set_attr "usegp" "yes")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "small_symbolic_operand" ""))]
+ "TARGET_EXPLICIT_RELOCS && reload_completed"
+ [(set (match_dup 0)
+ (lo_sum:DI (match_dup 2) (match_dup 1)))]
+ "operands[2] = pic_offset_table_rtx;")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "local_symbolic_operand" ""))]
+ "TARGET_EXPLICIT_RELOCS && reload_completed"
+ [(set (match_dup 0)
+ (plus:DI (match_dup 2) (high:DI (match_dup 1))))
+ (set (match_dup 0)
+ (lo_sum:DI (match_dup 0) (match_dup 1)))]
+ "operands[2] = pic_offset_table_rtx;")
+
+(define_split
+ [(match_operand 0 "some_small_symbolic_operand" "")]
+ ""
+ [(match_dup 0)]
+ "operands[0] = split_small_symbolic_operand (operands[0]);")
+
+;; Accepts any symbolic operand, not just a global one, since function
+;; calls that don't go via bsr still use !literal in hopes of linker
+;; relaxation.
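+;;
+;; A typical pairing (sequence number hypothetical) is
+;;
+;;	ldq $27,func($29)	!literal!6
+;;	jsr $26,($27),func	!lituse_jsr!6
+;;
+;; which the linker may relax to a direct bsr when the target is close
+;; enough.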
+(define_insn "movdi_er_high_g"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "symbolic_operand" "")
+ (match_operand 3 "const_int_operand" "")]
+ UNSPEC_LITERAL))]
+ "TARGET_EXPLICIT_RELOCS"
+{
+ if (INTVAL (operands[3]) == 0)
+ return "ldq %0,%2(%1)\t\t!literal";
+ else
+ return "ldq %0,%2(%1)\t\t!literal!%3";
+}
+ [(set_attr "type" "ldsym")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "global_symbolic_operand" ""))]
+ "TARGET_EXPLICIT_RELOCS && reload_completed"
+ [(set (match_dup 0)
+ (unspec:DI [(match_dup 2)
+ (match_dup 1)
+ (const_int 0)] UNSPEC_LITERAL))]
+ "operands[2] = pic_offset_table_rtx;")
+
+(define_insn "movdi_er_tlsgd"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "symbolic_operand" "")
+ (match_operand 3 "const_int_operand" "")]
+ UNSPEC_TLSGD))]
+ "HAVE_AS_TLS"
+{
+ if (INTVAL (operands[3]) == 0)
+ return "lda %0,%2(%1)\t\t!tlsgd";
+ else
+ return "lda %0,%2(%1)\t\t!tlsgd!%3";
+})
+
+(define_insn "movdi_er_tlsldm"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "")]
+ UNSPEC_TLSLDM))]
+ "HAVE_AS_TLS"
+{
+ if (INTVAL (operands[2]) == 0)
+ return "lda %0,%&(%1)\t\t!tlsldm";
+ else
+ return "lda %0,%&(%1)\t\t!tlsldm!%2";
+})
+
+(define_insn "*movdi_er_gotdtp"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "symbolic_operand" "")]
+ UNSPEC_DTPREL))]
+ "HAVE_AS_TLS"
+ "ldq %0,%2(%1)\t\t!gotdtprel"
+ [(set_attr "type" "ild")
+ (set_attr "usegp" "yes")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "gotdtp_symbolic_operand" ""))]
+ "HAVE_AS_TLS && reload_completed"
+ [(set (match_dup 0)
+ (unspec:DI [(match_dup 2)
+ (match_dup 1)] UNSPEC_DTPREL))]
+{
+ operands[1] = XVECEXP (XEXP (operands[1], 0), 0, 0);
+ operands[2] = pic_offset_table_rtx;
+})
+
+(define_insn "*movdi_er_gottp"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "symbolic_operand" "")]
+ UNSPEC_TPREL))]
+ "HAVE_AS_TLS"
+ "ldq %0,%2(%1)\t\t!gottprel"
+ [(set_attr "type" "ild")
+ (set_attr "usegp" "yes")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "gottp_symbolic_operand" ""))]
+ "HAVE_AS_TLS && reload_completed"
+ [(set (match_dup 0)
+ (unspec:DI [(match_dup 2)
+ (match_dup 1)] UNSPEC_TPREL))]
+{
+ operands[1] = XVECEXP (XEXP (operands[1], 0), 0, 0);
+ operands[2] = pic_offset_table_rtx;
+})
+
+(define_insn "*movdi_er_nofix"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,r,r,r,m,*f,*f,Q")
+ (match_operand:DI 1 "input_operand" "rJ,K,L,T,s,n,m,rJ,*fJ,Q,*f"))]
+ "TARGET_EXPLICIT_RELOCS && ! TARGET_FIX
+ && (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
+ "@
+ mov %r1,%0
+ lda %0,%1($31)
+ ldah %0,%h1($31)
+ #
+ #
+ #
+ ldq%A1 %0,%1
+ stq%A0 %r1,%0
+ fmov %R1,%0
+ ldt %0,%1
+ stt %R1,%0"
+ [(set_attr "type" "ilog,iadd,iadd,iadd,ldsym,multi,ild,ist,fcpys,fld,fst")
+ (set_attr "usegp" "*,*,*,yes,*,*,*,*,*,*,*")])
+
+;; The 'U' constraint matches symbolic operands on Unicos/Mk.  Those should
+;; have been split up by the rules above, but we shouldn't reject the
+;; possibility of them getting through.
+
+(define_insn "*movdi_nofix"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,r,r,r,m,*f,*f,Q")
+ (match_operand:DI 1 "input_operand" "rJ,K,L,U,s,n,m,rJ,*fJ,Q,*f"))]
+ "! TARGET_FIX
+ && (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
+ "@
+ bis $31,%r1,%0
+ lda %0,%1($31)
+ ldah %0,%h1($31)
+ laum %0,%t1($31)\;sll %0,32,%0\;lalm %0,%t1(%0)\;lal %0,%t1(%0)
+ lda %0,%1
+ #
+ ldq%A1 %0,%1
+ stq%A0 %r1,%0
+ cpys %R1,%R1,%0
+ ldt %0,%1
+ stt %R1,%0"
+ [(set_attr "type" "ilog,iadd,iadd,ldsym,ldsym,multi,ild,ist,fcpys,fld,fst")
+ (set_attr "length" "*,*,*,16,*,*,*,*,*,*,*")])
+
+(define_insn "*movdi_er_fix"
+ [(set (match_operand:DI 0 "nonimmediate_operand"
+ "=r,r,r,r,r,r,r, m, *f,*f, Q, r,*f")
+ (match_operand:DI 1 "input_operand"
+ "rJ,K,L,T,s,n,m,rJ,*fJ, Q,*f,*f, r"))]
+ "TARGET_EXPLICIT_RELOCS && TARGET_FIX
+ && (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
+ "@
+ mov %r1,%0
+ lda %0,%1($31)
+ ldah %0,%h1($31)
+ #
+ #
+ #
+ ldq%A1 %0,%1
+ stq%A0 %r1,%0
+ fmov %R1,%0
+ ldt %0,%1
+ stt %R1,%0
+ ftoit %1,%0
+ itoft %1,%0"
+ [(set_attr "type" "ilog,iadd,iadd,iadd,ldsym,multi,ild,ist,fcpys,fld,fst,ftoi,itof")
+ (set_attr "usegp" "*,*,*,yes,*,*,*,*,*,*,*,*,*")])
+
+(define_insn "*movdi_fix"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,r,r,m,*f,*f,Q,r,*f")
+ (match_operand:DI 1 "input_operand" "rJ,K,L,s,n,m,rJ,*fJ,Q,*f,*f,r"))]
+ "! TARGET_EXPLICIT_RELOCS && TARGET_FIX
+ && (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
+ "@
+ bis $31,%r1,%0
+ lda %0,%1($31)
+ ldah %0,%h1($31)
+ lda %0,%1
+ #
+ ldq%A1 %0,%1
+ stq%A0 %r1,%0
+ cpys %R1,%R1,%0
+ ldt %0,%1
+ stt %R1,%0
+ ftoit %1,%0
+ itoft %1,%0"
+ [(set_attr "type" "ilog,iadd,iadd,ldsym,multi,ild,ist,fcpys,fld,fst,ftoi,itof")])
+
+;; VMS needs to set up "vms_base_regno" for unwinding. This move
+;; often appears dead to the life analysis code, at which point we
+;; die for emitting dead prologue instructions. Force this live.
+
+(define_insn "force_movdi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r")]
+ UNSPECV_FORCE_MOV))]
+ ""
+ "mov %1,%0"
+ [(set_attr "type" "ilog")])
+
+;; We do three major things here: handle mem->mem, put 64-bit constants in
+;; memory, and construct long 32-bit constants.
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+{
+ if (alpha_expand_mov (DImode, operands))
+ DONE;
+})
+
+;; Split a load of a large constant into the appropriate two-insn
+;; sequence.
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "non_add_const_operand" ""))]
+ ""
+ [(const_int 0)]
+{
+ if (alpha_split_const_mov (DImode, operands))
+ DONE;
+ else
+ FAIL;
+})
+
+;; We need to prevent reload from splitting TImode moves, because it
+;; might decide to overwrite a pointer with the value it points to.
+;; In that case we have to do the loads in the appropriate order so
+;; that the pointer is not destroyed too early.
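+;;
+;; For example, copying a TImode value at 0($1) into $1/$2 must load the
+;; half that overwrites the pointer last (registers hypothetical):
+;;
+;;	ldq $2,8($1)
+;;	ldq $1,0($1)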
+
+(define_insn_and_split "*movti_internal"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,o")
+ (match_operand:TI 1 "input_operand" "roJ,rJ"))]
+ "(register_operand (operands[0], TImode)
+ /* Prevent rematerialization of constants. */
+ && ! CONSTANT_P (operands[1]))
+ || reg_or_0_operand (operands[1], TImode)"
+ "#"
+ "reload_completed"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 1) (match_dup 3))]
+{
+ alpha_split_tmode_pair (operands, TImode, true);
+})
+
+(define_expand "movti"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (match_operand:TI 1 "general_operand" ""))]
+ ""
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! reg_or_0_operand (operands[1], TImode))
+ operands[1] = force_reg (TImode, operands[1]);
+
+ if (operands[1] == const0_rtx)
+ ;
+ /* We must put 64-bit constants in memory. We could keep the
+ 32-bit constants in TImode and rely on the splitter, but
+ this doesn't seem to be worth the pain. */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ || GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ rtx in[2], out[2], target;
+
+ gcc_assert (!no_new_pseudos);
+
+ split_double (operands[1], &in[0], &in[1]);
+
+ if (in[0] == const0_rtx)
+ out[0] = const0_rtx;
+ else
+ {
+ out[0] = gen_reg_rtx (DImode);
+ emit_insn (gen_movdi (out[0], in[0]));
+ }
+
+ if (in[1] == const0_rtx)
+ out[1] = const0_rtx;
+ else
+ {
+ out[1] = gen_reg_rtx (DImode);
+ emit_insn (gen_movdi (out[1], in[1]));
+ }
+
+ if (GET_CODE (operands[0]) != REG)
+ target = gen_reg_rtx (TImode);
+ else
+ target = operands[0];
+
+ emit_insn (gen_movdi (gen_rtx_SUBREG (DImode, target, 0), out[0]));
+ emit_insn (gen_movdi (gen_rtx_SUBREG (DImode, target, 8), out[1]));
+
+ if (target != operands[0])
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], target));
+
+ DONE;
+ }
+})
+
+;; These are the partial-word cases.
+;;
+;; First we have the code to load an aligned word.  Operand 0 is the register
+;; in which to place the result.  Its mode is QImode or HImode.  Operand 1
+;; is an SImode MEM at the low-order byte of the proper word.  Operand 2 is
+;; the bit offset within the word at which the value lies.  Operand 3 is an
+;; SImode scratch register.  If operand 0 is a hard register, operand 3 may
+;; be the same register.  It is allowed to conflict with operand 1 as well.
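+;;
+;; For instance, fetching the byte at offset 2 of an aligned longword
+;; can be done as (registers hypothetical)
+;;
+;;	ldl $1,mem		# load the containing longword
+;;	extbl $1,2,$0		# pick out byte 2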
+
+(define_expand "aligned_loadqi"
+ [(set (match_operand:SI 3 "register_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_operand:DI 0 "register_operand" "")
+ (zero_extract:DI (subreg:DI (match_dup 3) 0)
+ (const_int 8)
+ (match_operand:DI 2 "const_int_operand" "")))]
+ ""
+ "")
+
+(define_expand "aligned_loadhi"
+ [(set (match_operand:SI 3 "register_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_operand:DI 0 "register_operand" "")
+ (zero_extract:DI (subreg:DI (match_dup 3) 0)
+ (const_int 16)
+ (match_operand:DI 2 "const_int_operand" "")))]
+ ""
+ "")
+
+;; Similar for unaligned loads, where we use the sequence from the
+;; Alpha Architecture manual. We have to distinguish between little-endian
+;; and big-endian systems as the sequences are different.
+;;
+;; Operand 1 is the address. Operands 2 and 3 are temporaries, where
+;; operand 3 can overlap the input and output registers.
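+;;
+;; On a little-endian system the byte case amounts to (registers
+;; hypothetical)
+;;
+;;	ldq_u $2,0($16)		# load the enclosing aligned quadword
+;;	extbl $2,$16,$0		# extract the addressed byte
+;;
+;; ldq_u ignores the low three address bits, so it cannot fault on a
+;; misaligned address.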
+
+(define_expand "unaligned_loadqi"
+ [(use (match_operand:DI 0 "register_operand" ""))
+ (use (match_operand:DI 1 "address_operand" ""))
+ (use (match_operand:DI 2 "register_operand" ""))
+ (use (match_operand:DI 3 "register_operand" ""))]
+ ""
+{
+ if (WORDS_BIG_ENDIAN)
+ emit_insn (gen_unaligned_loadqi_be (operands[0], operands[1],
+ operands[2], operands[3]));
+ else
+ emit_insn (gen_unaligned_loadqi_le (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+})
+
+(define_expand "unaligned_loadqi_le"
+ [(set (match_operand:DI 2 "register_operand" "")
+ (mem:DI (and:DI (match_operand:DI 1 "address_operand" "")
+ (const_int -8))))
+ (set (match_operand:DI 3 "register_operand" "")
+ (match_dup 1))
+ (set (match_operand:DI 0 "register_operand" "")
+ (zero_extract:DI (match_dup 2)
+ (const_int 8)
+ (ashift:DI (match_dup 3) (const_int 3))))]
+ "! WORDS_BIG_ENDIAN"
+ "")
+
+(define_expand "unaligned_loadqi_be"
+ [(set (match_operand:DI 2 "register_operand" "")
+ (mem:DI (and:DI (match_operand:DI 1 "address_operand" "")
+ (const_int -8))))
+ (set (match_operand:DI 3 "register_operand" "")
+ (match_dup 1))
+ (set (match_operand:DI 0 "register_operand" "")
+ (zero_extract:DI (match_dup 2)
+ (const_int 8)
+ (minus:DI
+ (const_int 56)
+ (ashift:DI (match_dup 3) (const_int 3)))))]
+ "WORDS_BIG_ENDIAN"
+ "")
+
+(define_expand "unaligned_loadhi"
+ [(use (match_operand:DI 0 "register_operand" ""))
+ (use (match_operand:DI 1 "address_operand" ""))
+ (use (match_operand:DI 2 "register_operand" ""))
+ (use (match_operand:DI 3 "register_operand" ""))]
+ ""
+{
+ if (WORDS_BIG_ENDIAN)
+ emit_insn (gen_unaligned_loadhi_be (operands[0], operands[1],
+ operands[2], operands[3]));
+ else
+ emit_insn (gen_unaligned_loadhi_le (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+})
+
+(define_expand "unaligned_loadhi_le"
+ [(set (match_operand:DI 2 "register_operand" "")
+ (mem:DI (and:DI (match_operand:DI 1 "address_operand" "")
+ (const_int -8))))
+ (set (match_operand:DI 3 "register_operand" "")
+ (match_dup 1))
+ (set (match_operand:DI 0 "register_operand" "")
+ (zero_extract:DI (match_dup 2)
+ (const_int 16)
+ (ashift:DI (match_dup 3) (const_int 3))))]
+ "! WORDS_BIG_ENDIAN"
+ "")
+
+(define_expand "unaligned_loadhi_be"
+ [(set (match_operand:DI 2 "register_operand" "")
+ (mem:DI (and:DI (match_operand:DI 1 "address_operand" "")
+ (const_int -8))))
+ (set (match_operand:DI 3 "register_operand" "")
+ (plus:DI (match_dup 1) (const_int 1)))
+ (set (match_operand:DI 0 "register_operand" "")
+ (zero_extract:DI (match_dup 2)
+ (const_int 16)
+ (minus:DI
+ (const_int 56)
+ (ashift:DI (match_dup 3) (const_int 3)))))]
+ "WORDS_BIG_ENDIAN"
+ "")
+
+;; Storing an aligned byte or word requires two temporaries.  Operand 0 is
+;; the aligned SImode MEM.  Operand 1 is the register containing the byte or
+;; word to store.  Operand 2 is the bit offset within the word at which the
+;; value should be placed.  Operands 3 and 4 are SImode temporaries.
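+;;
+;; For example, storing a byte at bit offset 16 within the word comes
+;; out as something like (registers hypothetical)
+;;
+;;	ldl $3,mem		# load the containing longword
+;;	mskbl $3,2,$3		# clear byte 2 (the AND with operand 5)
+;;	insbl $17,2,$4		# shift the new byte into position
+;;	bis $3,$4,$4		# merge
+;;	stl $4,mem		# store the longword back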
+
+(define_expand "aligned_store"
+ [(set (match_operand:SI 3 "register_operand" "")
+ (match_operand:SI 0 "memory_operand" ""))
+ (set (subreg:DI (match_dup 3) 0)
+ (and:DI (subreg:DI (match_dup 3) 0) (match_dup 5)))
+ (set (subreg:DI (match_operand:SI 4 "register_operand" "") 0)
+ (ashift:DI (zero_extend:DI (match_operand 1 "register_operand" ""))
+ (match_operand:DI 2 "const_int_operand" "")))
+ (set (subreg:DI (match_dup 4) 0)
+ (ior:DI (subreg:DI (match_dup 4) 0) (subreg:DI (match_dup 3) 0)))
+ (set (match_dup 0) (match_dup 4))]
+ ""
+{
+ operands[5] = GEN_INT (~ (GET_MODE_MASK (GET_MODE (operands[1]))
+ << INTVAL (operands[2])));
+})
+
+;; For the unaligned byte and halfword cases, we use code similar to that
+;; in the Architecture book, but reordered to lower the number of registers
+;; required.  Operand 0 is the address.  Operand 1 is the data to store.
+;; Operands 2, 3, and 4 are DImode temporaries, where operands 2 and 4 may
+;; be the same temporary, if desired. If the address is in a register,
+;; operand 2 can be that register.
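+;;
+;; For the little-endian byte case the sequence is in essence (registers
+;; hypothetical)
+;;
+;;	ldq_u $3,0($16)		# load the enclosing quadword
+;;	insbl $17,$16,$4	# shift the new byte into position
+;;	mskbl $3,$16,$3		# clear the old byte
+;;	bis $4,$3,$4		# merge
+;;	stq_u $4,0($16)		# store the quadword back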
+
+(define_expand "unaligned_storeqi"
+ [(use (match_operand:DI 0 "address_operand" ""))
+ (use (match_operand:QI 1 "register_operand" ""))
+ (use (match_operand:DI 2 "register_operand" ""))
+ (use (match_operand:DI 3 "register_operand" ""))
+ (use (match_operand:DI 4 "register_operand" ""))]
+ ""
+{
+ if (WORDS_BIG_ENDIAN)
+ emit_insn (gen_unaligned_storeqi_be (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ else
+ emit_insn (gen_unaligned_storeqi_le (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ DONE;
+})
+
+(define_expand "unaligned_storeqi_le"
+ [(set (match_operand:DI 3 "register_operand" "")
+ (mem:DI (and:DI (match_operand:DI 0 "address_operand" "")
+ (const_int -8))))
+ (set (match_operand:DI 2 "register_operand" "")
+ (match_dup 0))
+ (set (match_dup 3)
+ (and:DI (not:DI (ashift:DI (const_int 255)
+ (ashift:DI (match_dup 2) (const_int 3))))
+ (match_dup 3)))
+ (set (match_operand:DI 4 "register_operand" "")
+ (ashift:DI (zero_extend:DI (match_operand:QI 1 "register_operand" ""))
+ (ashift:DI (match_dup 2) (const_int 3))))
+ (set (match_dup 4) (ior:DI (match_dup 4) (match_dup 3)))
+ (set (mem:DI (and:DI (match_dup 0) (const_int -8)))
+ (match_dup 4))]
+ "! WORDS_BIG_ENDIAN"
+ "")
+
+(define_expand "unaligned_storeqi_be"
+ [(set (match_operand:DI 3 "register_operand" "")
+ (mem:DI (and:DI (match_operand:DI 0 "address_operand" "")
+ (const_int -8))))
+ (set (match_operand:DI 2 "register_operand" "")
+ (match_dup 0))
+ (set (match_dup 3)
+ (and:DI (not:DI (ashift:DI (const_int 255)
+ (minus:DI (const_int 56)
+ (ashift:DI (match_dup 2) (const_int 3)))))
+ (match_dup 3)))
+ (set (match_operand:DI 4 "register_operand" "")
+ (ashift:DI (zero_extend:DI (match_operand:QI 1 "register_operand" ""))
+ (minus:DI (const_int 56)
+ (ashift:DI (match_dup 2) (const_int 3)))))
+ (set (match_dup 4) (ior:DI (match_dup 4) (match_dup 3)))
+ (set (mem:DI (and:DI (match_dup 0) (const_int -8)))
+ (match_dup 4))]
+ "WORDS_BIG_ENDIAN"
+ "")
+
+(define_expand "unaligned_storehi"
+ [(use (match_operand:DI 0 "address_operand" ""))
+ (use (match_operand:HI 1 "register_operand" ""))
+ (use (match_operand:DI 2 "register_operand" ""))
+ (use (match_operand:DI 3 "register_operand" ""))
+ (use (match_operand:DI 4 "register_operand" ""))]
+ ""
+{
+ if (WORDS_BIG_ENDIAN)
+ emit_insn (gen_unaligned_storehi_be (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ else
+ emit_insn (gen_unaligned_storehi_le (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ DONE;
+})
+
+(define_expand "unaligned_storehi_le"
+ [(set (match_operand:DI 3 "register_operand" "")
+ (mem:DI (and:DI (match_operand:DI 0 "address_operand" "")
+ (const_int -8))))
+ (set (match_operand:DI 2 "register_operand" "")
+ (match_dup 0))
+ (set (match_dup 3)
+ (and:DI (not:DI (ashift:DI (const_int 65535)
+ (ashift:DI (match_dup 2) (const_int 3))))
+ (match_dup 3)))
+ (set (match_operand:DI 4 "register_operand" "")
+ (ashift:DI (zero_extend:DI (match_operand:HI 1 "register_operand" ""))
+ (ashift:DI (match_dup 2) (const_int 3))))
+ (set (match_dup 4) (ior:DI (match_dup 4) (match_dup 3)))
+ (set (mem:DI (and:DI (match_dup 0) (const_int -8)))
+ (match_dup 4))]
+ "! WORDS_BIG_ENDIAN"
+ "")
+
+(define_expand "unaligned_storehi_be"
+ [(set (match_operand:DI 3 "register_operand" "")
+ (mem:DI (and:DI (match_operand:DI 0 "address_operand" "")
+ (const_int -8))))
+ (set (match_operand:DI 2 "register_operand" "")
+ (plus:DI (match_dup 0) (const_int 1)))
+ (set (match_dup 3)
+ (and:DI (not:DI (ashift:DI
+ (const_int 65535)
+ (minus:DI (const_int 56)
+ (ashift:DI (match_dup 2) (const_int 3)))))
+ (match_dup 3)))
+ (set (match_operand:DI 4 "register_operand" "")
+ (ashift:DI (zero_extend:DI (match_operand:HI 1 "register_operand" ""))
+ (minus:DI (const_int 56)
+ (ashift:DI (match_dup 2) (const_int 3)))))
+ (set (match_dup 4) (ior:DI (match_dup 4) (match_dup 3)))
+ (set (mem:DI (and:DI (match_dup 0) (const_int -8)))
+ (match_dup 4))]
+ "WORDS_BIG_ENDIAN"
+ "")
+
+;; Here are the define_expand's for QI and HI moves that use the above
+;; patterns. We have the normal sets, plus the ones that need scratch
+;; registers for reload.
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+{
+ if (TARGET_BWX
+ ? alpha_expand_mov (QImode, operands)
+ : alpha_expand_mov_nobwx (QImode, operands))
+ DONE;
+})
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+{
+ if (TARGET_BWX
+ ? alpha_expand_mov (HImode, operands)
+ : alpha_expand_mov_nobwx (HImode, operands))
+ DONE;
+})
+
+;; Here are the versions for reload. Note that in the unaligned cases
+;; we know that the operand must not be a pseudo-register because stack
+;; slots are always aligned references.
+
+(define_expand "reload_inqi"
+ [(parallel [(match_operand:QI 0 "register_operand" "=r")
+ (match_operand:QI 1 "any_memory_operand" "m")
+ (match_operand:TI 2 "register_operand" "=&r")])]
+ "! TARGET_BWX"
+{
+ rtx scratch, seq;
+
+ if (aligned_memory_operand (operands[1], QImode))
+ {
+ seq = gen_reload_inqi_help (operands[0], operands[1],
+ gen_rtx_REG (SImode, REGNO (operands[2])));
+ }
+ else
+ {
+ rtx addr;
+
+ /* It is possible that one of the registers we got for operands[2]
+ might coincide with that of operands[0] (which is why we made
+ it TImode). Pick the other one to use as our scratch. */
+ if (REGNO (operands[0]) == REGNO (operands[2]))
+ scratch = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
+ else
+ scratch = gen_rtx_REG (DImode, REGNO (operands[2]));
+
+ addr = get_unaligned_address (operands[1]);
+ operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
+ seq = gen_unaligned_loadqi (operands[0], addr, scratch, operands[0]);
+ alpha_set_memflags (seq, operands[1]);
+ }
+ emit_insn (seq);
+ DONE;
+})
+
+(define_expand "reload_inhi"
+ [(parallel [(match_operand:HI 0 "register_operand" "=r")
+ (match_operand:HI 1 "any_memory_operand" "m")
+ (match_operand:TI 2 "register_operand" "=&r")])]
+ "! TARGET_BWX"
+{
+ rtx scratch, seq;
+
+ if (aligned_memory_operand (operands[1], HImode))
+ {
+ seq = gen_reload_inhi_help (operands[0], operands[1],
+ gen_rtx_REG (SImode, REGNO (operands[2])));
+ }
+ else
+ {
+ rtx addr;
+
+ /* It is possible that one of the registers we got for operands[2]
+ might coincide with that of operands[0] (which is why we made
+ it TImode). Pick the other one to use as our scratch. */
+ if (REGNO (operands[0]) == REGNO (operands[2]))
+ scratch = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
+ else
+ scratch = gen_rtx_REG (DImode, REGNO (operands[2]));
+
+ addr = get_unaligned_address (operands[1]);
+ operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
+ seq = gen_unaligned_loadhi (operands[0], addr, scratch, operands[0]);
+ alpha_set_memflags (seq, operands[1]);
+ }
+ emit_insn (seq);
+ DONE;
+})
+
+(define_expand "reload_outqi"
+ [(parallel [(match_operand:QI 0 "any_memory_operand" "=m")
+ (match_operand:QI 1 "register_operand" "r")
+ (match_operand:TI 2 "register_operand" "=&r")])]
+ "! TARGET_BWX"
+{
+ if (aligned_memory_operand (operands[0], QImode))
+ {
+ emit_insn (gen_reload_outqi_help
+ (operands[0], operands[1],
+ gen_rtx_REG (SImode, REGNO (operands[2])),
+ gen_rtx_REG (SImode, REGNO (operands[2]) + 1)));
+ }
+ else
+ {
+ rtx addr = get_unaligned_address (operands[0]);
+ rtx scratch1 = gen_rtx_REG (DImode, REGNO (operands[2]));
+ rtx scratch2 = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
+ rtx scratch3 = scratch1;
+ rtx seq;
+
+ if (GET_CODE (addr) == REG)
+ scratch1 = addr;
+
+ seq = gen_unaligned_storeqi (addr, operands[1], scratch1,
+ scratch2, scratch3);
+ alpha_set_memflags (seq, operands[0]);
+ emit_insn (seq);
+ }
+ DONE;
+})
+
+(define_expand "reload_outhi"
+ [(parallel [(match_operand:HI 0 "any_memory_operand" "=m")
+ (match_operand:HI 1 "register_operand" "r")
+ (match_operand:TI 2 "register_operand" "=&r")])]
+ "! TARGET_BWX"
+{
+ if (aligned_memory_operand (operands[0], HImode))
+ {
+ emit_insn (gen_reload_outhi_help
+ (operands[0], operands[1],
+ gen_rtx_REG (SImode, REGNO (operands[2])),
+ gen_rtx_REG (SImode, REGNO (operands[2]) + 1)));
+ }
+ else
+ {
+ rtx addr = get_unaligned_address (operands[0]);
+ rtx scratch1 = gen_rtx_REG (DImode, REGNO (operands[2]));
+ rtx scratch2 = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
+ rtx scratch3 = scratch1;
+ rtx seq;
+
+ if (GET_CODE (addr) == REG)
+ scratch1 = addr;
+
+ seq = gen_unaligned_storehi (addr, operands[1], scratch1,
+ scratch2, scratch3);
+ alpha_set_memflags (seq, operands[0]);
+ emit_insn (seq);
+ }
+ DONE;
+})
+
+;; Helpers for the above. The way reload is structured, we can't
+;; always get a proper address for a stack slot during reload_foo
+;; expansion, so we must delay our address manipulations until after.
+
+(define_insn_and_split "reload_inqi_help"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (match_operand:QI 1 "memory_operand" "m"))
+ (clobber (match_operand:SI 2 "register_operand" "=r"))]
+ "! TARGET_BWX && (reload_in_progress || reload_completed)"
+ "#"
+ "! TARGET_BWX && reload_completed"
+ [(const_int 0)]
+{
+ rtx aligned_mem, bitnum;
+ get_aligned_mem (operands[1], &aligned_mem, &bitnum);
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ emit_insn (gen_aligned_loadqi (operands[0], aligned_mem, bitnum,
+ operands[2]));
+ DONE;
+})
+
+(define_insn_and_split "reload_inhi_help"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (match_operand:HI 1 "memory_operand" "m"))
+ (clobber (match_operand:SI 2 "register_operand" "=r"))]
+ "! TARGET_BWX && (reload_in_progress || reload_completed)"
+ "#"
+ "! TARGET_BWX && reload_completed"
+ [(const_int 0)]
+{
+ rtx aligned_mem, bitnum;
+ get_aligned_mem (operands[1], &aligned_mem, &bitnum);
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ emit_insn (gen_aligned_loadhi (operands[0], aligned_mem, bitnum,
+ operands[2]));
+ DONE;
+})
+
+(define_insn_and_split "reload_outqi_help"
+ [(set (match_operand:QI 0 "memory_operand" "=m")
+ (match_operand:QI 1 "register_operand" "r"))
+ (clobber (match_operand:SI 2 "register_operand" "=r"))
+ (clobber (match_operand:SI 3 "register_operand" "=r"))]
+ "! TARGET_BWX && (reload_in_progress || reload_completed)"
+ "#"
+ "! TARGET_BWX && reload_completed"
+ [(const_int 0)]
+{
+ rtx aligned_mem, bitnum;
+ get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+ emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
+ operands[2], operands[3]));
+ DONE;
+})
+
+(define_insn_and_split "reload_outhi_help"
+ [(set (match_operand:HI 0 "memory_operand" "=m")
+ (match_operand:HI 1 "register_operand" "r"))
+ (clobber (match_operand:SI 2 "register_operand" "=r"))
+ (clobber (match_operand:SI 3 "register_operand" "=r"))]
+ "! TARGET_BWX && (reload_in_progress || reload_completed)"
+ "#"
+ "! TARGET_BWX && reload_completed"
+ [(const_int 0)]
+{
+ rtx aligned_mem, bitnum;
+ get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+ emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
+ operands[2], operands[3]));
+ DONE;
+})
+
+;; Vector operations
+
+(define_mode_macro VEC [V8QI V4HI V2SI])
+
+(define_expand "mov<mode>"
+ [(set (match_operand:VEC 0 "nonimmediate_operand" "")
+ (match_operand:VEC 1 "general_operand" ""))]
+ ""
+{
+ if (alpha_expand_mov (<MODE>mode, operands))
+ DONE;
+})
+
+(define_split
+ [(set (match_operand:VEC 0 "register_operand" "")
+ (match_operand:VEC 1 "non_zero_const_operand" ""))]
+ ""
+ [(const_int 0)]
+{
+ if (alpha_split_const_mov (<MODE>mode, operands))
+ DONE;
+ else
+ FAIL;
+})
+
+(define_expand "movmisalign<mode>"
+ [(set (match_operand:VEC 0 "nonimmediate_operand" "")
+ (match_operand:VEC 1 "general_operand" ""))]
+ ""
+{
+ alpha_expand_movmisalign (<MODE>mode, operands);
+ DONE;
+})
+
+(define_insn "*mov<mode>_fix"
+ [(set (match_operand:VEC 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,m,r,*f")
+ (match_operand:VEC 1 "input_operand" "rW,i,m,rW,*fW,m,*f,*f,r"))]
+ "TARGET_FIX
+ && (register_operand (operands[0], <MODE>mode)
+ || reg_or_0_operand (operands[1], <MODE>mode))"
+ "@
+ bis $31,%r1,%0
+ #
+ ldq %0,%1
+ stq %r1,%0
+ cpys %R1,%R1,%0
+ ldt %0,%1
+ stt %R1,%0
+ ftoit %1,%0
+ itoft %1,%0"
+ [(set_attr "type" "ilog,multi,ild,ist,fcpys,fld,fst,ftoi,itof")])
+
+(define_insn "*mov<mode>_nofix"
+ [(set (match_operand:VEC 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,m")
+ (match_operand:VEC 1 "input_operand" "rW,i,m,rW,*fW,m,*f"))]
+ "! TARGET_FIX
+ && (register_operand (operands[0], <MODE>mode)
+ || reg_or_0_operand (operands[1], <MODE>mode))"
+ "@
+ bis $31,%r1,%0
+ #
+ ldq %0,%1
+ stq %r1,%0
+ cpys %R1,%R1,%0
+ ldt %0,%1
+ stt %R1,%0"
+ [(set_attr "type" "ilog,multi,ild,ist,fcpys,fld,fst")])
+
+(define_insn "uminv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=r")
+ (umin:V8QI (match_operand:V8QI 1 "reg_or_0_operand" "rW")
+ (match_operand:V8QI 2 "reg_or_0_operand" "rW")))]
+ "TARGET_MAX"
+ "minub8 %r1,%r2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "sminv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=r")
+ (smin:V8QI (match_operand:V8QI 1 "reg_or_0_operand" "rW")
+ (match_operand:V8QI 2 "reg_or_0_operand" "rW")))]
+ "TARGET_MAX"
+ "minsb8 %r1,%r2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "uminv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=r")
+ (umin:V4HI (match_operand:V4HI 1 "reg_or_0_operand" "rW")
+ (match_operand:V4HI 2 "reg_or_0_operand" "rW")))]
+ "TARGET_MAX"
+ "minuw4 %r1,%r2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "sminv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=r")
+ (smin:V4HI (match_operand:V4HI 1 "reg_or_0_operand" "rW")
+ (match_operand:V4HI 2 "reg_or_0_operand" "rW")))]
+ "TARGET_MAX"
+ "minsw4 %r1,%r2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "umaxv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=r")
+ (umax:V8QI (match_operand:V8QI 1 "reg_or_0_operand" "rW")
+ (match_operand:V8QI 2 "reg_or_0_operand" "rW")))]
+ "TARGET_MAX"
+ "maxub8 %r1,%r2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "smaxv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=r")
+ (smax:V8QI (match_operand:V8QI 1 "reg_or_0_operand" "rW")
+ (match_operand:V8QI 2 "reg_or_0_operand" "rW")))]
+ "TARGET_MAX"
+ "maxsb8 %r1,%r2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "umaxv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=r")
+ (umax:V4HI (match_operand:V4HI 1 "reg_or_0_operand" "rW")
+ (match_operand:V4HI 2 "reg_or_0_operand" "rW")))]
+ "TARGET_MAX"
+ "maxuw4 %r1,%r2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "smaxv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=r")
+ (smax:V4HI (match_operand:V4HI 1 "reg_or_0_operand" "rW")
+ (match_operand:V4HI 2 "reg_or_0_operand" "rW")))]
+ "TARGET_MAX"
+ "maxsw4 %r1,%r2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:VEC 0 "register_operand" "=r")
+ (not:VEC (match_operand:VEC 1 "register_operand" "r")))]
+ ""
+ "ornot $31,%1,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "and<mode>3"
+ [(set (match_operand:VEC 0 "register_operand" "=r")
+ (and:VEC (match_operand:VEC 1 "register_operand" "r")
+ (match_operand:VEC 2 "register_operand" "r")))]
+ ""
+ "and %1,%2,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "*andnot<mode>3"
+ [(set (match_operand:VEC 0 "register_operand" "=r")
+ (and:VEC (not:VEC (match_operand:VEC 1 "register_operand" "r"))
+ (match_operand:VEC 2 "register_operand" "r")))]
+ ""
+ "bic %2,%1,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "ior<mode>3"
+ [(set (match_operand:VEC 0 "register_operand" "=r")
+ (ior:VEC (match_operand:VEC 1 "register_operand" "r")
+ (match_operand:VEC 2 "register_operand" "r")))]
+ ""
+ "bis %1,%2,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "*iornot<mode>3"
+ [(set (match_operand:VEC 0 "register_operand" "=r")
+	(ior:VEC (not:VEC (match_operand:VEC 1 "register_operand" "r"))
+ (match_operand:VEC 2 "register_operand" "r")))]
+ ""
+ "ornot %2,%1,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "xor<mode>3"
+ [(set (match_operand:VEC 0 "register_operand" "=r")
+ (xor:VEC (match_operand:VEC 1 "register_operand" "r")
+ (match_operand:VEC 2 "register_operand" "r")))]
+ ""
+ "xor %1,%2,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "*xornot<mode>3"
+ [(set (match_operand:VEC 0 "register_operand" "=r")
+ (not:VEC (xor:VEC (match_operand:VEC 1 "register_operand" "r")
+ (match_operand:VEC 2 "register_operand" "r"))))]
+ ""
+ "eqv %1,%2,%0"
+ [(set_attr "type" "ilog")])
+
+(define_expand "vec_shl_<mode>"
+ [(set (match_operand:VEC 0 "register_operand" "")
+ (ashift:DI (match_operand:VEC 1 "register_operand" "")
+ (match_operand:DI 2 "reg_or_6bit_operand" "")))]
+ ""
+{
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, operands[1]);
+})
+
+(define_expand "vec_shr_<mode>"
+ [(set (match_operand:VEC 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:VEC 1 "register_operand" "")
+ (match_operand:DI 2 "reg_or_6bit_operand" "")))]
+ ""
+{
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, operands[1]);
+})
+
+;; Bit field extract patterns which use ext[wlq][lh]
+
+(define_expand "extv"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extract:DI (match_operand:QI 1 "memory_operand" "")
+ (match_operand:DI 2 "immediate_operand" "")
+ (match_operand:DI 3 "immediate_operand" "")))]
+ ""
+{
+ int ofs;
+
+ /* We can do 16, 32 and 64 bit fields, if aligned on byte boundaries. */
+ if (INTVAL (operands[3]) % 8 != 0
+ || (INTVAL (operands[2]) != 16
+ && INTVAL (operands[2]) != 32
+ && INTVAL (operands[2]) != 64))
+ FAIL;
+
+ /* From mips.md: extract_bit_field doesn't verify that our source
+ matches the predicate, so we force it to be a MEM here. */
+ if (GET_CODE (operands[1]) != MEM)
+ FAIL;
+
+  /* The bit number is relative to the mode of operand 1, which is
+     usually QImode (this might actually be a bug in expmed.c).  Note
+     that the bit number is negative in big-endian mode in this case.
+     We have to convert that to the offset.  */
+ if (WORDS_BIG_ENDIAN)
+ ofs = GET_MODE_BITSIZE (GET_MODE (operands[1]))
+ - INTVAL (operands[2]) - INTVAL (operands[3]);
+ else
+ ofs = INTVAL (operands[3]);
+
+ ofs = ofs / 8;
+
+ alpha_expand_unaligned_load (operands[0], operands[1],
+ INTVAL (operands[2]) / 8,
+ ofs, 1);
+ DONE;
+})
+
+(define_expand "extzv"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extract:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (match_operand:DI 2 "immediate_operand" "")
+ (match_operand:DI 3 "immediate_operand" "")))]
+ ""
+{
+ /* We can do 8, 16, 32 and 64 bit fields, if aligned on byte boundaries. */
+ if (INTVAL (operands[3]) % 8 != 0
+ || (INTVAL (operands[2]) != 8
+ && INTVAL (operands[2]) != 16
+ && INTVAL (operands[2]) != 32
+ && INTVAL (operands[2]) != 64))
+ FAIL;
+
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ int ofs;
+
+ /* Fail 8 bit fields, falling back on a simple byte load. */
+ if (INTVAL (operands[2]) == 8)
+ FAIL;
+
+  /* The bit number is relative to the mode of operand 1, which is
+     usually QImode (this might actually be a bug in expmed.c).  Note
+     that the bit number is negative in big-endian mode in this case.
+     We have to convert that to the offset.  */
+ if (WORDS_BIG_ENDIAN)
+ ofs = GET_MODE_BITSIZE (GET_MODE (operands[1]))
+ - INTVAL (operands[2]) - INTVAL (operands[3]);
+ else
+ ofs = INTVAL (operands[3]);
+
+ ofs = ofs / 8;
+
+ alpha_expand_unaligned_load (operands[0], operands[1],
+ INTVAL (operands[2]) / 8,
+ ofs, 0);
+ DONE;
+ }
+})
+
+(define_expand "insv"
+ [(set (zero_extract:DI (match_operand:QI 0 "memory_operand" "")
+ (match_operand:DI 1 "immediate_operand" "")
+ (match_operand:DI 2 "immediate_operand" ""))
+ (match_operand:DI 3 "register_operand" ""))]
+ ""
+{
+ int ofs;
+
+ /* We can do 16, 32 and 64 bit fields, if aligned on byte boundaries. */
+ if (INTVAL (operands[2]) % 8 != 0
+ || (INTVAL (operands[1]) != 16
+ && INTVAL (operands[1]) != 32
+ && INTVAL (operands[1]) != 64))
+ FAIL;
+
+ /* From mips.md: store_bit_field doesn't verify that our source
+ matches the predicate, so we force it to be a MEM here. */
+ if (GET_CODE (operands[0]) != MEM)
+ FAIL;
+
+  /* The bit number is relative to the mode of operand 0, which is
+     usually QImode (this might actually be a bug in expmed.c).  Note
+     that the bit number is negative in big-endian mode in this case.
+     We have to convert that to the offset.  */
+ if (WORDS_BIG_ENDIAN)
+ ofs = GET_MODE_BITSIZE (GET_MODE (operands[0]))
+ - INTVAL (operands[1]) - INTVAL (operands[2]);
+ else
+ ofs = INTVAL (operands[2]);
+
+ ofs = ofs / 8;
+
+ alpha_expand_unaligned_store (operands[0], operands[3],
+ INTVAL (operands[1]) / 8, ofs);
+ DONE;
+})
+
+;; Block move/clear, see alpha.c for more details.
+;; Argument 0 is the destination
+;; Argument 1 is the source
+;; Argument 2 is the length
+;; Argument 3 is the alignment
+
+(define_expand "movmemqi"
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand:BLK 1 "memory_operand" ""))
+ (use (match_operand:DI 2 "immediate_operand" ""))
+ (use (match_operand:DI 3 "immediate_operand" ""))])]
+ ""
+{
+ if (alpha_expand_block_move (operands))
+ DONE;
+ else
+ FAIL;
+})
+
+(define_expand "movmemdi"
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand:BLK 1 "memory_operand" ""))
+ (use (match_operand:DI 2 "immediate_operand" ""))
+ (use (match_operand:DI 3 "immediate_operand" ""))
+ (use (match_dup 4))
+ (clobber (reg:DI 25))
+ (clobber (reg:DI 16))
+ (clobber (reg:DI 17))
+ (clobber (reg:DI 18))
+ (clobber (reg:DI 19))
+ (clobber (reg:DI 20))
+ (clobber (reg:DI 26))
+ (clobber (reg:DI 27))])]
+ "TARGET_ABI_OPEN_VMS"
+{
+ operands[4] = gen_rtx_SYMBOL_REF (Pmode, "OTS$MOVE");
+ alpha_need_linkage (XSTR (operands[4], 0), 0);
+})
+
+(define_insn "*movmemdi_1"
+ [(set (match_operand:BLK 0 "memory_operand" "=m,=m")
+ (match_operand:BLK 1 "memory_operand" "m,m"))
+ (use (match_operand:DI 2 "nonmemory_operand" "r,i"))
+ (use (match_operand:DI 3 "immediate_operand" ""))
+ (use (match_operand:DI 4 "call_operand" "i,i"))
+ (clobber (reg:DI 25))
+ (clobber (reg:DI 16))
+ (clobber (reg:DI 17))
+ (clobber (reg:DI 18))
+ (clobber (reg:DI 19))
+ (clobber (reg:DI 20))
+ (clobber (reg:DI 26))
+ (clobber (reg:DI 27))]
+ "TARGET_ABI_OPEN_VMS"
+{
+  operands[5] = alpha_use_linkage (operands[4], cfun->decl, 0, 1);
+ switch (which_alternative)
+ {
+ case 0:
+ return "lda $16,%0\;bis $31,%2,$17\;lda $18,%1\;ldq $26,%5\;lda $25,3($31)\;jsr $26,%4\;ldq $27,0($29)";
+ case 1:
+ return "lda $16,%0\;lda $17,%2($31)\;lda $18,%1\;ldq $26,%5\;lda $25,3($31)\;jsr $26,%4\;ldq $27,0($29)";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "multi")
+ (set_attr "length" "28")])
+
+(define_expand "setmemqi"
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand 2 "const_int_operand" ""))
+ (use (match_operand:DI 1 "immediate_operand" ""))
+ (use (match_operand:DI 3 "immediate_operand" ""))])]
+ ""
+{
+  /* If the value to set is not zero, use the library routine.  */
+ if (operands[2] != const0_rtx)
+ FAIL;
+
+ if (alpha_expand_block_clear (operands))
+ DONE;
+ else
+ FAIL;
+})
+
+(define_expand "setmemdi"
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand 2 "const_int_operand" ""))
+ (use (match_operand:DI 1 "immediate_operand" ""))
+ (use (match_operand:DI 3 "immediate_operand" ""))
+ (use (match_dup 4))
+ (clobber (reg:DI 25))
+ (clobber (reg:DI 16))
+ (clobber (reg:DI 17))
+ (clobber (reg:DI 26))
+ (clobber (reg:DI 27))])]
+ "TARGET_ABI_OPEN_VMS"
+{
+  /* If the value to set is not zero, use the library routine.  */
+ if (operands[2] != const0_rtx)
+ FAIL;
+
+ operands[4] = gen_rtx_SYMBOL_REF (Pmode, "OTS$ZERO");
+ alpha_need_linkage (XSTR (operands[4], 0), 0);
+})
+
+(define_insn "*clrmemdi_1"
+ [(set (match_operand:BLK 0 "memory_operand" "=m,=m")
+ (const_int 0))
+ (use (match_operand:DI 1 "nonmemory_operand" "r,i"))
+ (use (match_operand:DI 2 "immediate_operand" ""))
+ (use (match_operand:DI 3 "call_operand" "i,i"))
+ (clobber (reg:DI 25))
+ (clobber (reg:DI 16))
+ (clobber (reg:DI 17))
+ (clobber (reg:DI 26))
+ (clobber (reg:DI 27))]
+ "TARGET_ABI_OPEN_VMS"
+{
+  operands[4] = alpha_use_linkage (operands[3], cfun->decl, 0, 1);
+ switch (which_alternative)
+ {
+ case 0:
+ return "lda $16,%0\;bis $31,%1,$17\;ldq $26,%4\;lda $25,2($31)\;jsr $26,%3\;ldq $27,0($29)";
+ case 1:
+ return "lda $16,%0\;lda $17,%1($31)\;ldq $26,%4\;lda $25,2($31)\;jsr $26,%3\;ldq $27,0($29)";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "multi")
+ (set_attr "length" "24")])
+
+;; Subroutine of stack space allocation. Perform a stack probe.
+(define_expand "probe_stack"
+ [(set (match_dup 1) (match_operand:DI 0 "const_int_operand" ""))]
+ ""
+{
+ operands[1] = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx,
+ INTVAL (operands[0])));
+ MEM_VOLATILE_P (operands[1]) = 1;
+
+ operands[0] = const0_rtx;
+})
+
+;; This is how we allocate stack space. If we are allocating a
+;; constant amount of space and we know it is less than 4096
+;; bytes, we need do nothing.
+;;
+;; If it is more than 4096 bytes, we need to probe the stack
+;; periodically.
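+;;
+;; For example, a request for 20000 bytes probes at -4096 and -12288
+;; below the incoming stack pointer, probes the final address at -20000,
+;; and only then adjusts the stack pointer.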
+(define_expand "allocate_stack"
+ [(set (reg:DI 30)
+ (plus:DI (reg:DI 30)
+ (match_operand:DI 1 "reg_or_cint_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (match_dup 2))]
+ ""
+{
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < 32768)
+ {
+ if (INTVAL (operands[1]) >= 4096)
+ {
+ /* We do this the same way as in the prologue and generate explicit
+ probes. Then we update the stack by the constant. */
+
+ int probed = 4096;
+
+ emit_insn (gen_probe_stack (GEN_INT (- probed)));
+ while (probed + 8192 < INTVAL (operands[1]))
+ emit_insn (gen_probe_stack (GEN_INT (- (probed += 8192))));
+
+ if (probed + 4096 < INTVAL (operands[1]))
+	  emit_insn (gen_probe_stack (GEN_INT (- INTVAL (operands[1]))));
+ }
+
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ operands[2] = virtual_stack_dynamic_rtx;
+ }
+ else
+ {
+ rtx out_label = 0;
+ rtx loop_label = gen_label_rtx ();
+ rtx want = gen_reg_rtx (Pmode);
+ rtx tmp = gen_reg_rtx (Pmode);
+ rtx memref;
+
+ emit_insn (gen_subdi3 (want, stack_pointer_rtx,
+ force_reg (Pmode, operands[1])));
+ emit_insn (gen_adddi3 (tmp, stack_pointer_rtx, GEN_INT (-4096)));
+
+ if (GET_CODE (operands[1]) != CONST_INT)
+ {
+ out_label = gen_label_rtx ();
+ emit_insn (gen_cmpdi (want, tmp));
+ emit_jump_insn (gen_bgeu (out_label));
+ }
+
+ emit_label (loop_label);
+ memref = gen_rtx_MEM (DImode, tmp);
+ MEM_VOLATILE_P (memref) = 1;
+ emit_move_insn (memref, const0_rtx);
+      emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (-8192)));
+ emit_insn (gen_cmpdi (tmp, want));
+ emit_jump_insn (gen_bgtu (loop_label));
+
+ memref = gen_rtx_MEM (DImode, want);
+ MEM_VOLATILE_P (memref) = 1;
+ emit_move_insn (memref, const0_rtx);
+
+ if (out_label)
+ emit_label (out_label);
+
+ emit_move_insn (stack_pointer_rtx, want);
+ emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
+ DONE;
+ }
+})
+
+;; This is used by alpha_expand_prolog to do the same thing as above,
+;; except we cannot at that time generate new basic blocks, so we hide
+;; the loop in this one insn.
+
+(define_insn "prologue_stack_probe_loop"
+ [(unspec_volatile [(match_operand:DI 0 "register_operand" "r")
+ (match_operand:DI 1 "register_operand" "r")]
+ UNSPECV_PSPL)]
+ ""
+{
+ operands[2] = gen_label_rtx ();
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (operands[2]));
+
+ return "stq $31,-8192(%1)\;subq %0,1,%0\;lda %1,-8192(%1)\;bne %0,%l2";
+}
+ [(set_attr "length" "16")
+ (set_attr "type" "multi")])
+
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+{
+ alpha_expand_prologue ();
+ DONE;
+})
+
+;; These take care of emitting the ldgp insn in the prologue. This will be
+;; an lda/ldah pair and we want to align them properly. So we have two
+;; unspec_volatile insns, the first of which emits the ldgp assembler macro
+;; and the second of which emits nothing. However, both are marked as type
+;; IADD (the default) so the alignment code in alpha.c does the right thing
+;; with them.
+
+(define_expand "prologue_ldgp"
+ [(set (match_dup 0)
+ (unspec_volatile:DI [(match_dup 1) (match_dup 2)] UNSPECV_LDGP1))
+ (set (match_dup 0)
+ (unspec_volatile:DI [(match_dup 0) (match_dup 2)] UNSPECV_PLDGP2))]
+ ""
+{
+ operands[0] = pic_offset_table_rtx;
+ operands[1] = gen_rtx_REG (Pmode, 27);
+ operands[2] = (TARGET_EXPLICIT_RELOCS
+ ? GEN_INT (alpha_next_sequence_number++)
+ : const0_rtx);
+})
+
+(define_insn "*ldgp_er_1"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "")]
+ UNSPECV_LDGP1))]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "ldah %0,0(%1)\t\t!gpdisp!%2"
+ [(set_attr "cannot_copy" "true")])
+
+(define_insn "*ldgp_er_2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "")]
+ UNSPEC_LDGP2))]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "lda %0,0(%1)\t\t!gpdisp!%2"
+ [(set_attr "cannot_copy" "true")])
+
+(define_insn "*prologue_ldgp_er_2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "")]
+ UNSPECV_PLDGP2))]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "lda %0,0(%1)\t\t!gpdisp!%2\n$%~..ng:"
+ [(set_attr "cannot_copy" "true")])
+
+(define_insn "*prologue_ldgp_1"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "")]
+ UNSPECV_LDGP1))]
+ ""
+ "ldgp %0,0(%1)\n$%~..ng:"
+ [(set_attr "cannot_copy" "true")])
+
+(define_insn "*prologue_ldgp_2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "")]
+ UNSPECV_PLDGP2))]
+ ""
+ "")
+
+;; The _mcount profiling hook has special calling conventions, and
+;; does not clobber all the registers that a normal call would.  So
+;; we hide the fact that this is a call at all.
+
+(define_insn "prologue_mcount"
+ [(unspec_volatile [(const_int 0)] UNSPECV_MCOUNT)]
+ ""
+{
+ if (TARGET_EXPLICIT_RELOCS)
+ /* Note that we cannot use a lituse_jsr reloc, since _mcount
+ cannot be called via the PLT. */
+ return "ldq $28,_mcount($29)\t\t!literal\;jsr $28,($28),_mcount";
+ else
+ return "lda $28,_mcount\;jsr $28,($28),_mcount";
+}
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "init_fp"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "register_operand" "r"))
+ (clobber (mem:BLK (match_operand:DI 2 "register_operand" "=r")))]
+ ""
+ "bis $31,%1,%0")
+
+(define_expand "epilogue"
+ [(return)]
+ ""
+{
+ alpha_expand_epilogue ();
+})
+
+(define_expand "sibcall_epilogue"
+ [(return)]
+ "TARGET_ABI_OSF"
+{
+ alpha_expand_epilogue ();
+ DONE;
+})
+
+(define_expand "builtin_longjmp"
+ [(use (match_operand:DI 0 "register_operand" "r"))]
+ "TARGET_ABI_OSF"
+{
+ /* The elements of the buffer are, in order: */
+ rtx fp = gen_rtx_MEM (Pmode, operands[0]);
+ rtx lab = gen_rtx_MEM (Pmode, plus_constant (operands[0], 8));
+ rtx stack = gen_rtx_MEM (Pmode, plus_constant (operands[0], 16));
+ rtx pv = gen_rtx_REG (Pmode, 27);
+
+ /* This bit is the same as expand_builtin_longjmp. */
+ emit_move_insn (hard_frame_pointer_rtx, fp);
+ emit_move_insn (pv, lab);
+ emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
+ emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
+ emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+
+ /* Load the label we are jumping through into $27 so that we know
+ where to look for it when we get back to setjmp's function for
+ restoring the gp. */
+ emit_jump_insn (gen_builtin_longjmp_internal (pv));
+ emit_barrier ();
+ DONE;
+})
+
+;; This is effectively a copy of indirect_jump, but constrained such
+;; that register renaming cannot foil our cunning plan with $27.
+(define_insn "builtin_longjmp_internal"
+ [(set (pc)
+ (unspec_volatile [(match_operand:DI 0 "register_operand" "c")]
+ UNSPECV_LONGJMP))]
+ ""
+ "jmp $31,(%0),0"
+ [(set_attr "type" "ibr")])
+
+(define_expand "builtin_setjmp_receiver"
+ [(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)]
+ "TARGET_ABI_OSF"
+ "")
+
+(define_insn_and_split "*builtin_setjmp_receiver_1"
+ [(unspec_volatile [(match_operand 0 "" "")] UNSPECV_SETJMPR)]
+ "TARGET_ABI_OSF"
+{
+ if (TARGET_EXPLICIT_RELOCS)
+ return "#";
+ else
+ return "br $27,$LSJ%=\n$LSJ%=:\;ldgp $29,0($27)";
+}
+ "&& TARGET_EXPLICIT_RELOCS && reload_completed"
+ [(set (match_dup 1)
+ (unspec_volatile:DI [(match_dup 2) (match_dup 3)] UNSPECV_LDGP1))
+ (set (match_dup 1)
+ (unspec:DI [(match_dup 1) (match_dup 3)] UNSPEC_LDGP2))]
+{
+ if (prev_nonnote_insn (curr_insn) != XEXP (operands[0], 0))
+ emit_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (1, operands[0]),
+ UNSPECV_SETJMPR_ER));
+ operands[1] = pic_offset_table_rtx;
+ operands[2] = gen_rtx_REG (Pmode, 27);
+ operands[3] = GEN_INT (alpha_next_sequence_number++);
+}
+ [(set_attr "length" "12")
+ (set_attr "type" "multi")])
+
+(define_insn "*builtin_setjmp_receiver_er_sl_1"
+ [(unspec_volatile [(match_operand 0 "" "")] UNSPECV_SETJMPR_ER)]
+ "TARGET_ABI_OSF && TARGET_EXPLICIT_RELOCS && TARGET_AS_CAN_SUBTRACT_LABELS"
+ "lda $27,$LSJ%=-%l0($27)\n$LSJ%=:")
+
+(define_insn "*builtin_setjmp_receiver_er_1"
+ [(unspec_volatile [(match_operand 0 "" "")] UNSPECV_SETJMPR_ER)]
+ "TARGET_ABI_OSF && TARGET_EXPLICIT_RELOCS"
+ "br $27,$LSJ%=\n$LSJ%=:"
+ [(set_attr "type" "ibr")])
+
+(define_expand "exception_receiver"
+ [(unspec_volatile [(match_dup 0)] UNSPECV_EHR)]
+ "TARGET_ABI_OSF"
+{
+ if (TARGET_LD_BUGGY_LDGP)
+ operands[0] = alpha_gp_save_rtx ();
+ else
+ operands[0] = const0_rtx;
+})
+
+(define_insn "*exception_receiver_2"
+ [(unspec_volatile [(match_operand:DI 0 "memory_operand" "m")] UNSPECV_EHR)]
+ "TARGET_ABI_OSF && TARGET_LD_BUGGY_LDGP"
+ "ldq $29,%0"
+ [(set_attr "type" "ild")])
+
+(define_insn_and_split "*exception_receiver_1"
+ [(unspec_volatile [(const_int 0)] UNSPECV_EHR)]
+ "TARGET_ABI_OSF"
+{
+ if (TARGET_EXPLICIT_RELOCS)
+ return "ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%*";
+ else
+ return "ldgp $29,0($26)";
+}
+ "&& TARGET_EXPLICIT_RELOCS && reload_completed"
+ [(set (match_dup 0)
+ (unspec_volatile:DI [(match_dup 1) (match_dup 2)] UNSPECV_LDGP1))
+ (set (match_dup 0)
+ (unspec:DI [(match_dup 0) (match_dup 2)] UNSPEC_LDGP2))]
+{
+ operands[0] = pic_offset_table_rtx;
+ operands[1] = gen_rtx_REG (Pmode, 26);
+ operands[2] = GEN_INT (alpha_next_sequence_number++);
+}
+ [(set_attr "length" "8")
+ (set_attr "type" "multi")])
+
+(define_expand "nonlocal_goto_receiver"
+ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)
+ (set (reg:DI 27) (mem:DI (reg:DI 29)))
+ (unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)
+ (use (reg:DI 27))]
+ "TARGET_ABI_OPEN_VMS"
+ "")
+
+(define_insn "arg_home"
+ [(unspec [(const_int 0)] UNSPEC_ARG_HOME)
+ (use (reg:DI 1))
+ (use (reg:DI 25))
+ (use (reg:DI 16))
+ (use (reg:DI 17))
+ (use (reg:DI 18))
+ (use (reg:DI 19))
+ (use (reg:DI 20))
+ (use (reg:DI 21))
+ (use (reg:DI 48))
+ (use (reg:DI 49))
+ (use (reg:DI 50))
+ (use (reg:DI 51))
+ (use (reg:DI 52))
+ (use (reg:DI 53))
+ (clobber (mem:BLK (const_int 0)))
+ (clobber (reg:DI 24))
+ (clobber (reg:DI 25))
+ (clobber (reg:DI 0))]
+ "TARGET_ABI_OPEN_VMS"
+ "lda $0,OTS$HOME_ARGS\;ldq $0,8($0)\;jsr $0,OTS$HOME_ARGS"
+ [(set_attr "length" "16")
+ (set_attr "type" "multi")])
+
+;; Load the CIW into r2 for calling __T3E_MISMATCH
+
+(define_expand "umk_mismatch_args"
+ [(set:DI (match_dup 1) (mem:DI (plus:DI (reg:DI 15) (const_int -16))))
+ (set:DI (match_dup 2) (mem:DI (plus:DI (match_dup 1) (const_int -32))))
+ (set:DI (reg:DI 1) (match_operand:DI 0 "const_int_operand" ""))
+ (set:DI (match_dup 3) (plus:DI (mult:DI (reg:DI 25)
+ (const_int 8))
+ (match_dup 2)))
+ (set:DI (reg:DI 2) (mem:DI (match_dup 3)))]
+ "TARGET_ABI_UNICOSMK"
+{
+ operands[1] = gen_reg_rtx (DImode);
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = gen_reg_rtx (DImode);
+})
+
+(define_insn "arg_home_umk"
+ [(unspec [(const_int 0)] UNSPEC_ARG_HOME)
+ (use (reg:DI 1))
+ (use (reg:DI 2))
+ (use (reg:DI 16))
+ (use (reg:DI 17))
+ (use (reg:DI 18))
+ (use (reg:DI 19))
+ (use (reg:DI 20))
+ (use (reg:DI 21))
+ (use (reg:DI 48))
+ (use (reg:DI 49))
+ (use (reg:DI 50))
+ (use (reg:DI 51))
+ (use (reg:DI 52))
+ (use (reg:DI 53))
+ (clobber (mem:BLK (const_int 0)))
+ (parallel [
+ (clobber (reg:DI 22))
+ (clobber (reg:DI 23))
+ (clobber (reg:DI 24))
+ (clobber (reg:DI 0))
+ (clobber (reg:DI 1))
+ (clobber (reg:DI 2))
+ (clobber (reg:DI 3))
+ (clobber (reg:DI 4))
+ (clobber (reg:DI 5))
+ (clobber (reg:DI 6))
+ (clobber (reg:DI 7))
+ (clobber (reg:DI 8))])]
+ "TARGET_ABI_UNICOSMK"
+ "laum $4,__T3E_MISMATCH($31)\;sll $4,32,$4\;lalm $4,__T3E_MISMATCH($4)\;lal $4,__T3E_MISMATCH($4)\;jsr $3,($4)"
+ [(set_attr "length" "16")
+ (set_attr "type" "multi")])
+
+;; Prefetch data.
+;;
+;; On EV4, these instructions are nops -- no load occurs.
+;;
+;; On EV5, these instructions act as a normal load, and thus can trap
+;; if the address is invalid. The OS may (or may not) handle this in
+;; the entMM fault handler and suppress the fault. If so, then this
+;; has the effect of a read prefetch instruction.
+;;
+;; On EV6, these become official prefetch instructions.
+
+(define_insn "prefetch"
+ [(prefetch (match_operand:DI 0 "address_operand" "p")
+ (match_operand:DI 1 "const_int_operand" "n")
+ (match_operand:DI 2 "const_int_operand" "n"))]
+ "TARGET_FIXUP_EV5_PREFETCH || alpha_cpu == PROCESSOR_EV6"
+{
+ /* Interpret "no temporal locality" as this data should be evicted once
+ it is used. The "evict next" alternatives load the data into the cache
+ and leave the LRU eviction counter pointing to that block. */
+ static const char * const alt[2][2] = {
+ {
+ "ldq $31,%a0", /* read, evict next */
+ "ldl $31,%a0", /* read, evict last */
+ },
+ {
+ "ldt $f31,%a0", /* write, evict next */
+ "lds $f31,%a0", /* write, evict last */
+ }
+ };
+
+ bool write = INTVAL (operands[1]) != 0;
+ bool lru = INTVAL (operands[2]) != 0;
+
+ return alt[write][lru];
+}
+ [(set_attr "type" "ild")])
+
+;; Close the trap shadow of preceding instructions. This is generated
+;; by alpha_reorg.
+
+(define_insn "trapb"
+ [(unspec_volatile [(const_int 0)] UNSPECV_TRAPB)]
+ ""
+ "trapb"
+ [(set_attr "type" "misc")])
+
+;; No-op instructions used by machine-dependent reorg to preserve
+;; alignment for instruction issue.
+;; The Unicos/Mk assembler does not support these opcodes.
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "bis $31,$31,$31"
+ [(set_attr "type" "ilog")])
+
+(define_insn "fnop"
+ [(const_int 1)]
+ "TARGET_FP"
+ "cpys $f31,$f31,$f31"
+ [(set_attr "type" "fcpys")])
+
+(define_insn "unop"
+ [(const_int 2)]
+ ""
+ "ldq_u $31,0($30)")
+
+;; On Unicos/Mk we use a macro for aligning code.
+
+(define_insn "realign"
+ [(unspec_volatile [(match_operand 0 "immediate_operand" "i")]
+ UNSPECV_REALIGN)]
+ ""
+{
+ if (TARGET_ABI_UNICOSMK)
+ return "gcc@code@align %0";
+ else
+ return ".align %0 #realign";
+})
+
+;; Instructions to be emitted from __builtins.
+
+(define_insn "builtin_cmpbge"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI")]
+ UNSPEC_CMPBGE))]
+ ""
+ "cmpbge %r1,%2,%0"
+ ;; The EV6 data sheets list this as ILOG. OTOH, EV6 doesn't
+ ;; actually differentiate between ILOG and ICMP in the schedule.
+ [(set_attr "type" "icmp")])
+
+(define_expand "builtin_extbl"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_extxl_be;
+ else
+ gen = gen_extxl_le;
+ emit_insn ((*gen) (operands[0], operands[1], GEN_INT (8), operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_extwl"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_extxl_be;
+ else
+ gen = gen_extxl_le;
+ emit_insn ((*gen) (operands[0], operands[1], GEN_INT (16), operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_extll"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_extxl_be;
+ else
+ gen = gen_extxl_le;
+ emit_insn ((*gen) (operands[0], operands[1], GEN_INT (32), operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_extql"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_extxl_be;
+ else
+ gen = gen_extxl_le;
+ emit_insn ((*gen) (operands[0], operands[1], GEN_INT (64), operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_extwh"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx);
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_extwh_be;
+ else
+ gen = gen_extwh_le;
+ emit_insn ((*gen) (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_extlh"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx);
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_extlh_be;
+ else
+ gen = gen_extlh_le;
+ emit_insn ((*gen) (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_extqh"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx);
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_extqh_be;
+ else
+ gen = gen_extqh_le;
+ emit_insn ((*gen) (operands[0], operands[1], operands[2]));
+ DONE;
+})
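+
+;; A rough sketch (assuming the documented __builtin_alpha_extql and
+;; __builtin_alpha_extqh interfaces) of what these extract patterns are
+;; for: the canonical Alpha unaligned 8-byte load built from two aligned
+;; loads plus low/high extracts keyed on the original address.
+;;
+;;   static unsigned long load_u64 (const unsigned char *p)
+;;   {
+;;     unsigned long a = (unsigned long) p;
+;;     unsigned long lo = *(const unsigned long *) (a & ~7UL);
+;;     unsigned long hi = *(const unsigned long *) ((a + 7) & ~7UL);
+;;     return __builtin_alpha_extql (lo, a) | __builtin_alpha_extqh (hi, a);
+;;   }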
+
+(define_expand "builtin_insbl"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx);
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_insbl_be;
+ else
+ gen = gen_insbl_le;
+ operands[1] = gen_lowpart (QImode, operands[1]);
+ emit_insn ((*gen) (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_inswl"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx);
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_inswl_be;
+ else
+ gen = gen_inswl_le;
+ operands[1] = gen_lowpart (HImode, operands[1]);
+ emit_insn ((*gen) (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_insll"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx);
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_insll_be;
+ else
+ gen = gen_insll_le;
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn ((*gen) (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_insql"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx);
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_insql_be;
+ else
+ gen = gen_insql_le;
+ emit_insn ((*gen) (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_inswh"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ emit_insn (gen_insxh (operands[0], operands[1], GEN_INT (16), operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_inslh"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ emit_insn (gen_insxh (operands[0], operands[1], GEN_INT (32), operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_insqh"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ emit_insn (gen_insxh (operands[0], operands[1], GEN_INT (64), operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_mskbl"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+ rtx mask;
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_mskxl_be;
+ else
+ gen = gen_mskxl_le;
+ mask = GEN_INT (0xff);
+ emit_insn ((*gen) (operands[0], operands[1], mask, operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_mskwl"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+ rtx mask;
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_mskxl_be;
+ else
+ gen = gen_mskxl_le;
+ mask = GEN_INT (0xffff);
+ emit_insn ((*gen) (operands[0], operands[1], mask, operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_mskll"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+ rtx mask;
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_mskxl_be;
+ else
+ gen = gen_mskxl_le;
+ mask = immed_double_const (0xffffffff, 0, DImode);
+ emit_insn ((*gen) (operands[0], operands[1], mask, operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_mskql"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+ rtx mask;
+ if (WORDS_BIG_ENDIAN)
+ gen = gen_mskxl_be;
+ else
+ gen = gen_mskxl_le;
+ mask = constm1_rtx;
+ emit_insn ((*gen) (operands[0], operands[1], mask, operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_mskwh"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ emit_insn (gen_mskxh (operands[0], operands[1], GEN_INT (16), operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_msklh"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ emit_insn (gen_mskxh (operands[0], operands[1], GEN_INT (32), operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_mskqh"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "reg_or_8bit_operand" "")]
+ ""
+{
+ emit_insn (gen_mskxh (operands[0], operands[1], GEN_INT (64), operands[2]));
+ DONE;
+})
+
+(define_expand "builtin_zap"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (and:DI (unspec:DI
+ [(match_operand:DI 2 "reg_or_cint_operand" "")]
+ UNSPEC_ZAP)
+ (match_operand:DI 1 "reg_or_cint_operand" "")))]
+ ""
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ rtx mask = alpha_expand_zap_mask (INTVAL (operands[2]));
+
+ if (mask == const0_rtx)
+ {
+ emit_move_insn (operands[0], const0_rtx);
+ DONE;
+ }
+ if (mask == constm1_rtx)
+ {
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+ }
+
+ operands[1] = force_reg (DImode, operands[1]);
+ emit_insn (gen_anddi3 (operands[0], operands[1], mask));
+ DONE;
+ }
+
+ operands[1] = force_reg (DImode, operands[1]);
+ operands[2] = gen_lowpart (QImode, operands[2]);
+})
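+
+;; A small usage sketch (assuming the documented __builtin_alpha_zap
+;; interface): each set bit in the 8-bit selector zeroes the
+;; corresponding byte, and the expander above folds constant selectors
+;; into a plain AND (or a move for the all-zeros/all-ones cases).
+;;
+;;   /* Zero the low four bytes of X, i.e. X & 0xffffffff00000000.  */
+;;   static long clear_low_half (long x)
+;;   {
+;;     return __builtin_alpha_zap (x, 0x0f);
+;;   }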
+
+(define_insn "*builtin_zap_1"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r")
+ (and:DI (unspec:DI
+ [(match_operand:QI 2 "reg_or_cint_operand" "n,n,r,r")]
+ UNSPEC_ZAP)
+ (match_operand:DI 1 "reg_or_cint_operand" "n,r,J,r")))]
+ ""
+ "@
+ #
+ #
+ bis $31,$31,%0
+ zap %r1,%2,%0"
+ [(set_attr "type" "shift,shift,ilog,shift")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (and:DI (unspec:DI
+ [(match_operand:QI 2 "const_int_operand" "")]
+ UNSPEC_ZAP)
+ (match_operand:DI 1 "const_int_operand" "")))]
+ ""
+ [(const_int 0)]
+{
+ rtx mask = alpha_expand_zap_mask (INTVAL (operands[2]));
+ if (HOST_BITS_PER_WIDE_INT >= 64 || GET_CODE (mask) == CONST_INT)
+ operands[1] = gen_int_mode (INTVAL (operands[1]) & INTVAL (mask), DImode);
+ else
+ {
+ HOST_WIDE_INT c_lo = INTVAL (operands[1]);
+ HOST_WIDE_INT c_hi = (c_lo < 0 ? -1 : 0);
+ operands[1] = immed_double_const (c_lo & CONST_DOUBLE_LOW (mask),
+ c_hi & CONST_DOUBLE_HIGH (mask),
+ DImode);
+ }
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+})
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (and:DI (unspec:DI
+ [(match_operand:QI 2 "const_int_operand" "")]
+ UNSPEC_ZAP)
+ (match_operand:DI 1 "register_operand" "")))]
+ ""
+ [(set (match_dup 0)
+ (and:DI (match_dup 1) (match_dup 2)))]
+{
+ operands[2] = alpha_expand_zap_mask (INTVAL (operands[2]));
+ if (operands[2] == const0_rtx)
+ {
+ emit_move_insn (operands[0], const0_rtx);
+ DONE;
+ }
+ if (operands[2] == constm1_rtx)
+ {
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+ }
+})
+
+(define_expand "builtin_zapnot"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (and:DI (unspec:DI
+ [(not:QI (match_operand:DI 2 "reg_or_cint_operand" ""))]
+ UNSPEC_ZAP)
+ (match_operand:DI 1 "reg_or_cint_operand" "")))]
+ ""
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ rtx mask = alpha_expand_zap_mask (~ INTVAL (operands[2]));
+
+ if (mask == const0_rtx)
+ {
+ emit_move_insn (operands[0], const0_rtx);
+ DONE;
+ }
+ if (mask == constm1_rtx)
+ {
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+ }
+
+ operands[1] = force_reg (DImode, operands[1]);
+ emit_insn (gen_anddi3 (operands[0], operands[1], mask));
+ DONE;
+ }
+
+ operands[1] = force_reg (DImode, operands[1]);
+ operands[2] = gen_lowpart (QImode, operands[2]);
+})
+
+(define_insn "*builtin_zapnot_1"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (unspec:DI
+ [(not:QI (match_operand:QI 2 "register_operand" "r"))]
+ UNSPEC_ZAP)
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+ ""
+ "zapnot %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "builtin_amask"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "reg_or_8bit_operand" "rI")]
+ UNSPEC_AMASK))]
+ ""
+ "amask %1,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "builtin_implver"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(const_int 0)] UNSPEC_IMPLVER))]
+ ""
+ "implver %0"
+ [(set_attr "type" "ilog")])
+
+(define_insn "builtin_rpcc"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI [(const_int 0)] UNSPECV_RPCC))]
+ ""
+ "rpcc %0"
+ [(set_attr "type" "ilog")])
+
+(define_expand "builtin_minub8"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")]
+ "TARGET_MAX"
+{
+ alpha_expand_builtin_vector_binop (gen_uminv8qi3, V8QImode, operands[0],
+ operands[1], operands[2]);
+ DONE;
+})
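+
+;; A brief sketch (assuming the documented __builtin_alpha_minub8
+;; interface, available with -mmax) of this and the following MAX
+;; vector expanders: they treat a 64-bit register as eight unsigned
+;; bytes (or four words for the w4 forms) and take the element-wise
+;; minimum or maximum.
+;;
+;;   /* Element-wise unsigned byte minimum of two quadwords.  */
+;;   static long min_bytes (long a, long b)
+;;   {
+;;     return __builtin_alpha_minub8 (a, b);
+;;   }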
+
+(define_expand "builtin_minsb8"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")]
+ "TARGET_MAX"
+{
+ alpha_expand_builtin_vector_binop (gen_sminv8qi3, V8QImode, operands[0],
+ operands[1], operands[2]);
+ DONE;
+})
+
+(define_expand "builtin_minuw4"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")]
+ "TARGET_MAX"
+{
+ alpha_expand_builtin_vector_binop (gen_uminv4hi3, V4HImode, operands[0],
+ operands[1], operands[2]);
+ DONE;
+})
+
+(define_expand "builtin_minsw4"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")]
+ "TARGET_MAX"
+{
+ alpha_expand_builtin_vector_binop (gen_sminv4hi3, V4HImode, operands[0],
+ operands[1], operands[2]);
+ DONE;
+})
+
+(define_expand "builtin_maxub8"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")]
+ "TARGET_MAX"
+{
+ alpha_expand_builtin_vector_binop (gen_umaxv8qi3, V8QImode, operands[0],
+ operands[1], operands[2]);
+ DONE;
+})
+
+(define_expand "builtin_maxsb8"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")]
+ "TARGET_MAX"
+{
+ alpha_expand_builtin_vector_binop (gen_smaxv8qi3, V8QImode, operands[0],
+ operands[1], operands[2]);
+ DONE;
+})
+
+(define_expand "builtin_maxuw4"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")]
+ "TARGET_MAX"
+{
+ alpha_expand_builtin_vector_binop (gen_umaxv4hi3, V4HImode, operands[0],
+ operands[1], operands[2]);
+ DONE;
+})
+
+(define_expand "builtin_maxsw4"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")]
+ "TARGET_MAX"
+{
+ alpha_expand_builtin_vector_binop (gen_smaxv4hi3, V4HImode, operands[0],
+ operands[1], operands[2]);
+ DONE;
+})
+
+(define_insn "builtin_perr"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:DI 2 "reg_or_8bit_operand" "rJ")]
+ UNSPEC_PERR))]
+ "TARGET_MAX"
+ "perr %r1,%r2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_expand "builtin_pklb"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (vec_concat:V8QI
+ (vec_concat:V4QI
+ (truncate:V2QI (match_operand:DI 1 "register_operand" ""))
+ (match_dup 2))
+ (match_dup 3)))]
+ "TARGET_MAX"
+{
+ operands[0] = gen_lowpart (V8QImode, operands[0]);
+ operands[1] = gen_lowpart (V2SImode, operands[1]);
+ operands[2] = CONST0_RTX (V2QImode);
+ operands[3] = CONST0_RTX (V4QImode);
+})
+
+(define_insn "*pklb"
+ [(set (match_operand:V8QI 0 "register_operand" "=r")
+ (vec_concat:V8QI
+ (vec_concat:V4QI
+ (truncate:V2QI (match_operand:V2SI 1 "register_operand" "r"))
+ (match_operand:V2QI 2 "const0_operand" ""))
+ (match_operand:V4QI 3 "const0_operand" "")))]
+ "TARGET_MAX"
+ "pklb %r1,%0"
+ [(set_attr "type" "mvi")])
+
+(define_expand "builtin_pkwb"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (vec_concat:V8QI
+ (truncate:V4QI (match_operand:DI 1 "register_operand" ""))
+ (match_dup 2)))]
+ "TARGET_MAX"
+{
+ operands[0] = gen_lowpart (V8QImode, operands[0]);
+ operands[1] = gen_lowpart (V4HImode, operands[1]);
+ operands[2] = CONST0_RTX (V4QImode);
+})
+
+(define_insn "*pkwb"
+ [(set (match_operand:V8QI 0 "register_operand" "=r")
+ (vec_concat:V8QI
+ (truncate:V4QI (match_operand:V4HI 1 "register_operand" "r"))
+ (match_operand:V4QI 2 "const0_operand" "")))]
+ "TARGET_MAX"
+ "pkwb %r1,%0"
+ [(set_attr "type" "mvi")])
+
+(define_expand "builtin_unpkbl"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:V2SI
+ (vec_select:V2QI (match_operand:DI 1 "register_operand" "")
+ (parallel [(const_int 0) (const_int 1)]))))]
+ "TARGET_MAX"
+{
+ operands[0] = gen_lowpart (V2SImode, operands[0]);
+ operands[1] = gen_lowpart (V8QImode, operands[1]);
+})
+
+(define_insn "*unpkbl"
+ [(set (match_operand:V2SI 0 "register_operand" "=r")
+ (zero_extend:V2SI
+ (vec_select:V2QI (match_operand:V8QI 1 "reg_or_0_operand" "rW")
+ (parallel [(const_int 0) (const_int 1)]))))]
+ "TARGET_MAX"
+ "unpkbl %r1,%0"
+ [(set_attr "type" "mvi")])
+
+(define_expand "builtin_unpkbw"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:V4HI
+ (vec_select:V4QI (match_operand:DI 1 "register_operand" "")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_MAX"
+{
+ operands[0] = gen_lowpart (V4HImode, operands[0]);
+ operands[1] = gen_lowpart (V8QImode, operands[1]);
+})
+
+(define_insn "*unpkbw"
+ [(set (match_operand:V4HI 0 "register_operand" "=r")
+ (zero_extend:V4HI
+ (vec_select:V4QI (match_operand:V8QI 1 "reg_or_0_operand" "rW")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_MAX"
+ "unpkbw %r1,%0"
+ [(set_attr "type" "mvi")])
+
+(include "sync.md")
+
+;; The call patterns are at the end of the file because their
+;; wildcard operand0 interferes with nice recognition.
+
+(define_insn "*call_value_osf_1_er"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s"))
+ (match_operand 2 "" "")))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "@
+ jsr $26,(%1),0\;ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%*
+ bsr $26,%1\t\t!samegp
+ ldq $27,%1($29)\t\t!literal!%#\;jsr $26,($27),0\t\t!lituse_jsr!%#\;ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%*"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "12,*,16")])
+
+;; We must use peep2 instead of a split because we need accurate life
+;; information for $gp. Consider the case of { bar(); while (1); }.
+(define_peephole2
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "call_operand" ""))
+ (match_operand 2 "" "")))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))])]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF && reload_completed
+ && ! samegp_function_operand (operands[1], Pmode)
+ && (peep2_regno_dead_p (1, 29)
+ || find_reg_note (insn, REG_NORETURN, NULL_RTX))"
+ [(parallel [(set (match_dup 0)
+ (call (mem:DI (match_dup 3))
+ (match_dup 2)))
+ (set (reg:DI 26) (plus:DI (pc) (const_int 4)))
+ (unspec_volatile [(reg:DI 29)] UNSPECV_BLOCKAGE)
+ (use (match_dup 1))
+ (use (match_dup 4))])]
+{
+ if (CONSTANT_P (operands[1]))
+ {
+ operands[3] = gen_rtx_REG (Pmode, 27);
+ operands[4] = GEN_INT (alpha_next_sequence_number++);
+ emit_insn (gen_movdi_er_high_g (operands[3], pic_offset_table_rtx,
+ operands[1], operands[4]));
+ }
+ else
+ {
+ operands[3] = operands[1];
+ operands[1] = const0_rtx;
+ operands[4] = const0_rtx;
+ }
+})
+
+(define_peephole2
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "call_operand" ""))
+ (match_operand 2 "" "")))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))])]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF && reload_completed
+ && ! samegp_function_operand (operands[1], Pmode)
+ && ! (peep2_regno_dead_p (1, 29)
+ || find_reg_note (insn, REG_NORETURN, NULL_RTX))"
+ [(parallel [(set (match_dup 0)
+ (call (mem:DI (match_dup 3))
+ (match_dup 2)))
+ (set (reg:DI 26) (plus:DI (pc) (const_int 4)))
+ (unspec_volatile [(reg:DI 29)] UNSPECV_BLOCKAGE)
+ (use (match_dup 1))
+ (use (match_dup 5))])
+ (set (reg:DI 29)
+ (unspec_volatile:DI [(reg:DI 26) (match_dup 4)] UNSPECV_LDGP1))
+ (set (reg:DI 29)
+ (unspec:DI [(reg:DI 29) (match_dup 4)] UNSPEC_LDGP2))]
+{
+ if (CONSTANT_P (operands[1]))
+ {
+ operands[3] = gen_rtx_REG (Pmode, 27);
+ operands[5] = GEN_INT (alpha_next_sequence_number++);
+ emit_insn (gen_movdi_er_high_g (operands[3], pic_offset_table_rtx,
+ operands[1], operands[5]));
+ }
+ else
+ {
+ operands[3] = operands[1];
+ operands[1] = const0_rtx;
+ operands[5] = const0_rtx;
+ }
+ operands[4] = GEN_INT (alpha_next_sequence_number++);
+})
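+
+;; A hedged illustration of the liveness distinction the two peepholes
+;; above draw (the C is ours, not from this file): in f the gp register
+;; $29 is dead after the call, so the post-call reload can be dropped;
+;; in g it is live across the first call and must be restored before
+;; the second.
+;;
+;;   extern void bar (void);
+;;   void f (void) { bar (); while (1); }   /* $29 dead: no gp reload */
+;;   void g (void) { bar (); bar (); }      /* $29 live: reload gp    */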
+
+;; We add a blockage unspec_volatile to prevent insns from moving down
+;; from above the call to in between the call and the ldah gpdisp.
+(define_insn "*call_value_osf_2_er"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "register_operand" "c"))
+ (match_operand 2 "" "")))
+ (set (reg:DI 26)
+ (plus:DI (pc) (const_int 4)))
+ (unspec_volatile [(reg:DI 29)] UNSPECV_BLOCKAGE)
+ (use (match_operand 3 "" ""))
+ (use (match_operand 4 "" ""))]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "jsr $26,(%1),%3%J4"
+ [(set_attr "type" "jsr")
+ (set_attr "cannot_copy" "true")])
+
+(define_insn "*call_value_osf_1_noreturn"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s"))
+ (match_operand 2 "" "")))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))]
+ "! TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ "@
+ jsr $26,($27),0%+
+ bsr $26,$%1..ng%+
+ jsr $26,%1%+"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "*,*,8")])
+
+(define_insn_and_split "call_value_osf_tlsgd"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "symbolic_operand" ""))
+ (const_int 0)))
+ (unspec [(match_operand:DI 2 "const_int_operand" "")] UNSPEC_TLSGD_CALL)
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))]
+ "HAVE_AS_TLS"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3)
+ (unspec:DI [(match_dup 5)
+ (match_dup 1)
+ (match_dup 2)] UNSPEC_LITERAL))
+ (parallel [(set (match_dup 0)
+ (call (mem:DI (match_dup 3))
+ (const_int 0)))
+ (set (reg:DI 26) (plus:DI (pc) (const_int 4)))
+ (unspec_volatile [(match_dup 5)] UNSPECV_BLOCKAGE)
+ (use (match_dup 1))
+ (use (unspec [(match_dup 2)] UNSPEC_TLSGD_CALL))])
+ (set (match_dup 5)
+ (unspec_volatile:DI [(reg:DI 26) (match_dup 4)] UNSPECV_LDGP1))
+ (set (match_dup 5)
+ (unspec:DI [(match_dup 5) (match_dup 4)] UNSPEC_LDGP2))]
+{
+ operands[3] = gen_rtx_REG (Pmode, 27);
+ operands[4] = GEN_INT (alpha_next_sequence_number++);
+ operands[5] = pic_offset_table_rtx;
+}
+ [(set_attr "type" "multi")])
+
+(define_insn_and_split "call_value_osf_tlsldm"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "symbolic_operand" ""))
+ (const_int 0)))
+ (unspec [(match_operand:DI 2 "const_int_operand" "")] UNSPEC_TLSLDM_CALL)
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))]
+ "HAVE_AS_TLS"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3)
+ (unspec:DI [(match_dup 5)
+ (match_dup 1)
+ (match_dup 2)] UNSPEC_LITERAL))
+ (parallel [(set (match_dup 0)
+ (call (mem:DI (match_dup 3))
+ (const_int 0)))
+ (set (reg:DI 26) (plus:DI (pc) (const_int 4)))
+ (unspec_volatile [(match_dup 5)] UNSPECV_BLOCKAGE)
+ (use (match_dup 1))
+ (use (unspec [(match_dup 2)] UNSPEC_TLSLDM_CALL))])
+ (set (reg:DI 29)
+ (unspec_volatile:DI [(reg:DI 26) (match_dup 4)] UNSPECV_LDGP1))
+ (set (reg:DI 29)
+ (unspec:DI [(reg:DI 29) (match_dup 4)] UNSPEC_LDGP2))]
+{
+ operands[3] = gen_rtx_REG (Pmode, 27);
+ operands[4] = GEN_INT (alpha_next_sequence_number++);
+ operands[5] = pic_offset_table_rtx;
+}
+ [(set_attr "type" "multi")])
+
+(define_insn "*call_value_osf_1"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s"))
+ (match_operand 2 "" "")))
+ (use (reg:DI 29))
+ (clobber (reg:DI 26))]
+ "! TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "@
+ jsr $26,($27),0\;ldgp $29,0($26)
+ bsr $26,$%1..ng
+ jsr $26,%1\;ldgp $29,0($26)"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "12,*,16")])
+
+(define_insn "*sibcall_value_osf_1_er"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "symbolic_operand" "R,s"))
+ (match_operand 2 "" "")))
+ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)]
+ "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "@
+ br $31,%1\t\t!samegp
+ ldq $27,%1($29)\t\t!literal!%#\;jmp $31,($27),%1\t\t!lituse_jsr!%#"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "*,8")])
+
+(define_insn "*sibcall_value_osf_1"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "symbolic_operand" "R,s"))
+ (match_operand 2 "" "")))
+ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)]
+ "! TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+ "@
+ br $31,$%1..ng
+ lda $27,%1\;jmp $31,($27),%1"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "*,8")])
+
+(define_insn "*call_value_nt_1"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "call_operand" "r,R,s"))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 26))]
+ "TARGET_ABI_WINDOWS_NT"
+ "@
+ jsr $26,(%1)
+ bsr $26,%1
+ jsr $26,%1"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "*,*,12")])
+
+; GAS relies on the order and position of the instructions output below
+; to generate the relocs that allow the VMS linker to optimize the call.
+; Please do not reorder them.
+(define_insn "*call_value_vms_1"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "call_operand" "r,s"))
+ (match_operand 2 "" "")))
+ (use (match_operand:DI 3 "nonmemory_operand" "r,n"))
+ (use (reg:DI 25))
+ (use (reg:DI 26))
+ (clobber (reg:DI 27))]
+ "TARGET_ABI_OPEN_VMS"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return "mov %3,$27\;jsr $26,0\;ldq $27,0($29)";
+ case 1:
+ operands [3] = alpha_use_linkage (operands [1], cfun->decl, 1, 0);
+ operands [4] = alpha_use_linkage (operands [1], cfun->decl, 0, 0);
+ return "ldq $26,%4\;ldq $27,%3\;jsr $26,%1\;ldq $27,0($29)";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "jsr")
+ (set_attr "length" "12,16")])
+
+(define_insn "*call_value_umk"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "call_operand" "r"))
+ (match_operand 2 "" "")))
+ (use (reg:DI 25))
+ (clobber (reg:DI 26))]
+ "TARGET_ABI_UNICOSMK"
+ "jsr $26,(%1)"
+ [(set_attr "type" "jsr")])
diff --git a/gcc-4.2.1/gcc/config/alpha/alpha.opt b/gcc-4.2.1/gcc/config/alpha/alpha.opt
new file mode 100644
index 000000000..a29f58ecc
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/alpha.opt
@@ -0,0 +1,135 @@
+; Options for the DEC Alpha port of the compiler
+;
+; Copyright (C) 2005 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+msoft-float
+Target Report Mask(SOFT_FP)
+Do not use hardware fp
+
+mfp-regs
+Target Report Mask(FPREGS)
+Use fp registers
+
+mgas
+Target RejectNegative Mask(GAS)
+Assume GAS
+
+malpha-as
+Target RejectNegative InverseMask(GAS)
+Do not assume GAS
+
+mieee-conformant
+Target RejectNegative Mask(IEEE_CONFORMANT)
+Request IEEE-conformant math library routines (OSF/1)
+
+mieee
+Target Report RejectNegative Mask(IEEE)
+Emit IEEE-conformant code, without inexact exceptions
+
+mieee-with-inexact
+Target Report RejectNegative Mask(IEEE_WITH_INEXACT)
+Emit IEEE-conformant code, with inexact exceptions
+
+mbuild-constants
+Target Report Mask(BUILD_CONSTANTS)
+Do not emit complex integer constants to read-only memory
+
+mfloat-vax
+Target Report RejectNegative Mask(FLOAT_VAX)
+Use VAX fp
+
+mfloat-ieee
+Target Report RejectNegative InverseMask(FLOAT_VAX)
+Do not use VAX fp
+
+mbwx
+Target Report Mask(BWX)
+Emit code for the byte/word ISA extension
+
+mmax
+Target Report Mask(MAX)
+Emit code for the motion video ISA extension
+
+mfix
+Target Report Mask(FIX)
+Emit code for the fp move and sqrt ISA extension
+
+mcix
+Target Report Mask(CIX)
+Emit code for the counting ISA extension
+
+mexplicit-relocs
+Target Report Mask(EXPLICIT_RELOCS)
+Emit code using explicit relocation directives
+
+msmall-data
+Target Report RejectNegative Mask(SMALL_DATA)
+Emit 16-bit relocations to the small data areas
+
+mlarge-data
+Target Report RejectNegative InverseMask(SMALL_DATA)
+Emit 32-bit relocations to the small data areas
+
+msmall-text
+Target Report RejectNegative Mask(SMALL_TEXT)
+Emit direct branches to local functions
+
+mlarge-text
+Target Report RejectNegative InverseMask(SMALL_TEXT)
+Emit indirect branches to local functions
+
+mtls-kernel
+Target Report Mask(TLS_KERNEL)
+Emit rdval instead of rduniq for thread pointer
+
+mlong-double-128
+Target Report RejectNegative Mask(LONG_DOUBLE_128)
+Use 128-bit long double
+
+mlong-double-64
+Target Report RejectNegative InverseMask(LONG_DOUBLE_128)
+Use 64-bit long double
+
+mcpu=
+Target RejectNegative Joined Var(alpha_cpu_string)
+Use features of and schedule given CPU
+
+mtune=
+Target RejectNegative Joined Var(alpha_tune_string)
+Schedule given CPU
+
+mfp-rounding-mode=
+Target RejectNegative Joined Var(alpha_fprm_string)
+Control the generated fp rounding mode
+
+mfp-trap-mode=
+Target RejectNegative Joined Var(alpha_fptm_string)
+Control the IEEE trap mode
+
+mtrap-precision=
+Target RejectNegative Joined Var(alpha_tp_string)
+Control the precision given to fp exceptions
+
+mmemory-latency=
+Target RejectNegative Joined Var(alpha_mlat_string)
+Tune expected memory latency
+
+mtls-size=
+Target RejectNegative Joined UInteger Var(alpha_tls_size) Init(32)
+Specify bit size of immediate TLS offsets
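+
+; An illustrative command line (our example, not part of this file)
+; combining several of the options above:
+;
+;   gcc -mcpu=ev6 -mieee -mexplicit-relocs -mtls-size=32 -c foo.c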
diff --git a/gcc-4.2.1/gcc/config/alpha/crtfastmath.c b/gcc-4.2.1/gcc/config/alpha/crtfastmath.c
new file mode 100644
index 000000000..045ea43d6
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/crtfastmath.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2001 Free Software Foundation, Inc.
+ * Contributed by Richard Henderson (rth@redhat.com)
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause
+ * the resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why
+ * the executable file might be covered by the GNU General Public License.
+ */
+
+/* Assume OSF/1 compatible interfaces. */
+
+extern void __ieee_set_fp_control (unsigned long int);
+
+#define IEEE_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */
+#define IEEE_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */
+
+static void __attribute__((constructor))
+set_fast_math (void)
+{
+ __ieee_set_fp_control (IEEE_MAP_DMZ | IEEE_MAP_UMZ);
+}
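+
+/* A gloss of ours, not in the original file: this object is linked in
+   via the ELF ENDFILE_SPEC (see elf.h below) when compiling with
+   -ffast-math or -funsafe-math-optimizations, so the constructor above
+   installs the flush-to-zero modes before main runs.  */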
diff --git a/gcc-4.2.1/gcc/config/alpha/elf.h b/gcc-4.2.1/gcc/config/alpha/elf.h
new file mode 100644
index 000000000..043a5244c
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/elf.h
@@ -0,0 +1,426 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha w/ELF.
+ Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003
+ Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@tamu.edu).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#undef OBJECT_FORMAT_COFF
+#undef EXTENDED_COFF
+#define OBJECT_FORMAT_ELF
+
+/* ??? Move all SDB stuff from alpha.h to osf.h. */
+#undef SDB_DEBUGGING_INFO
+
+#define DBX_DEBUGGING_INFO 1
+#define DWARF2_DEBUGGING_INFO 1
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+#undef ASM_FINAL_SPEC
+
+/* alpha/ doesn't use elfos.h for some reason. */
+#define TARGET_OBJFMT_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__ELF__"); \
+ } \
+ while (0)
+
+#undef CC1_SPEC
+#define CC1_SPEC "%{G*}"
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{G*} %{relax:-relax} %{!gstabs*:-no-mdebug}%{gstabs*:-mdebug}"
+
+#undef IDENT_ASM_OP
+#define IDENT_ASM_OP "\t.ident\t"
+
+/* Output #ident as a .ident. */
+#undef ASM_OUTPUT_IDENT
+#define ASM_OUTPUT_IDENT(FILE, NAME) \
+ fprintf (FILE, "%s\"%s\"\n", IDENT_ASM_OP, NAME);
+
+/* This is how to allocate empty space in some section. The .zero
+ pseudo-op is used for this on most svr4 assemblers. */
+
+#undef SKIP_ASM_OP
+#define SKIP_ASM_OP "\t.zero\t"
+
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(FILE, SIZE) \
+ fprintf (FILE, "%s"HOST_WIDE_INT_PRINT_UNSIGNED"\n", SKIP_ASM_OP, (SIZE))
+
+/* Output the label which precedes a jumptable.  Note that for all svr4
+   systems where we actually generate jumptables (which is to say every
+   svr4 target except i386, where we use casesi instead) we put the
+   jumptables into the .rodata section, and since other stuff could have
+   been put into the .rodata section prior to any given jumptable, we
+   have to make sure that the location counter for the .rodata section
+   gets properly re-aligned prior to the actual beginning of the jump
+   table.  */
+
+#undef ALIGN_ASM_OP
+#define ALIGN_ASM_OP "\t.align\t"
+
+#ifndef ASM_OUTPUT_BEFORE_CASE_LABEL
+#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \
+ ASM_OUTPUT_ALIGN ((FILE), 2);
+#endif
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE, PREFIX, NUM, JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_BEFORE_CASE_LABEL (FILE, PREFIX, NUM, JUMPTABLE) \
+ (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* The standard SVR4 assembler seems to require that certain builtin
+ library routines (e.g. .udiv) be explicitly declared as .globl
+ in each assembly file where they are referenced. */
+
+#undef ASM_OUTPUT_EXTERNAL_LIBCALL
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \
+ (*targetm.asm_out.globalize_label) (FILE, XSTR (FUN, 0))
+
+/* This says how to output assembler code to declare an
+ uninitialized external linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#undef COMMON_ASM_OP
+#define COMMON_ASM_OP "\t.comm\t"
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n", (SIZE), (ALIGN) / BITS_PER_UNIT); \
+} while (0)
+
+/* This says how to output assembler code to declare an
+ uninitialized internal linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ if ((SIZE) <= g_switch_value) \
+ switch_to_section (sbss_section); \
+ else \
+ switch_to_section (bss_section); \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, SIZE); \
+ ASM_OUTPUT_ALIGN ((FILE), exact_log2((ALIGN) / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ ASM_OUTPUT_SKIP((FILE), (SIZE) ? (SIZE) : 1); \
+} while (0)
+
+/* This says how to output assembler code to declare an
+ uninitialized external linkage data object. */
+
+#undef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+do { \
+ ASM_OUTPUT_ALIGNED_LOCAL (FILE, NAME, SIZE, ALIGN); \
+} while (0)
+
+/* Biggest alignment supported by the object file format of this
+ machine. Use this macro to limit the alignment which can be
+ specified using the `__attribute__ ((aligned (N)))' construct. If
+ not defined, the default value is `BIGGEST_ALIGNMENT'.
+
+ This value is really 2^63. Since gcc figures the alignment in bits,
+ we could only potentially get to 2^60 on suitable hosts. Due to other
+ considerations in varasm, we must restrict this to what fits in an int. */
+
+#undef MAX_OFILE_ALIGNMENT
+#define MAX_OFILE_ALIGNMENT \
+ (1 << (HOST_BITS_PER_INT < 64 ? HOST_BITS_PER_INT - 2 : 62))
+
+/* This is the pseudo-op used to generate a contiguous sequence of byte
+ values from a double-quoted string WITHOUT HAVING A TERMINATING NUL
+ AUTOMATICALLY APPENDED. This is the same for most svr4 assemblers. */
+
+#undef ASCII_DATA_ASM_OP
+#define ASCII_DATA_ASM_OP "\t.ascii\t"
+
+#undef READONLY_DATA_SECTION_ASM_OP
+#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata"
+#undef BSS_SECTION_ASM_OP
+#define BSS_SECTION_ASM_OP "\t.section\t.bss"
+#undef SBSS_SECTION_ASM_OP
+#define SBSS_SECTION_ASM_OP "\t.section\t.sbss,\"aw\""
+#undef SDATA_SECTION_ASM_OP
+#define SDATA_SECTION_ASM_OP "\t.section\t.sdata,\"aw\""
+
+/* On svr4, we *do* have support for the .init and .fini sections, and we
+ can put stuff in there to be executed before and after `main'. We let
+ crtstuff.c and other files know this by defining the following symbols.
+ The definitions say how to change sections to the .init and .fini
+ sections. This is the same for all known svr4 assemblers. */
+
+#undef INIT_SECTION_ASM_OP
+#define INIT_SECTION_ASM_OP "\t.section\t.init"
+#undef FINI_SECTION_ASM_OP
+#define FINI_SECTION_ASM_OP "\t.section\t.fini"
+
+#ifdef HAVE_GAS_SUBSECTION_ORDERING
+
+#define ASM_SECTION_START_OP "\t.subsection\t-1"
+
+/* Output assembly directive to move to the beginning of current section. */
+#define ASM_OUTPUT_SECTION_START(FILE) \
+ fprintf ((FILE), "%s\n", ASM_SECTION_START_OP)
+
+#endif
+
+/* Switch into a generic section. */
+#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
+#define TARGET_ASM_SELECT_SECTION default_elf_select_section
+
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+
+/* Define the strings used for the special svr4 .type and .size directives.
+ These strings generally do not vary from one system running svr4 to
+ another, but if a given system (e.g. m88k running svr4) needs to use
+ different pseudo-op names for these, they may be overridden in the
+ file which includes this one. */
+
+#undef TYPE_ASM_OP
+#define TYPE_ASM_OP "\t.type\t"
+#undef SIZE_ASM_OP
+#define SIZE_ASM_OP "\t.size\t"
+
+/* This is how we tell the assembler that a symbol is weak. */
+
+#undef ASM_WEAKEN_LABEL
+#define ASM_WEAKEN_LABEL(FILE, NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+
+/* This is how we tell the assembler that two symbols have the same value. */
+
+#undef ASM_OUTPUT_DEF
+#define ASM_OUTPUT_DEF(FILE, ALIAS, NAME) \
+ do { \
+ assemble_name(FILE, ALIAS); \
+ fputs(" = ", FILE); \
+ assemble_name(FILE, NAME); \
+ fputc('\n', FILE); \
+ } while (0)
+
+#undef ASM_OUTPUT_DEF_FROM_DECLS
+#define ASM_OUTPUT_DEF_FROM_DECLS(FILE, DECL, TARGET) \
+ do { \
+ const char *alias = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ const char *name = IDENTIFIER_POINTER (TARGET); \
+ if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ { \
+ fputc ('$', FILE); \
+ assemble_name (FILE, alias); \
+ fputs ("..ng = $", FILE); \
+ assemble_name (FILE, name); \
+ fputs ("..ng\n", FILE); \
+ } \
+ assemble_name(FILE, alias); \
+ fputs(" = ", FILE); \
+ assemble_name(FILE, name); \
+ fputc('\n', FILE); \
+ } while (0)
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "@%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+
+/* Write the extra assembler code needed to declare an object properly. */
+
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do { \
+ HOST_WIDE_INT size; \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive \
+ && DECL_SIZE (DECL) \
+ && (size = int_size_in_bytes (TREE_TYPE (DECL))) > 0) \
+ { \
+ size_directive_output = 1; \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, size); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+
+#undef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+ do { \
+ const char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ HOST_WIDE_INT size; \
+ if (!flag_inhibit_size_directive \
+ && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output \
+ && (size = int_size_in_bytes (TREE_TYPE (DECL))) > 0) \
+ { \
+ size_directive_output = 1; \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, name, size); \
+ } \
+ } while (0)
+
+/* A table of byte codes used by the ASM_OUTPUT_ASCII and
+   ASM_OUTPUT_LIMITED_STRING macros.  Each byte in the table
+   corresponds to a particular byte value [0..255].  For any
+   given byte value, if the value in the corresponding table
+   position is zero, the given character can be output directly.
+   If the table value is 1, the byte must be output as a \ooo
+   octal escape.  If the table's value is anything else, then the
+   byte value should be output as a \ followed by the value
+   in the table.  Note that we can use standard UN*X escape
+   sequences for many control characters, but we don't use
+   \a to represent BEL because some svr4 assemblers (e.g. on
+   the i386) don't know about that.  Also, we don't use \v
+   since some versions of gas, such as 2.2, did not accept it.  */
+
+#undef ESCAPES
+#define ESCAPES \
+"\1\1\1\1\1\1\1\1btn\1fr\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\0\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\\\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1"
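+
+/* For example, under this table BEL (0x07) maps to 1 and is written as
+   the octal escape \007, backspace (0x08) maps to 'b' and is written
+   as \b, '"' (0x22) maps to '"' and is written as \", and printable
+   bytes such as 'A' map to 0 and are copied through unchanged.  */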
+
+/* Some svr4 assemblers have a limit on the number of characters which
+ can appear in the operand of a .string directive. If your assembler
+ has such a limitation, you should define STRING_LIMIT to reflect that
+ limit. Note that at least some svr4 assemblers have a limit on the
+ actual number of bytes in the double-quoted string, and that they
+ count each character in an escape sequence as one byte. Thus, an
+ escape sequence like \377 would count as four bytes.
+
+ If your target assembler doesn't support the .string directive, you
+ should define this to zero. */
+
+#undef STRING_LIMIT
+#define STRING_LIMIT ((unsigned) 256)
+#undef STRING_ASM_OP
+#define STRING_ASM_OP "\t.string\t"
+
+/* GAS is the only Alpha/ELF assembler. */
+#undef TARGET_GAS
+#define TARGET_GAS (1)
+
+/* Provide a STARTFILE_SPEC appropriate for ELF.  Here we add the
+   (even more) magical crtbegin.o file which provides part of the
+   support for getting C++ file-scope static objects constructed
+   before entering `main'.  */
+
+#undef STARTFILE_SPEC
+#ifdef HAVE_LD_PIE
+#define STARTFILE_SPEC \
+ "%{!shared: %{pg|p:gcrt1.o%s;pie:Scrt1.o%s;:crt1.o%s}}\
+ crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+#else
+#define STARTFILE_SPEC \
+ "%{!shared: %{pg|p:gcrt1.o%s;:crt1.o%s}}\
+ crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+#endif
+
+/* Provide an ENDFILE_SPEC appropriate for ELF.  Here we tack on the
+   magical crtend.o file which provides part of the support for
+   getting C++ file-scope static objects constructed before entering
+   `main', followed by a normal ELF "finalizer" file, `crtn.o'.  */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
+ %{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
+
+/* We support #pragma. */
+#define HANDLE_SYSV_PRAGMA 1
+
+/* Select a format to encode pointers in exception handling data. CODE
+ is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
+ true if the symbol may be affected by dynamic relocations.
+
+ Since application size is already constrained to <2GB by the form of
+ the ldgp relocation, we can use a 32-bit pc-relative relocation to
+ static data. Dynamic data is accessed indirectly to allow for read
+ only EH sections. */
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
+ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
+
+/* If defined, a C statement to be executed just prior to the output of
+ assembler code for INSN. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ (alpha_this_literal_sequence_number = 0, \
+ alpha_this_gpdisp_sequence_number = 0)
+extern int alpha_this_literal_sequence_number;
+extern int alpha_this_gpdisp_sequence_number;
+
+/* Since the bits of the _init and _fini functions are spread across
+   many object files, each potentially with its own GP, we must assume
+   we need to load our GP.  Further, the .init/.fini section can
+   easily be more than 4MB away from the function to call so we can't
+   use bsr.  */
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+ asm (SECTION_OP "\n" \
+" br $29,1f\n" \
+"1: ldgp $29,0($29)\n" \
+" unop\n" \
+" jsr $26," USER_LABEL_PREFIX #FUNC "\n" \
+" .align 3\n" \
+" .previous");
+
+/* If we have the capability, create headers for efficient EH lookup.
+   As of Jan 2002, only glibc 2.2.4 can actually make use of this, but
+   I imagine that other systems will catch up.  In the meantime, it
+   doesn't hurt to make sure that the data exists to be used later.  */
+#if defined(HAVE_LD_EH_FRAME_HDR)
+#define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
+#endif
diff --git a/gcc-4.2.1/gcc/config/alpha/ev4.md b/gcc-4.2.1/gcc/config/alpha/ev4.md
new file mode 100644
index 000000000..da63c429c
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/ev4.md
@@ -0,0 +1,162 @@
+;; Scheduling description for Alpha EV4.
+;; Copyright (C) 2002, 2004, 2005 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+; On EV4 there are two classes of resources to consider: resources needed
+; to issue, and resources needed to execute. IBUS[01] are in the first
+; category. ABOX, BBOX, EBOX, FBOX, IMUL & FDIV make up the second.
+; (There are a few other register-like resources, but ...)
+
+(define_automaton "ev4_0,ev4_1,ev4_2")
+(define_cpu_unit "ev4_ib0,ev4_ib1,ev4_abox,ev4_bbox" "ev4_0")
+(define_cpu_unit "ev4_ebox,ev4_imul" "ev4_1")
+(define_cpu_unit "ev4_fbox,ev4_fdiv" "ev4_2")
+(define_reservation "ev4_ib01" "ev4_ib0|ev4_ib1")
+
+; Assume type "multi" single issues.
+(define_insn_reservation "ev4_multi" 1
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "multi"))
+ "ev4_ib0+ev4_ib1")
+
+; Loads from L0 complete in three cycles.  adjust_cost still factors
+; in user-specified memory latency, so return 1 here.
+(define_insn_reservation "ev4_ld" 1
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "ild,fld,ldsym,ld_l"))
+ "ev4_ib01+ev4_abox")
+
+; Stores can issue before the data (but not address) is ready.
+(define_insn_reservation "ev4_ist" 1
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "ist"))
+ "ev4_ib1+ev4_abox")
+
+; ??? Separate from ev4_ist because store_data_bypass_p can't handle
+; the patterns with multiple sets, like store-conditional.
+(define_insn_reservation "ev4_ist_c" 1
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "st_c"))
+ "ev4_ib1+ev4_abox")
+
+(define_insn_reservation "ev4_fst" 1
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "fst"))
+ "ev4_ib0+ev4_abox")
+
+; Memory barrier blocks ABOX insns until it's acknowledged by the external
+; memory bus. This may be *quite* slow. Setting this to 4 cycles gets
+; about all the benefit without making the DFA too large.
+(define_insn_reservation "ev4_mb" 4
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "mb"))
+ "ev4_ib1+ev4_abox,ev4_abox*3")
+
+; Branches have no delay cost, but do tie up the unit for two cycles.
+(define_insn_reservation "ev4_ibr" 2
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "ibr,jsr"))
+ "ev4_ib1+ev4_bbox,ev4_bbox")
+
+(define_insn_reservation "ev4_callpal" 2
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "callpal"))
+ "ev4_ib1+ev4_bbox,ev4_bbox")
+
+(define_insn_reservation "ev4_fbr" 2
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "fbr"))
+ "ev4_ib0+ev4_bbox,ev4_bbox")
+
+; Arithmetic insns normally have their results available after two
+; cycles.  There are a number of exceptions.
+
+(define_insn_reservation "ev4_iaddlog" 2
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "iadd,ilog"))
+ "ev4_ib0+ev4_ebox")
+
+(define_bypass 1
+ "ev4_iaddlog"
+ "ev4_ibr,ev4_iaddlog,ev4_shiftcm,ev4_icmp,ev4_imulsi,ev4_imuldi")
+
+(define_insn_reservation "ev4_shiftcm" 2
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "shift,icmov"))
+ "ev4_ib0+ev4_ebox")
+
+(define_insn_reservation "ev4_icmp" 2
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "icmp"))
+ "ev4_ib0+ev4_ebox")
+
+(define_bypass 1 "ev4_icmp" "ev4_ibr")
+
+(define_bypass 0
+ "ev4_iaddlog,ev4_shiftcm,ev4_icmp"
+ "ev4_ist"
+ "store_data_bypass_p")
+
+; Multiplies use a non-pipelined imul unit. Also, "no [ebox] insn can
+; be issued exactly three cycles before an integer multiply completes".
+
+(define_insn_reservation "ev4_imulsi" 21
+ (and (eq_attr "tune" "ev4")
+ (and (eq_attr "type" "imul")
+ (eq_attr "opsize" "si")))
+ "ev4_ib0+ev4_imul,ev4_imul*18,ev4_ebox")
+
+(define_bypass 20 "ev4_imulsi" "ev4_ist" "store_data_bypass_p")
+
+(define_insn_reservation "ev4_imuldi" 23
+ (and (eq_attr "tune" "ev4")
+ (and (eq_attr "type" "imul")
+ (eq_attr "opsize" "!si")))
+ "ev4_ib0+ev4_imul,ev4_imul*20,ev4_ebox")
+
+(define_bypass 22 "ev4_imuldi" "ev4_ist" "store_data_bypass_p")
+
+; Most FP insns have a 6 cycle latency, but with a 4 cycle bypass back in.
+(define_insn_reservation "ev4_fpop" 6
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "fadd,fmul,fcpys,fcmov"))
+ "ev4_ib1+ev4_fbox")
+
+(define_bypass 4 "ev4_fpop" "ev4_fpop")
+
+; The floating point divider is not pipelined. Also, "no FPOP insn can be
+; issued exactly five or exactly six cycles before an fdiv insn completes".
+
+(define_insn_reservation "ev4_fdivsf" 34
+ (and (eq_attr "tune" "ev4")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "si")))
+ "ev4_ib1+ev4_fdiv,ev4_fdiv*28,ev4_fdiv+ev4_fbox,ev4_fbox")
+
+(define_insn_reservation "ev4_fdivdf" 63
+ (and (eq_attr "tune" "ev4")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "di")))
+ "ev4_ib1+ev4_fdiv,ev4_fdiv*57,ev4_fdiv+ev4_fbox,ev4_fbox")
+
+; Traps don't consume or produce data.
+(define_insn_reservation "ev4_misc" 1
+ (and (eq_attr "tune" "ev4")
+ (eq_attr "type" "misc"))
+ "ev4_ib1")
diff --git a/gcc-4.2.1/gcc/config/alpha/ev5.md b/gcc-4.2.1/gcc/config/alpha/ev5.md
new file mode 100644
index 000000000..4207d020f
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/ev5.md
@@ -0,0 +1,195 @@
+;; Scheduling description for Alpha EV5.
+;; Copyright (C) 2002, 2004, 2005 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; EV5 has two asymmetric integer units, E0 and E1, plus separate
+;; FP add and multiply units.
+
+(define_automaton "ev5_0,ev5_1")
+(define_cpu_unit "ev5_e0,ev5_e1,ev5_fa,ev5_fm" "ev5_0")
+(define_reservation "ev5_e01" "ev5_e0|ev5_e1")
+(define_reservation "ev5_fam" "ev5_fa|ev5_fm")
+(define_cpu_unit "ev5_imul" "ev5_0")
+(define_cpu_unit "ev5_fdiv" "ev5_1")
+
+; Assume type "multi" single issues.
+(define_insn_reservation "ev5_multi" 1
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "multi"))
+ "ev5_e0+ev5_e1+ev5_fa+ev5_fm")
+
+; Stores can only issue to E0, and may not issue with loads.
+; Model this with some fake units.
+
+(define_cpu_unit "ev5_l0,ev5_l1,ev5_st" "ev5_0")
+(define_reservation "ev5_ld" "ev5_l0|ev5_l1")
+(exclusion_set "ev5_l0,ev5_l1" "ev5_st")
+
+(define_insn_reservation "ev5_st" 1
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "ist,fst,st_c,mb"))
+ "ev5_e0+ev5_st")
+
+; Loads from L0 complete in two cycles. adjust_cost still factors
+; in user-specified memory latency, so return 1 here.
+(define_insn_reservation "ev5_ld" 1
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "ild,fld,ldsym"))
+ "ev5_e01+ev5_ld")
+
+(define_insn_reservation "ev5_ld_l" 1
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "ld_l"))
+ "ev5_e0+ev5_ld")
+
+; Integer branches slot only to E1.
+(define_insn_reservation "ev5_ibr" 1
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "ibr"))
+ "ev5_e1")
+
+(define_insn_reservation "ev5_callpal" 1
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "callpal"))
+ "ev5_e1")
+
+(define_insn_reservation "ev5_jsr" 1
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "jsr"))
+ "ev5_e1")
+
+(define_insn_reservation "ev5_shift" 1
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "shift"))
+ "ev5_e0")
+
+(define_insn_reservation "ev5_mvi" 2
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "mvi"))
+ "ev5_e0")
+
+(define_insn_reservation "ev5_cmov" 2
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "icmov"))
+ "ev5_e01")
+
+(define_insn_reservation "ev5_iadd" 1
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "iadd"))
+ "ev5_e01")
+
+(define_insn_reservation "ev5_ilogcmp" 1
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "ilog,icmp"))
+ "ev5_e01")
+
+; Conditional move and branch can issue the same cycle as the test.
+(define_bypass 0 "ev5_ilogcmp" "ev5_ibr,ev5_cmov" "if_test_bypass_p")
+
+; Multiplies use a non-pipelined imul unit. Also, "no insn can be issued
+; to E0 exactly two cycles before an integer multiply completes".
+
+(define_insn_reservation "ev5_imull" 8
+ (and (eq_attr "tune" "ev5")
+ (and (eq_attr "type" "imul")
+ (eq_attr "opsize" "si")))
+ "ev5_e0+ev5_imul,ev5_imul*3,nothing,ev5_e0")
+
+(define_insn_reservation "ev5_imulq" 12
+ (and (eq_attr "tune" "ev5")
+ (and (eq_attr "type" "imul")
+ (eq_attr "opsize" "di")))
+ "ev5_e0+ev5_imul,ev5_imul*7,nothing,ev5_e0")
+
+(define_insn_reservation "ev5_imulh" 14
+ (and (eq_attr "tune" "ev5")
+ (and (eq_attr "type" "imul")
+ (eq_attr "opsize" "udi")))
+ "ev5_e0+ev5_imul,ev5_imul*7,nothing*3,ev5_e0")
+
+; The multiplier is unable to receive data from Ebox bypass paths. The
+; instruction issues at the expected time, but its latency is increased
+; by the time it takes for the input data to become available to the
+; multiplier. For example, an IMULL instruction issued one cycle later
+; than an ADDL instruction, which produced one of its operands, has a
+; latency of 10 (8 + 2). If the IMULL instruction is issued two cycles
+; later than the ADDL instruction, the latency is 9 (8 + 1).
+;
+; Model this instead with increased latency on the input instruction.
+
+(define_bypass 3
+ "ev5_ld,ev5_ld_l,ev5_shift,ev5_mvi,ev5_cmov,ev5_iadd,ev5_ilogcmp"
+ "ev5_imull,ev5_imulq,ev5_imulh")
+
+(define_bypass 9 "ev5_imull" "ev5_imull,ev5_imulq,ev5_imulh")
+(define_bypass 13 "ev5_imulq" "ev5_imull,ev5_imulq,ev5_imulh")
+(define_bypass 15 "ev5_imulh" "ev5_imull,ev5_imulq,ev5_imulh")
+
+; Similarly for the FPU we have two asymmetric units.
+
+(define_insn_reservation "ev5_fadd" 4
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "fadd,fcmov"))
+ "ev5_fa")
+
+(define_insn_reservation "ev5_fbr" 1
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "fbr"))
+ "ev5_fa")
+
+(define_insn_reservation "ev5_fcpys" 4
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "fcpys"))
+ "ev5_fam")
+
+(define_insn_reservation "ev5_fmul" 4
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "fmul"))
+ "ev5_fm")
+
+; The floating point divider is not pipelined. Also, "no insn can be issued
+; to FA exactly five cycles before an fdiv insn completes".
+;
+; ??? Do not model this late reservation due to the enormously increased
+; size of the resulting DFA.
+;
+; ??? Putting ev5_fa and ev5_fdiv alone into the same automata produces
+; a DFA of acceptable size, but putting ev5_fm and ev5_fa into separate
+; automata produces incorrect results for insns that can choose one or
+; the other, i.e. ev5_fcpys.
+
+(define_insn_reservation "ev5_fdivsf" 15
+ (and (eq_attr "tune" "ev5")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "si")))
+ ; "ev5_fa+ev5_fdiv,ev5_fdiv*9,ev5_fa+ev5_fdiv,ev5_fdiv*4"
+ "ev5_fa+ev5_fdiv,ev5_fdiv*14")
+
+(define_insn_reservation "ev5_fdivdf" 22
+ (and (eq_attr "tune" "ev5")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "di")))
+ ; "ev5_fa+ev5_fdiv,ev5_fdiv*17,ev5_fa+ev5_fdiv,ev5_fdiv*4"
+ "ev5_fa+ev5_fdiv,ev5_fdiv*21")
+
+; Traps don't consume or produce data; rpcc is latency 2 if we ever add it.
+(define_insn_reservation "ev5_misc" 2
+ (and (eq_attr "tune" "ev5")
+ (eq_attr "type" "misc"))
+ "ev5_e0")
diff --git a/gcc-4.2.1/gcc/config/alpha/ev6.md b/gcc-4.2.1/gcc/config/alpha/ev6.md
new file mode 100644
index 000000000..2b04e5456
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/ev6.md
@@ -0,0 +1,178 @@
+;; Scheduling description for Alpha EV6.
+;; Copyright (C) 2002, 2004, 2005 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+; EV6 can issue 4 insns per clock. It's out-of-order, so this isn't
+; expected to help over-much, but a precise description can be important
+; for software pipelining.
+;
+; EV6 has two symmetric pairs ("clusters") of two asymmetric integer
+; units ("upper" and "lower"), yielding pipe names U0, U1, L0, L1.
+;
+; ??? The clusters have independent register files that are re-synced
+; every cycle. Thus there is one additional cycle of latency between
+; insns issued on different clusters. Possibly model that by duplicating
+; all EBOX insn_reservations that can issue to either cluster, increasing
+; all latencies by one, and adding bypasses within the cluster.
+;
+; ??? In addition, instruction order affects cluster issue.
+
+(define_automaton "ev6_0,ev6_1")
+(define_cpu_unit "ev6_u0,ev6_u1,ev6_l0,ev6_l1" "ev6_0")
+(define_reservation "ev6_u" "ev6_u0|ev6_u1")
+(define_reservation "ev6_l" "ev6_l0|ev6_l1")
+(define_reservation "ev6_ebox" "ev6_u|ev6_l")
+
+(define_cpu_unit "ev6_fa" "ev6_1")
+(define_cpu_unit "ev6_fm,ev6_fst0,ev6_fst1" "ev6_0")
+(define_reservation "ev6_fst" "ev6_fst0|ev6_fst1")
+
+; Assume type "multi" single issues.
+(define_insn_reservation "ev6_multi" 1
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "multi"))
+ "ev6_u0+ev6_u1+ev6_l0+ev6_l1+ev6_fa+ev6_fm+ev6_fst0+ev6_fst1")
+
+; Integer loads take at least 3 clocks, and only issue to lower units.
+; adjust_cost still factors in user-specified memory latency, so return 1 here.
+(define_insn_reservation "ev6_ild" 1
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "ild,ldsym,ld_l"))
+ "ev6_l")
+
+(define_insn_reservation "ev6_ist" 1
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "ist,st_c"))
+ "ev6_l")
+
+(define_insn_reservation "ev6_mb" 1
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "mb"))
+ "ev6_l1")
+
+; FP loads take at least 4 clocks. adjust_cost still factors
+; in user-specified memory latency, so return 2 here.
+(define_insn_reservation "ev6_fld" 2
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "fld"))
+ "ev6_l")
+
+; The FPU communicates with memory and the integer register file
+; via two fp store units. We need a slot in the fst immediately, and
+; a slot in LOW after the operand data is ready, at which point the
+; data may be moved either to the store queue or the integer register
+; file and the insn retired.
+
+(define_insn_reservation "ev6_fst" 3
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "fst"))
+ "ev6_fst,nothing,ev6_l")
+
+; Arithmetic goes anywhere.
+(define_insn_reservation "ev6_arith" 1
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "iadd,ilog,icmp"))
+ "ev6_ebox")
+
+; Motion video insns issue only to U0, and take three ticks.
+(define_insn_reservation "ev6_mvi" 3
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "mvi"))
+ "ev6_u0")
+
+; Shifts issue to upper units.
+(define_insn_reservation "ev6_shift" 1
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "shift"))
+ "ev6_u")
+
+; Multiplies issue only to U1, and all take 7 ticks.
+(define_insn_reservation "ev6_imul" 7
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "imul"))
+ "ev6_u1")
+
+; Conditional moves decompose into two independent primitives, each taking
+; one cycle. Since ev6 is out-of-order, we can't see anything but two cycles.
+(define_insn_reservation "ev6_icmov" 2
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "icmov"))
+ "ev6_ebox,ev6_ebox")
+
+; Integer branches issue to upper units.
+(define_insn_reservation "ev6_ibr" 1
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "ibr,callpal"))
+ "ev6_u")
+
+; Calls only issue to L0.
+(define_insn_reservation "ev6_jsr" 1
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "jsr"))
+ "ev6_l0")
+
+; Ftoi/itof only issue to lower pipes.
+(define_insn_reservation "ev6_itof" 3
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "itof"))
+ "ev6_l")
+
+(define_insn_reservation "ev6_ftoi" 3
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "ftoi"))
+ "ev6_fst,nothing,ev6_l")
+
+(define_insn_reservation "ev6_fmul" 4
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "fmul"))
+ "ev6_fm")
+
+(define_insn_reservation "ev6_fadd" 4
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "fadd,fcpys,fbr"))
+ "ev6_fa")
+
+(define_insn_reservation "ev6_fcmov" 8
+ (and (eq_attr "tune" "ev6")
+ (eq_attr "type" "fcmov"))
+ "ev6_fa,nothing*3,ev6_fa")
+
+(define_insn_reservation "ev6_fdivsf" 12
+ (and (eq_attr "tune" "ev6")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "si")))
+ "ev6_fa*9")
+
+(define_insn_reservation "ev6_fdivdf" 15
+ (and (eq_attr "tune" "ev6")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "di")))
+ "ev6_fa*12")
+
+(define_insn_reservation "ev6_sqrtsf" 18
+ (and (eq_attr "tune" "ev6")
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "opsize" "si")))
+ "ev6_fa*15")
+
+(define_insn_reservation "ev6_sqrtdf" 33
+ (and (eq_attr "tune" "ev6")
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "opsize" "di")))
+ "ev6_fa*30")
diff --git a/gcc-4.2.1/gcc/config/alpha/freebsd.h b/gcc-4.2.1/gcc/config/alpha/freebsd.h
new file mode 100644
index 000000000..ca49a8bb0
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/freebsd.h
@@ -0,0 +1,83 @@
+/* Definitions for DEC Alpha/AXP running FreeBSD using the ELF format
+ Copyright (C) 2000, 2002, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by David E. O'Brien <obrien@FreeBSD.org> and BSDi.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "fbsd_dynamic_linker", FBSD_DYNAMIC_LINKER }
+
+/* Provide a CPP_SPEC appropriate for FreeBSD/alpha. Besides dealing
+ with the GCC option `-posix', we must deal with the Alpha's FP
+ issues. */
+
+#undef CPP_SPEC
+#define CPP_SPEC "%(cpp_subtarget) %{posix:-D_POSIX_SOURCE}"
+
+#define LINK_SPEC "%{G*} %{relax:-relax} \
+ %{p:%nconsider using `-pg' instead of `-p' with gprof(1)} \
+ %{Wl,*:%*} \
+ %{assert*} %{R*} %{rpath*} %{defsym*} \
+ %{shared:-Bshareable %{h*} %{soname*}} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker %(fbsd_dynamic_linker) }} \
+ %{static:-Bstatic}} \
+ %{symbolic:-Bsymbolic}"
+
+
+/************************[ Target stuff ]***********************************/
+
+/* Define the actual types of some ANSI-mandated types.
+ Needs to agree with <machine/ansi.h>. GCC defaults come from c-decl.c,
+ c-common.c, and config/<arch>/<arch>.h. */
+
+/* alpha.h gets this wrong for FreeBSD. We use the GCC defaults instead. */
+#undef WCHAR_TYPE
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (FreeBSD/alpha ELF)");
+
+#define TARGET_ELF 1
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_FPREGS | MASK_GAS)
+
+#undef HAS_INIT_SECTION
+
+/* Show that we need a GP when profiling. */
+#undef TARGET_PROFILING_NEEDS_GP
+#define TARGET_PROFILING_NEEDS_GP 1
+
+/* This is the char to use for continuation (in case we need to turn
+ continuation back on). */
+
+#undef DBX_CONTIN_CHAR
+#define DBX_CONTIN_CHAR '?'
+
+/* Don't default to pcc-struct-return, we want to retain compatibility with
+ older FreeBSD releases AND pcc-struct-return may not be reentrant. */
+
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 0
diff --git a/gcc-4.2.1/gcc/config/alpha/gnu.h b/gcc-4.2.1/gcc/config/alpha/gnu.h
new file mode 100644
index 000000000..b9bfceafd
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/gnu.h
@@ -0,0 +1,26 @@
+/* Configuration for an Alpha running GNU with ELF as the target machine. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Alpha GNU)");
+
+#undef TARGET_OS_CPP_BUILTINS /* config.gcc includes alpha/linux.h. */
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ HURD_TARGET_OS_CPP_BUILTINS(); \
+ builtin_define ("_LONGLONG"); \
+ } while (0)
+
+#undef ELF_DYNAMIC_LINKER
+#define ELF_DYNAMIC_LINKER "/lib/ld.so"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: \
+ %{!static: \
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}} \
+ %{static:crt0.o%s}} \
+ crti.o%s \
+ %{!static:%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}}"
+
+/* FIXME: Is a Hurd-specific fallback mechanism necessary? */
+#undef MD_UNWIND_SUPPORT
diff --git a/gcc-4.2.1/gcc/config/alpha/lib1funcs.asm b/gcc-4.2.1/gcc/config/alpha/lib1funcs.asm
new file mode 100644
index 000000000..41a551057
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/lib1funcs.asm
@@ -0,0 +1,320 @@
+/* DEC Alpha division and remainder support.
+ Copyright (C) 1994, 1999 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* This had to be written in assembler because the division functions
+ use a non-standard calling convention.
+
+ This file provides an implementation of __divqu, __divq, __divlu,
+ __divl, __remqu, __remq, __remlu and __reml. CPP macros control
+ the exact operation.
+
+ Operation performed: $27 := $24 o $25, clobber $28, return address to
+ caller in $23, where o is one of the operations.
+
+ The following macros need to be defined:
+
+ SIZE, the number of bits, 32 or 64.
+
+ TYPE, either UNSIGNED or SIGNED
+
+ OPERATION, either DIVISION or REMAINDER
+
+ SPECIAL_CALLING_CONVENTION, 0 or 1. It is useful for debugging to
+ define this to 0. That removes the `__' prefix so the function names
+ do not collide with the existing libc.a names, and uses the standard
+ Alpha procedure calling convention.
+*/
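+
+/* For reference, the loop below is the classic shift-and-subtract
+   algorithm.  A minimal C sketch of the unsigned 64-bit case, as an
+   illustration only (the function name is invented, and the real code
+   traps on a zero divisor via gentrap rather than __builtin_trap):
+
+     unsigned long
+     udivmodq (unsigned long n, unsigned long d, unsigned long *rem)
+     {
+       unsigned long q = 0;
+       int k = 0;
+
+       if (d == 0)
+         __builtin_trap ();
+
+       // Align D just below N, counting steps; stop before the top
+       // bit would shift out.
+       while (d <= n && !(d >> 63))
+         d <<= 1, k++;
+       if (d > n)        // stepped one past N; back off
+         d >>= 1, k--;
+
+       // One quotient bit per alignment step.
+       for (; k >= 0; k--)
+         {
+           q <<= 1;
+           if (d <= n)
+             n -= d, q |= 1;
+           d >>= 1;
+         }
+
+       *rem = n;       // remainder is what is left of N
+       return q;
+     }
+*/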
+
+#ifndef SPECIAL_CALLING_CONVENTION
+#define SPECIAL_CALLING_CONVENTION 1
+#endif
+
+#ifdef L_divl
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __divl
+#else
+#define FUNCTION_NAME divl
+#endif
+#define SIZE 32
+#define TYPE SIGNED
+#define OPERATION DIVISION
+#endif
+
+#ifdef L_divlu
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __divlu
+#else
+#define FUNCTION_NAME divlu
+#endif
+#define SIZE 32
+#define TYPE UNSIGNED
+#define OPERATION DIVISION
+#endif
+
+#ifdef L_divq
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __divq
+#else
+#define FUNCTION_NAME divq
+#endif
+#define SIZE 64
+#define TYPE SIGNED
+#define OPERATION DIVISION
+#endif
+
+#ifdef L_divqu
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __divqu
+#else
+#define FUNCTION_NAME divqu
+#endif
+#define SIZE 64
+#define TYPE UNSIGNED
+#define OPERATION DIVISION
+#endif
+
+#ifdef L_reml
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __reml
+#else
+#define FUNCTION_NAME reml
+#endif
+#define SIZE 32
+#define TYPE SIGNED
+#define OPERATION REMAINDER
+#endif
+
+#ifdef L_remlu
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __remlu
+#else
+#define FUNCTION_NAME remlu
+#endif
+#define SIZE 32
+#define TYPE UNSIGNED
+#define OPERATION REMAINDER
+#endif
+
+#ifdef L_remq
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __remq
+#else
+#define FUNCTION_NAME remq
+#endif
+#define SIZE 64
+#define TYPE SIGNED
+#define OPERATION REMAINDER
+#endif
+
+#ifdef L_remqu
+#if SPECIAL_CALLING_CONVENTION
+#define FUNCTION_NAME __remqu
+#else
+#define FUNCTION_NAME remqu
+#endif
+#define SIZE 64
+#define TYPE UNSIGNED
+#define OPERATION REMAINDER
+#endif
+
+#define tmp0 $3
+#define tmp1 $28
+#define cnt $1
+#define result_sign $2
+
+#if SPECIAL_CALLING_CONVENTION
+#define N $24
+#define D $25
+#define Q RETREG
+#define RETREG $27
+#else
+#define N $16
+#define D $17
+#define Q RETREG
+#define RETREG $0
+#endif
+
+/* Misc symbols to make alpha assembler easier to read. */
+#define zero $31
+#define sp $30
+
+/* Symbols to make interface nicer. */
+#define UNSIGNED 0
+#define SIGNED 1
+#define DIVISION 0
+#define REMAINDER 1
+
+ .set noreorder
+ .set noat
+.text
+ .align 3
+ .globl FUNCTION_NAME
+ .ent FUNCTION_NAME
+FUNCTION_NAME:
+
+ .frame $30,0,$26,0
+ .prologue 0
+
+/* Under the special calling convention, we have to preserve all register
+ values but $23 and $28. */
+#if SPECIAL_CALLING_CONVENTION
+ lda sp,-64(sp)
+#if OPERATION == DIVISION
+ stq N,0(sp)
+#endif
+ stq D,8(sp)
+ stq cnt,16(sp)
+ stq result_sign,24(sp)
+ stq tmp0,32(sp)
+#endif
+
+/* If we are computing the remainder, move N to the register that is used
+ for the return value, and redefine what register is used for N. */
+#if OPERATION == REMAINDER
+ bis N,N,RETREG
+#undef N
+#define N RETREG
+#endif
+
+/* Perform conversion from 32 bit types to 64 bit types. */
+#if SIZE == 32
+#if TYPE == SIGNED
+ /* If there are problems with the signed case, add these instructions.
+ The caller should already have done this.
+ addl N,0,N # sign extend N
+ addl D,0,D # sign extend D
+ */
+#else /* UNSIGNED */
+ zap N,0xf0,N # zero extend N (caller required to sign extend)
+ zap D,0xf0,D # zero extend D
+#endif
+#endif
+
+/* Check for divide by zero. */
+ bne D,$34
+ lda $16,-2(zero)
+ call_pal 0xaa
+$34:
+
+#if TYPE == SIGNED
+#if OPERATION == DIVISION
+ xor N,D,result_sign
+#else
+ bis N,N,result_sign
+#endif
+/* Get the absolute values of N and D. */
+ subq zero,N,tmp0
+ cmovlt N,tmp0,N
+ subq zero,D,tmp0
+ cmovlt D,tmp0,D
+#endif
+
+/* Compute CNT = ceil(log2(N)) - ceil(log2(D)). This is the number of
+ divide iterations we will have to perform. Should you wish to optimize
+ this, check a few bits at a time, preferably using zap/zapnot. Be
+ careful though, this code runs fast for the most common cases, when the
+ quotient is small. */
+ bge N,$35
+ bis zero,1,cnt
+ blt D,$40
+ .align 3
+$39: addq D,D,D
+ addl cnt,1,cnt
+ bge D,$39
+ br zero,$40
+$35: cmpult N,D,tmp0
+ bis zero,zero,cnt
+ bne tmp0,$42
+ .align 3
+$44: addq D,D,D
+ cmpult N,D,tmp0
+ addl cnt,1,cnt
+ beq tmp0,$44
+$42: srl D,1,D
+$40:
+ subl cnt,1,cnt
+
+
+/* Actual divide. Could be optimized with unrolling. */
+#if OPERATION == DIVISION
+ bis zero,zero,Q
+#endif
+ blt cnt,$46
+ .align 3
+$49: cmpule D,N,tmp1
+ subq N,D,tmp0
+ srl D,1,D
+ subl cnt,1,cnt
+ cmovne tmp1,tmp0,N
+#if OPERATION == DIVISION
+ addq Q,Q,Q
+ bis Q,tmp1,Q
+#endif
+ bge cnt,$49
+$46:
+
+
+/* The result is now in RETREG. NOTE! It was written to RETREG using
+ either N or Q as a synonym! */
+
+
+/* Change the sign of the result as needed. */
+#if TYPE == SIGNED
+ subq zero,RETREG,tmp0
+ cmovlt result_sign,tmp0,RETREG
+#endif
+
+
+/* Restore clobbered registers. */
+#if SPECIAL_CALLING_CONVENTION
+#if OPERATION == DIVISION
+ ldq N,0(sp)
+#endif
+ ldq D,8(sp)
+ ldq cnt,16(sp)
+ ldq result_sign,24(sp)
+ ldq tmp0,32(sp)
+
+ lda sp,64(sp)
+#endif
+
+
+/* Sign extend an *unsigned* 32 bit result, as required by the Alpha
+ conventions. */
+#if TYPE == UNSIGNED && SIZE == 32
+ /* This could be avoided by adding some CPP hair to the divide loop.
+ It is probably not worth the added complexity. */
+ addl RETREG,0,RETREG
+#endif
+
+
+#if SPECIAL_CALLING_CONVENTION
+ ret zero,($23),1
+#else
+ ret zero,($26),1
+#endif
+ .end FUNCTION_NAME
diff --git a/gcc-4.2.1/gcc/config/alpha/libgcc-alpha-ldbl.ver b/gcc-4.2.1/gcc/config/alpha/libgcc-alpha-ldbl.ver
new file mode 100644
index 000000000..f8fc4a9fa
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/libgcc-alpha-ldbl.ver
@@ -0,0 +1,32 @@
+%ifdef __LONG_DOUBLE_128__
+
+# long double 128 bit support in libgcc_s.so.1 is only available
+# when configured with --with-long-double-128. Make sure all the
+# symbols are available at @@GCC_LDBL_* versions to make it clear
+# there is a configurable symbol set.
+
+%exclude {
+ __fixtfdi
+ __fixunstfdi
+ __floatditf
+
+ __divtc3
+ __multc3
+ __powitf2
+}
+
+%inherit GCC_LDBL_3.0 GCC_3.0
+GCC_LDBL_3.0 {
+ __fixtfdi
+ __fixunstfdi
+ __floatditf
+}
+
+%inherit GCC_LDBL_4.0.0 GCC_4.0.0
+GCC_LDBL_4.0.0 {
+ __divtc3
+ __multc3
+ __powitf2
+}
+
+%endif
diff --git a/gcc-4.2.1/gcc/config/alpha/linux-elf.h b/gcc-4.2.1/gcc/config/alpha/linux-elf.h
new file mode 100644
index 000000000..565cd7ef9
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/linux-elf.h
@@ -0,0 +1,56 @@
+/* Definitions of target machine for GNU compiler
+ for Alpha Linux-based GNU systems using ELF.
+ Copyright (C) 1996, 1997, 1998, 2001, 2002, 2003, 2006
+ Free Software Foundation, Inc.
+ Contributed by Richard Henderson.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Alpha GNU/Linux for ELF)");
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+{ "elf_dynamic_linker", ELF_DYNAMIC_LINKER },
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2"
+#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0"
+#if UCLIBC_DEFAULT
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:%{muclibc:%e-mglibc and -muclibc used together}" G ";:" U "}"
+#else
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{muclibc:%{mglibc:%e-mglibc and -muclibc used together}" U ";:" G "}"
+#endif
+#define LINUX_DYNAMIC_LINKER \
+ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER, UCLIBC_DYNAMIC_LINKER)
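+
+/* E.g. with the glibc default, plain links use ld-linux.so.2, -muclibc
+ selects ld-uClibc.so.0, and giving both -mglibc and -muclibc triggers
+ the %e error message in the spec above. */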
+
+#define ELF_DYNAMIC_LINKER LINUX_DYNAMIC_LINKER
+
+#define LINK_SPEC "-m elf64alpha %{G*} %{relax:-relax} \
+ %{O*:-O3} %{!O*:-O1} \
+ %{shared:-shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker %(elf_dynamic_linker)}} \
+ %{static:-static}}"
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+"%{pthread:-lpthread} %{shared:-lc}%{!shared:%{profile:-lc_p}%{!profile:-lc}} "
+
+#define TARGET_ASM_FILE_END file_end_indicate_exec_stack
diff --git a/gcc-4.2.1/gcc/config/alpha/linux-unwind.h b/gcc-4.2.1/gcc/config/alpha/linux-unwind.h
new file mode 100644
index 000000000..1f1c2af72
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/linux-unwind.h
@@ -0,0 +1,82 @@
+/* DWARF2 EH unwinding support for Alpha Linux.
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Do code reading to identify a signal frame, and set the frame
+ state data appropriately. See unwind-dw2.c for the structs. */
+
+#include <signal.h>
+#include <sys/ucontext.h>
+
+#define MD_FALLBACK_FRAME_STATE_FOR alpha_fallback_frame_state
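+
+/* DWARF column usage below: 0-29 are the saved integer registers ($30
+ becomes the CFA and $31 is always zero), 32-62 are the FP registers,
+ and 64 is a dedicated column for the signal PC, hence the
+ retaddr_column of 64. */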
+
+static _Unwind_Reason_Code
+alpha_fallback_frame_state (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ unsigned int *pc = context->ra;
+ struct sigcontext *sc;
+ long new_cfa, i;
+
+ if (pc[0] != 0x47fe0410 /* mov $30,$16 */
+ || pc[2] != 0x00000083 /* callsys */)
+ return _URC_END_OF_STACK;
+ if (context->cfa == 0)
+ return _URC_END_OF_STACK;
+ if (pc[1] == 0x201f0067) /* lda $0,NR_sigreturn */
+ sc = context->cfa;
+ else if (pc[1] == 0x201f015f) /* lda $0,NR_rt_sigreturn */
+ {
+ struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+ } *rt_ = context->cfa;
+ sc = &rt_->uc.uc_mcontext;
+ }
+ else
+ return _URC_END_OF_STACK;
+ new_cfa = sc->sc_regs[30];
+ fs->cfa_how = CFA_REG_OFFSET;
+ fs->cfa_reg = 30;
+ fs->cfa_offset = new_cfa - (long) context->cfa;
+ for (i = 0; i < 30; ++i)
+ {
+ fs->regs.reg[i].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i].loc.offset
+ = (long)&sc->sc_regs[i] - new_cfa;
+ }
+ for (i = 0; i < 31; ++i)
+ {
+ fs->regs.reg[i+32].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i+32].loc.offset
+ = (long)&sc->sc_fpregs[i] - new_cfa;
+ }
+ fs->regs.reg[64].how = REG_SAVED_OFFSET;
+ fs->regs.reg[64].loc.offset = (long)&sc->sc_pc - new_cfa;
+ fs->retaddr_column = 64;
+ return _URC_NO_REASON;
+}
diff --git a/gcc-4.2.1/gcc/config/alpha/linux.h b/gcc-4.2.1/gcc/config/alpha/linux.h
new file mode 100644
index 000000000..1be120f9e
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/linux.h
@@ -0,0 +1,85 @@
+/* Definitions of target machine for GNU compiler,
+ for Alpha Linux-based GNU systems.
+ Copyright (C) 1996, 1997, 1998, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+ Contributed by Richard Henderson.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_FPREGS | MASK_GAS)
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define ("__gnu_linux__"); \
+ builtin_define ("_LONGLONG"); \
+ builtin_define_std ("linux"); \
+ builtin_define_std ("unix"); \
+ builtin_assert ("system=linux"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=posix"); \
+ /* The GNU C++ standard library requires this. */ \
+ if (c_dialect_cxx ()) \
+ builtin_define ("_GNU_SOURCE"); \
+ } while (0)
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{pthread:-lpthread} \
+ %{shared:-lc} \
+ %{!shared: %{profile:-lc_p}%{!profile:-lc}}"
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
+
+/* Show that we need a GP when profiling. */
+#undef TARGET_PROFILING_NEEDS_GP
+#define TARGET_PROFILING_NEEDS_GP 1
+
+/* Don't care about faults in the prologue. */
+#undef TARGET_CAN_FAULT_IN_PROLOGUE
+#define TARGET_CAN_FAULT_IN_PROLOGUE 1
+
+/* OS fixes up EV5 data fault on prefetch. */
+#undef TARGET_FIXUP_EV5_PREFETCH
+#define TARGET_FIXUP_EV5_PREFETCH 1
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+/* Define this so that all GNU/Linux targets handle the same pragmas. */
+#define HANDLE_PRAGMA_PACK_PUSH_POP
+
+/* Determine whether the entire c99 runtime is present in the
+ runtime library. */
+#define TARGET_C99_FUNCTIONS (OPTION_GLIBC)
+
+#define TARGET_POSIX_IO
+
+#define LINK_GCC_C_SEQUENCE_SPEC \
+ "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
+
+/* Use --as-needed -lgcc_s for eh support. */
+#ifdef HAVE_LD_AS_NEEDED
+#define USE_LD_AS_NEEDED 1
+#endif
+
+#define MD_UNWIND_SUPPORT "config/alpha/linux-unwind.h"
+
+/* Define if long doubles should be mangled as 'g'. */
+#define TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
diff --git a/gcc-4.2.1/gcc/config/alpha/netbsd.h b/gcc-4.2.1/gcc/config/alpha/netbsd.h
new file mode 100644
index 000000000..614b92b7a
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/netbsd.h
@@ -0,0 +1,83 @@
+/* Definitions of target machine for GNU compiler,
+ for Alpha NetBSD systems.
+ Copyright (C) 1998, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_FPREGS | MASK_GAS)
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ NETBSD_OS_CPP_BUILTINS_ELF(); \
+ } while (0)
+
+
+/* NetBSD doesn't use the LANGUAGE* built-ins. */
+#undef SUBTARGET_LANGUAGE_CPP_BUILTINS
+#define SUBTARGET_LANGUAGE_CPP_BUILTINS() /* nothing */
+
+
+/* Show that we need a GP when profiling. */
+#undef TARGET_PROFILING_NEEDS_GP
+#define TARGET_PROFILING_NEEDS_GP 1
+
+
+/* Provide a CPP_SUBTARGET_SPEC appropriate for NetBSD/alpha. We use
+ this to pull in CPP specs that all NetBSD configurations need. */
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC NETBSD_CPP_SPEC
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "netbsd_link_spec", NETBSD_LINK_SPEC_ELF }, \
+ { "netbsd_entry_point", NETBSD_ENTRY_POINT }, \
+ { "netbsd_endfile_spec", NETBSD_ENDFILE_SPEC },
+
+
+/* Provide a LINK_SPEC appropriate for a NetBSD/alpha ELF target. */
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{G*} %{relax:-relax} \
+ %{O*:-O3} %{!O*:-O1} \
+ %(netbsd_link_spec)"
+
+#define NETBSD_ENTRY_POINT "__start"
+
+
+/* Provide an ENDFILE_SPEC appropriate for NetBSD/alpha ELF. Here we
+ add crtend.o, which provides part of the support for getting
+ C++ file-scope static objects deconstructed after exiting "main".
+
+ We also need to handle the GCC option `-ffast-math'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{ffast-math|funsafe-math-optimizations:crtfm%O%s} \
+ %(netbsd_endfile_spec)"
+
+
+/* Attempt to enable execute permissions on the stack. */
+
+#define ENABLE_EXECUTE_STACK NETBSD_ENABLE_EXECUTE_STACK
+
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (NetBSD/alpha ELF)");
diff --git a/gcc-4.2.1/gcc/config/alpha/openbsd.h b/gcc-4.2.1/gcc/config/alpha/openbsd.h
new file mode 100644
index 000000000..f31467d6a
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/openbsd.h
@@ -0,0 +1,99 @@
+/* Configuration file for an alpha OpenBSD target.
+ Copyright (C) 1999, 2003 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* We settle for little endian for now. */
+#define TARGET_ENDIAN_DEFAULT 0
+
+/* Controlling the compilation driver. */
+
+/* alpha needs __start. */
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{!nostdlib:%{!r*:%{!e*:-e __start}}} -dc -dp %{assert*}"
+
+/* run-time target specifications */
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define ("__OpenBSD__"); \
+ builtin_define ("__ANSI_COMPAT"); \
+ builtin_define ("__unix__"); \
+ builtin_assert ("system=unix"); \
+ } while (0)
+
+/* Layout of source language data types. */
+
+/* This must agree with <machine/ansi.h> */
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#define LOCAL_LABEL_PREFIX "."
+
+/* We don't have an init section yet. */
+#undef HAS_INIT_SECTION
+
+/* collect2 support (assembler format: macros for initialization). */
+
+/* Don't tell collect2 we use COFF, as we don't have (yet?) a dynamic ld
+ library with the proper functions to handle this; collect2 will then
+ default to using nm. */
+#undef OBJECT_FORMAT_COFF
+#undef EXTENDED_COFF
+
+/* Assembler format: exception region output. */
+
+/* All configurations that don't use elf must be explicit about not using
+ dwarf unwind information. */
+#ifdef INCOMING_RETURN_ADDR_RTX
+#undef DWARF2_UNWIND_INFO
+#define DWARF2_UNWIND_INFO 0
+#endif
+
+/* Assembler format: label output. */
+
+/* alpha ecoff supports only weak aliases. */
+#undef ASM_WEAKEN_LABEL
+#define ASM_WEAKEN_LABEL(FILE,NAME) ASM_OUTPUT_WEAK_ALIAS (FILE,NAME,0)
+
+#define ASM_OUTPUT_WEAK_ALIAS(FILE,NAME,VALUE) \
+ do { \
+ fputs ("\t.weakext\t", FILE); \
+ assemble_name (FILE, NAME); \
+ if (VALUE) \
+ { \
+ fputs (" , ", FILE); \
+ assemble_name (FILE, VALUE); \
+ } \
+ fputc ('\n', FILE); \
+ } while (0)
+
+
diff --git a/gcc-4.2.1/gcc/config/alpha/osf.h b/gcc-4.2.1/gcc/config/alpha/osf.h
new file mode 100644
index 000000000..d798840c0
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/osf.h
@@ -0,0 +1,213 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha on OSF/1.
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 2001, 2002, 2003,
+ 2004 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* As of OSF 4.0, as can subtract adjacent labels. */
+
+#undef TARGET_AS_CAN_SUBTRACT_LABELS
+#define TARGET_AS_CAN_SUBTRACT_LABELS 1
+
+/* The GEM libraries for X_float are present, though not used by C. */
+
+#undef TARGET_HAS_XFLOATING_LIBS
+#define TARGET_HAS_XFLOATING_LIBS 1
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define_std ("unix"); \
+ builtin_define_std ("SYSTYPE_BSD"); \
+ builtin_define ("_SYSTYPE_BSD"); \
+ builtin_define ("__osf__"); \
+ builtin_define ("__digital__"); \
+ builtin_define ("__arch64__"); \
+ builtin_define ("_LONGLONG"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=xpg4"); \
+ /* Tru64 UNIX V5 has a 16 byte long \
+ double type and requires __X_FLOAT \
+ to be defined for <math.h>. */ \
+ if (LONG_DOUBLE_TYPE_SIZE == 128) \
+ builtin_define ("__X_FLOAT"); \
+ \
+ /* Tru64 UNIX V4/V5 provide several ISO C94 \
+ features protected by the corresponding \
+ __STDC_VERSION__ macro. libstdc++ v3 \
+ needs them as well. */ \
+ if (c_dialect_cxx ()) \
+ builtin_define ("__STDC_VERSION__=199409L"); \
+ } while (0)
+
+/* Accept DEC C flags for multithreaded programs. We use _PTHREAD_USE_D4
+ instead of PTHREAD_USE_D4 since both have the same effect and the former
+ doesn't invade the users' namespace. */
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC \
+"%{pthread|threads:-D_REENTRANT} %{threads:-D_PTHREAD_USE_D4}"
+
+/* Under OSF4, -p and -pg require -lprof1, and -lprof1 requires -lpdf. */
+
+#define LIB_SPEC \
+"%{p|pg:-lprof1%{pthread|threads:_r} -lpdf} %{a:-lprof2} \
+ %{threads: -lpthreads} %{pthread|threads: -lpthread -lmach -lexc} -lc"
+
+/* Pass "-G 8" to ld because Alpha's CC does. Pass -O3 if we are
+ optimizing, -O1 if we are not. Pass -S to silence `weak symbol
+ multiply defined' warnings. Pass -shared, -non_shared or
+ -call_shared as appropriate. Pass -hidden_symbol so that our
+ constructor and call-frame data structures are not accidentally
+ overridden. */
+#define LINK_SPEC \
+ "-G 8 %{O*:-O3} %{!O*:-O1} -S %{static:-non_shared} \
+ %{!static:%{shared:-shared -hidden_symbol _GLOBAL_*} \
+ %{!shared:-call_shared}} %{pg} %{taso} %{rpath*}"
+
+#define STARTFILE_SPEC \
+ "%{!shared:%{pg:gcrt0.o%s}%{!pg:%{p:mcrt0.o%s}%{!p:crt0.o%s}}}"
+
+#define ENDFILE_SPEC \
+ "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s}"
+
+#define MD_STARTFILE_PREFIX "/usr/lib/cmplrs/cc/"
+
+/* Tru64 UNIX V5.1 requires a special as flag. Empty by default. */
+
+#define ASM_OLDAS_SPEC ""
+
+/* In OSF/1 v3.2c, the assembler by default does not output file names which
+ causes mips-tfile to fail. Passing -g to the assembler fixes this problem.
+ ??? Strictly speaking, we need -g only if the user specifies -g. Passing
+ it always means that we get slightly larger than necessary object files
+ if the user does not specify -g. If we don't pass -g, then mips-tfile
+ will need to be fixed to work in this case. Pass -O0 since some
+ optimizations are broken and don't help us anyway. Pass -nocpp because
+ there's no point in running CPP on our assembler output. */
+#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_GAS) != 0
+#define ASM_SPEC "%{malpha-as:-g %(asm_oldas) -nocpp %{pg} -O0}"
+#else
+#define ASM_SPEC "%{!mgas:-g %(asm_oldas) -nocpp %{pg} -O0}"
+#endif
+
+/* Specify to run a post-processor, mips-tfile after the assembler
+ has run to stuff the ecoff debug information into the object file.
+ This is needed because the Alpha assembler provides no way
+ of specifying such information in the assembly file. */
+
+#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_GAS) != 0
+
+#define ASM_FINAL_SPEC "\
+%{malpha-as: %{!mno-mips-tfile: \
+ \n mips-tfile %{v*: -v} \
+ %{K: -I %b.o~} \
+ %{!K: %{save-temps: -I %b.o~}} \
+ %{c:%W{o*}%{!o*:-o %b.o}}%{!c:-o %U.o} \
+ %{.s:%i} %{!.s:%g.s}}}"
+
+#else
+#define ASM_FINAL_SPEC "\
+%{!mgas: %{!mno-mips-tfile: \
+ \n mips-tfile %{v*: -v} \
+ %{K: -I %b.o~} \
+ %{!K: %{save-temps: -I %b.o~}} \
+ %{c:%W{o*}%{!o*:-o %b.o}}%{!c:-o %U.o} \
+ %{.s:%i} %{!.s:%g.s}}}"
+
+#endif
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS { "asm_oldas", ASM_OLDAS_SPEC }
+
+/* Indicate that we have a stamp.h to use. */
+#ifndef CROSS_COMPILE
+#define HAVE_STAMP_H 1
+#endif
+
+/* Attempt to turn on access permissions for the stack. */
+
+#define ENABLE_EXECUTE_STACK \
+void \
+__enable_execute_stack (void *addr) \
+{ \
+ extern int mprotect (const void *, size_t, int); \
+ long size = getpagesize (); \
+ long mask = ~(size-1); \
+ char *page = (char *) (((long) addr) & mask); \
+ char *end = (char *) ((((long) (addr + TRAMPOLINE_SIZE)) & mask) + size); \
+ \
+ /* 7 is PROT_READ | PROT_WRITE | PROT_EXEC */ \
+ if (mprotect (page, end - page, 7) < 0) \
+ perror ("mprotect of trampoline code"); \
+}
+
+/* Digital UNIX V4.0E (1091)/usr/include/sys/types.h 4.3.49.9 1997/08/14 */
+#define SIZE_TYPE "long unsigned int"
+#define PTRDIFF_TYPE "long int"
+
+/* The linker will stick __main into the .init section. */
+#define HAS_INIT_SECTION
+#define LD_INIT_SWITCH "-init"
+#define LD_FINI_SWITCH "-fini"
+
+/* The linker needs a space after "-o". This allows -oldstyle_liblookup to
+ be passed to ld. */
+#define SWITCHES_NEED_SPACES "o"
+
+/* Select a format to encode pointers in exception handling data. CODE
+ is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
+ true if the symbol may be affected by dynamic relocations.
+
+ We really ought to be using the SREL32 relocations that ECOFF has,
+ but no version of the native assembler supports creating such things,
+ and Compaq has no plans to rectify this. Worse, the dynamic loader
+ cannot handle unaligned relocations, so we have to make sure that
+ things get padded appropriately. */
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
+ (TARGET_GAS \
+ ? (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4) \
+ : DW_EH_PE_aligned)
+
+/* This is how we tell the assembler that a symbol is weak. */
+
+#define ASM_OUTPUT_WEAK_ALIAS(FILE, NAME, VALUE) \
+ do \
+ { \
+ (*targetm.asm_out.globalize_label) (FILE, NAME); \
+ fputs ("\t.weakext\t", FILE); \
+ assemble_name (FILE, NAME); \
+ if (VALUE) \
+ { \
+ fputc (' ', FILE); \
+ assemble_name (FILE, VALUE); \
+ } \
+ fputc ('\n', FILE); \
+ } \
+ while (0)
+
+#define ASM_WEAKEN_LABEL(FILE, NAME) ASM_OUTPUT_WEAK_ALIAS(FILE, NAME, 0)
+
+/* Handle #pragma weak and #pragma pack. */
+#define HANDLE_SYSV_PRAGMA 1
+
+/* Handle #pragma extern_prefix. Technically only needed for Tru64 5.x,
+ but easier to manipulate preprocessor bits from here. */
+#define TARGET_HANDLE_PRAGMA_EXTERN_PREFIX 1
diff --git a/gcc-4.2.1/gcc/config/alpha/osf5.h b/gcc-4.2.1/gcc/config/alpha/osf5.h
new file mode 100644
index 000000000..1926a3c2f
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/osf5.h
@@ -0,0 +1,57 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha on Tru64 5.
+ Copyright (C) 2000, 2001, 2004, 2005, 2006 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Tru64 5.1 uses IEEE QUAD format. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_FPREGS | MASK_LONG_DOUBLE_128)
+
+/* In Tru64 UNIX V5.1, Compaq introduced a new assembler
+ (/usr/lib/cmplrs/cc/adu) which currently (versions between 3.04.29 and
+ 3.04.32) breaks mips-tfile. Passing the undocumented -oldas flag reverts
+ to using the old assembler (/usr/lib/cmplrs/cc/as[01]).
+
+ The V5.0 and V5.0A assemblers silently ignore -oldas, so it can be
+ specified here.
+
+ It is clearly not desirable to depend on this undocumented flag, and
+ Compaq wants -oldas to go away soon, but until they have released a
+ new adu that works with mips-tfile, this is the only option.
+
+ In some versions of the DTK, the assembler driver invokes ld after
+ assembly. This has been fixed in current versions, but adding -c
+ works as expected for all versions. */
+
+#undef ASM_OLDAS_SPEC
+#define ASM_OLDAS_SPEC "-oldas -c"
+
+/* The linker appears to perform invalid code optimizations that result
+ in the ldgp emitted for the exception_receiver pattern being incorrectly
+ linked. */
+#undef TARGET_LD_BUGGY_LDGP
+#define TARGET_LD_BUGGY_LDGP 1
+
+/* Tru64 v5.1 has the float and long double forms of math functions. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS 1
+
+/* The native assembler doesn't understand parentheses. */
+#define TARGET_ASM_OPEN_PAREN ""
+#define TARGET_ASM_CLOSE_PAREN ""
+
diff --git a/gcc-4.2.1/gcc/config/alpha/predicates.md b/gcc-4.2.1/gcc/config/alpha/predicates.md
new file mode 100644
index 000000000..6f86324f4
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/predicates.md
@@ -0,0 +1,589 @@
+;; Predicate definitions for DEC Alpha.
+;; Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; Return 1 if OP is the zero constant for MODE.
+(define_predicate "const0_operand"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; Returns true if OP is either the constant zero or a register.
+(define_predicate "reg_or_0_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const0_operand")))
+
+;; Return 1 if OP is a constant in the range of 0-63 (for a shift) or
+;; any register.
+(define_predicate "reg_or_6bit_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) < 64")
+ (match_operand 0 "register_operand")))
+
+;; Return 1 if OP is an 8-bit constant.
+(define_predicate "cint8_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) < 256")))
+
+;; Return 1 if OP is an 8-bit constant or any register.
+(define_predicate "reg_or_8bit_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) < 256")
+ (match_operand 0 "register_operand")))
+
+;; Return 1 if OP is a constant or any register.
+(define_predicate "reg_or_cint_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_int_operand")))
+
+;; Return 1 if the operand is a valid second operand to an add insn.
+(define_predicate "add_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
+ || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L')")
+ (match_operand 0 "register_operand")))
+
+;; Return 1 if the operand is a valid second operand to a
+;; sign-extending add insn.
+(define_predicate "sext_add_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O')")
+ (match_operand 0 "register_operand")))
+
+;; Return 1 if the operand is a non-symbolic constant operand that
+;; does not satisfy add_operand.
+(define_predicate "non_add_const_operand"
+ (and (match_code "const_int,const_double,const_vector")
+ (not (match_operand 0 "add_operand"))))
+
+;; Return 1 if the operand is a non-symbolic, nonzero constant operand.
+(define_predicate "non_zero_const_operand"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op != CONST0_RTX (mode)")))
+
+;; Return 1 if OP is the constant 4 or 8.
+(define_predicate "const48_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) == 4 || INTVAL (op) == 8")))
+
+;; Return 1 if OP is a valid first operand to an AND insn.
+(define_predicate "and_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
+ || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
+ || zap_mask (INTVAL (op))")
+ (if_then_else (match_code "const_double")
+ (match_test "GET_MODE (op) == VOIDmode
+ && zap_mask (CONST_DOUBLE_LOW (op))
+ && zap_mask (CONST_DOUBLE_HIGH (op))")
+ (match_operand 0 "register_operand"))))
+
+;; Return 1 if OP is a valid first operand to an IOR or XOR insn.
+(define_predicate "or_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
+ || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100")
+ (match_operand 0 "register_operand")))
+
+;; Return 1 if OP is a constant that is the width, in bits, of an integral
+;; mode not larger than DImode.
+(define_predicate "mode_width_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT i = INTVAL (op);
+ return i == 8 || i == 16 || i == 32 || i == 64;
+})
+
+;; Return 1 if OP is a constant that is a mask of ones of width of an
+;; integral machine mode not larger than DImode.
+(define_predicate "mode_mask_operand"
+ (match_code "const_int,const_double")
+{
+ if (GET_CODE (op) == CONST_INT)
+ {
+ HOST_WIDE_INT value = INTVAL (op);
+
+ if (value == 0xff)
+ return 1;
+ if (value == 0xffff)
+ return 1;
+ if (value == 0xffffffff)
+ return 1;
+ if (value == -1)
+ return 1;
+ }
+ else if (HOST_BITS_PER_WIDE_INT == 32 && GET_CODE (op) == CONST_DOUBLE)
+ {
+ if (CONST_DOUBLE_LOW (op) == 0xffffffff && CONST_DOUBLE_HIGH (op) == 0)
+ return 1;
+ }
+ return 0;
+})
+
+;; Return 1 if OP is a multiple of 8 less than 64.
+(define_predicate "mul8_operand"
+ (match_code "const_int")
+{
+ unsigned HOST_WIDE_INT i = INTVAL (op);
+ return i < 64 && i % 8 == 0;
+})
+
+;; Return 1 if OP is a hard floating-point register.
+(define_predicate "hard_fp_register_operand"
+ (match_operand 0 "register_operand")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ return REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS;
+})
+
+;; Return 1 if OP is a hard general register.
+(define_predicate "hard_int_register_operand"
+ (match_operand 0 "register_operand")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ return REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS;
+})
+
+;; Return 1 if OP is something that can be reloaded into a register;
+;; if it is a MEM, it need not be valid.
+(define_predicate "some_operand"
+ (ior (match_code "reg,mem,const_int,const_double,const_vector,
+ label_ref,symbol_ref,const,high")
+ (and (match_code "subreg")
+ (match_test "some_operand (SUBREG_REG (op), VOIDmode)"))))
+
+;; Likewise, but don't accept constants.
+(define_predicate "some_ni_operand"
+ (ior (match_code "reg,mem")
+ (and (match_code "subreg")
+ (match_test "some_ni_operand (SUBREG_REG (op), VOIDmode)"))))
+
+;; Return 1 if OP is a valid operand for the source of a move insn.
+(define_predicate "input_operand"
+ (match_code "label_ref,symbol_ref,const,high,reg,subreg,mem,
+ const_double,const_vector,const_int")
+{
+ switch (GET_CODE (op))
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST:
+ if (TARGET_EXPLICIT_RELOCS)
+ {
+ /* We don't split symbolic operands into something unintelligible
+ until after reload, but we do not wish non-small, non-global
+ symbolic operands to be reconstructed from their high/lo_sum
+ form. */
+ return (small_symbolic_operand (op, mode)
+ || global_symbolic_operand (op, mode)
+ || gotdtp_symbolic_operand (op, mode)
+ || gottp_symbolic_operand (op, mode));
+ }
+
+ /* This handles both the Windows/NT and OSF cases. */
+ return mode == ptr_mode || mode == DImode;
+
+ case HIGH:
+ return (TARGET_EXPLICIT_RELOCS
+ && local_symbolic_operand (XEXP (op, 0), mode));
+
+ case REG:
+ return 1;
+
+ case SUBREG:
+ if (register_operand (op, mode))
+ return 1;
+ /* ... fall through ... */
+ case MEM:
+ return ((TARGET_BWX || (mode != HImode && mode != QImode))
+ && general_operand (op, mode));
+
+ case CONST_DOUBLE:
+ return op == CONST0_RTX (mode);
+
+ case CONST_VECTOR:
+ if (reload_in_progress || reload_completed)
+ return alpha_legitimate_constant_p (op);
+ return op == CONST0_RTX (mode);
+
+ case CONST_INT:
+ if (mode == QImode || mode == HImode)
+ return true;
+ if (reload_in_progress || reload_completed)
+ return alpha_legitimate_constant_p (op);
+ return add_operand (op, mode);
+
+ default:
+ gcc_unreachable ();
+ }
+ return 0;
+})
+
+;; Return 1 if OP is a SYMBOL_REF for a function known to be in this
+;; file, and in the same section as the current function.
+
+(define_predicate "samegp_function_operand"
+ (match_code "symbol_ref")
+{
+ /* Easy test for recursion. */
+ if (op == XEXP (DECL_RTL (current_function_decl), 0))
+ return true;
+
+ /* Functions that are not local can be overridden, and thus may
+ not share the same gp. */
+ if (! SYMBOL_REF_LOCAL_P (op))
+ return false;
+
+ /* If -msmall-data is in effect, assume that there is only one GP
+ for the module, and so any local symbol has this property. We
+ need explicit relocations to be able to enforce this for symbols
+ not defined in this unit of translation, however. */
+ if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
+ return true;
+
+ /* Functions that are not external are defined in this UoT,
+ and thus must share the same gp. */
+ return ! SYMBOL_REF_EXTERNAL_P (op);
+})
+
+;; Return 1 if OP is a SYMBOL_REF for which we can make a call via bsr.
+(define_predicate "direct_call_operand"
+ (match_operand 0 "samegp_function_operand")
+{
+ tree op_decl, cfun_sec, op_sec;
+
+ /* If profiling is implemented via linker tricks, we can't jump
+ to the nogp alternate entry point. Note that current_function_profile
+ would not be correct, since that doesn't indicate if the target
+ function uses profiling. */
+ /* ??? TARGET_PROFILING_NEEDS_GP isn't really the right test,
+ but is approximately correct for the OSF ABIs. Don't know
+ what to do for VMS, NT, or UMK. */
+ if (!TARGET_PROFILING_NEEDS_GP && profile_flag)
+ return false;
+
+ /* Must be a function. In some cases folks create thunks in static
+ data structures and then make calls to them. If we allow the
+ direct call, we'll get an error from the linker about !samegp reloc
+ against a symbol without a .prologue directive. */
+ if (!SYMBOL_REF_FUNCTION_P (op))
+ return false;
+
+ /* Must be "near" so that the branch is assumed to reach. With
+ -msmall-text, this is assumed true of all local symbols. Since
+ we've already checked samegp, locality is already assured. */
+ if (TARGET_SMALL_TEXT)
+ return true;
+
+ /* Otherwise, a decl is "near" if it is defined in the same section. */
+ if (flag_function_sections)
+ return false;
+
+ op_decl = SYMBOL_REF_DECL (op);
+ if (DECL_ONE_ONLY (current_function_decl)
+ || (op_decl && DECL_ONE_ONLY (op_decl)))
+ return false;
+
+ cfun_sec = DECL_SECTION_NAME (current_function_decl);
+ op_sec = op_decl ? DECL_SECTION_NAME (op_decl) : NULL;
+ return ((!cfun_sec && !op_sec)
+ || (cfun_sec && op_sec
+ && strcmp (TREE_STRING_POINTER (cfun_sec),
+ TREE_STRING_POINTER (op_sec)) == 0));
+})
+
+;; Return 1 if OP is a valid operand for the MEM of a CALL insn.
+;;
+;; For TARGET_ABI_OSF, we want to restrict to R27 or a pseudo.
+;; For TARGET_ABI_UNICOSMK, we want to restrict to registers.
+
+(define_predicate "call_operand"
+ (if_then_else (match_code "reg")
+ (match_test "!TARGET_ABI_OSF
+ || REGNO (op) == 27 || REGNO (op) > LAST_VIRTUAL_REGISTER")
+ (and (match_test "!TARGET_ABI_UNICOSMK")
+ (match_code "symbol_ref"))))
+
+;; Return true if OP is a LABEL_REF, or SYMBOL_REF or CONST referencing
+;; a (non-tls) variable known to be defined in this file.
+(define_predicate "local_symbolic_operand"
+ (match_code "label_ref,const,symbol_ref")
+{
+ if (GET_CODE (op) == LABEL_REF)
+ return 1;
+
+ if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
+ op = XEXP (XEXP (op, 0), 0);
+
+ if (GET_CODE (op) != SYMBOL_REF)
+ return 0;
+
+ return (SYMBOL_REF_LOCAL_P (op)
+ && !SYMBOL_REF_WEAK (op)
+ && !SYMBOL_REF_TLS_MODEL (op));
+})
+
+;; Return true if OP is a SYMBOL_REF or CONST referencing a variable
+;; known to be defined in this file in the small data area.
+(define_predicate "small_symbolic_operand"
+ (match_code "const,symbol_ref")
+{
+ if (! TARGET_SMALL_DATA)
+ return 0;
+
+ if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
+ op = XEXP (XEXP (op, 0), 0);
+
+ if (GET_CODE (op) != SYMBOL_REF)
+ return 0;
+
+ /* ??? There's no encode_section_info equivalent for the rtl
+ constant pool, so SYMBOL_FLAG_SMALL never gets set. */
+ if (CONSTANT_POOL_ADDRESS_P (op))
+ return GET_MODE_SIZE (get_pool_mode (op)) <= g_switch_value;
+
+ return (SYMBOL_REF_LOCAL_P (op)
+ && SYMBOL_REF_SMALL_P (op)
+ && !SYMBOL_REF_WEAK (op)
+ && !SYMBOL_REF_TLS_MODEL (op));
+})
+
+;; Return true if OP is a SYMBOL_REF or CONST referencing a variable
+;; not known (or known not) to be defined in this file.
+(define_predicate "global_symbolic_operand"
+ (match_code "const,symbol_ref")
+{
+ if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
+ op = XEXP (XEXP (op, 0), 0);
+
+ if (GET_CODE (op) != SYMBOL_REF)
+ return 0;
+
+ return ((!SYMBOL_REF_LOCAL_P (op) || SYMBOL_REF_WEAK (op))
+ && !SYMBOL_REF_TLS_MODEL (op));
+})
+
+;; Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref,
+;; possibly with an offset.
+(define_predicate "symbolic_operand"
+ (ior (match_code "symbol_ref,label_ref")
+ (and (match_code "const")
+ (match_test "GET_CODE (XEXP (op,0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op,0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (op,0), 1)) == CONST_INT"))))
+
+;; Return true if OP is valid for 16-bit DTP relative relocations.
+(define_predicate "dtp16_symbolic_operand"
+ (and (match_code "const")
+ (match_test "tls_symbolic_operand_1 (op, 16, UNSPEC_DTPREL)")))
+
+;; Return true if OP is valid for 32-bit DTP relative relocations.
+(define_predicate "dtp32_symbolic_operand"
+ (and (match_code "const")
+ (match_test "tls_symbolic_operand_1 (op, 32, UNSPEC_DTPREL)")))
+
+;; Return true if OP is valid for 64-bit DTP relative relocations.
+(define_predicate "gotdtp_symbolic_operand"
+ (and (match_code "const")
+ (match_test "tls_symbolic_operand_1 (op, 64, UNSPEC_DTPREL)")))
+
+;; Return true if OP is valid for 16-bit TP relative relocations.
+(define_predicate "tp16_symbolic_operand"
+ (and (match_code "const")
+ (match_test "tls_symbolic_operand_1 (op, 16, UNSPEC_TPREL)")))
+
+;; Return true if OP is valid for 32-bit TP relative relocations.
+(define_predicate "tp32_symbolic_operand"
+ (and (match_code "const")
+ (match_test "tls_symbolic_operand_1 (op, 32, UNSPEC_TPREL)")))
+
+;; Return true if OP is valid for 64-bit TP relative relocations.
+(define_predicate "gottp_symbolic_operand"
+ (and (match_code "const")
+ (match_test "tls_symbolic_operand_1 (op, 64, UNSPEC_TPREL)")))
+
+;; Return 1 if this memory address is a known aligned register plus
+;; a constant. It must be a valid address. This means that we can do
+;; this as an aligned reference plus some offset.
+;;
+;; Take into account what reload will do. Oh god this is awful.
+;; The horrible comma-operator construct below is to prevent genrecog
+;; from thinking that this predicate accepts REG and SUBREG. We don't
+;; use recog during reload, so pretending these codes are accepted
+;; pessimizes things a tad.
+
+(define_predicate "aligned_memory_operand"
+ (ior (match_test "op = resolve_reload_operand (op), 0")
+ (match_code "mem"))
+{
+ rtx base;
+
+ if (MEM_ALIGN (op) >= 32)
+ return 1;
+ op = XEXP (op, 0);
+
+ /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
+ sorts of constructs. Dig for the real base register. */
+ if (reload_in_progress
+ && GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 0)) == PLUS)
+ base = XEXP (XEXP (op, 0), 0);
+ else
+ {
+ if (! memory_address_p (mode, op))
+ return 0;
+ base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
+ }
+
+ return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) >= 32);
+})
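+
+;; In other words (our annotation): the comma-operator match_test above
+;; always yields 0 and so accepts nothing by itself; its only purpose
+;; is the side effect of replacing a reload pseudo with the MEM it is
+;; being reloaded from, before the match_code "mem" test runs.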
+
+;; Similar, but return 1 if OP is a MEM which is not alignable.
+
+(define_predicate "unaligned_memory_operand"
+ (ior (match_test "op = resolve_reload_operand (op), 0")
+ (match_code "mem"))
+{
+ rtx base;
+
+ if (MEM_ALIGN (op) >= 32)
+ return 0;
+ op = XEXP (op, 0);
+
+ /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
+ sorts of constructs. Dig for the real base register. */
+ if (reload_in_progress
+ && GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 0)) == PLUS)
+ base = XEXP (XEXP (op, 0), 0);
+ else
+ {
+ if (! memory_address_p (mode, op))
+ return 0;
+ base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
+ }
+
+ return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) < 32);
+})
+
+;; Return 1 if OP is any memory location. During reload a pseudo matches.
+(define_predicate "any_memory_operand"
+ (ior (match_code "mem,reg")
+ (and (match_code "subreg")
+ (match_test "GET_CODE (SUBREG_REG (op)) == REG"))))
+
+;; Return 1 if OP is either a register or an unaligned memory location.
+(define_predicate "reg_or_unaligned_mem_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "unaligned_memory_operand")))
+
+;; Return 1 if OP is a memory location that is not a reference
+;; (using an AND) to an unaligned location. Take into account
+;; what reload will do.
+(define_predicate "normal_memory_operand"
+ (ior (match_test "op = resolve_reload_operand (op), 0")
+ (and (match_code "mem")
+ (match_test "GET_CODE (XEXP (op, 0)) != AND"))))
+
+;; Returns 1 if OP is not an eliminable register.
+;;
+;; This exists to cure a pathological failure in the s8addq (et al) patterns,
+;;
+;; long foo () { long t; bar(); return (long) &t * 26107; }
+;;
+;; which run afoul of a hack in reload to cure a (presumably) similar
+;; problem with lea-type instructions on other targets. But there is
+;; one of us and many of them, so work around the problem by selectively
+;; preventing combine from making the optimization.
+
+(define_predicate "reg_not_elim_operand"
+ (match_operand 0 "register_operand")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ return op != frame_pointer_rtx && op != arg_pointer_rtx;
+})
+
+;; Accept a register, but not a subreg of any kind. This allows us to
+;; avoid pathological cases in reload wrt data movement common in
+;; int->fp conversion.
+(define_predicate "reg_no_subreg_operand"
+ (and (match_code "reg")
+ (match_operand 0 "register_operand")))
+
+;; Return 1 if OP is a valid Alpha comparison operator for "cmp" style
+;; instructions.
+(define_predicate "alpha_comparison_operator"
+ (match_code "eq,le,lt,leu,ltu"))
+
+;; Similarly, but with swapped operands.
+(define_predicate "alpha_swapped_comparison_operator"
+ (match_code "eq,ge,gt,gtu"))
+
+;; Return 1 if OP is a valid Alpha comparison operator against zero
+;; for "bcc" style instructions.
+(define_predicate "alpha_zero_comparison_operator"
+ (match_code "eq,ne,le,lt,leu,ltu"))
+
+;; Return 1 if OP is a signed comparison operation.
+(define_predicate "signed_comparison_operator"
+ (match_code "eq,ne,le,lt,ge,gt"))
+
+;; Return 1 if OP is a valid Alpha floating point comparison operator.
+(define_predicate "alpha_fp_comparison_operator"
+ (match_code "eq,le,lt,unordered"))
+
+;; Return 1 if this is a divide or modulus operator.
+(define_predicate "divmod_operator"
+ (match_code "div,mod,udiv,umod"))
+
+;; Return 1 if this is a float->int conversion operator.
+(define_predicate "fix_operator"
+ (match_code "fix,unsigned_fix"))
+
+;; Recognize an addition operation that includes a constant. Used to
+;; convince reload to canonicalize (plus (plus reg c1) c2) during register
+;; elimination.
+
+(define_predicate "addition_operation"
+ (and (match_code "plus")
+ (match_test "register_operand (XEXP (op, 0), mode)
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (XEXP (op, 1)), 'K')")))
+
+;; For TARGET_EXPLICIT_RELOCS, we don't obfuscate a SYMBOL_REF to a
+;; small symbolic operand until after reload. At which point we need
+;; to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref))
+;; so that sched2 has the proper dependency information.
+(define_predicate "some_small_symbolic_operand"
+ (match_code "set,parallel,prefetch,unspec,unspec_volatile")
+{
+ /* Avoid search unless necessary. */
+ if (!TARGET_EXPLICIT_RELOCS || !reload_completed)
+ return false;
+ return for_each_rtx (&op, some_small_symbolic_operand_int, NULL);
+})
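+
+;; E.g. (our illustration of the comment above): after reload a
+;; small-data reference (mem (symbol_ref "x")) is rewritten as
+;; (mem (lo_sum (reg:DI 29) (symbol_ref "x"))) so that sched2 sees
+;; the dependence on the gp register.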
diff --git a/gcc-4.2.1/gcc/config/alpha/qrnnd.asm b/gcc-4.2.1/gcc/config/alpha/qrnnd.asm
new file mode 100644
index 000000000..96f31b25c
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/qrnnd.asm
@@ -0,0 +1,167 @@
+ # Alpha 21064 __udiv_qrnnd
+ # Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc.
+
+ # This file is part of GCC.
+
+ # The GNU MP Library is free software; you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation; either version 2 of the License, or (at your
+ # option) any later version.
+
+ # In addition to the permissions in the GNU General Public License, the
+ # Free Software Foundation gives you unlimited permission to link the
+ # compiled version of this file with other programs, and to distribute
+ # those programs without any restriction coming from the use of this
+ # file. (The General Public License restrictions do apply in other
+ # respects; for example, they cover modification of the file, and
+ # distribution when not linked into another program.)
+
+ # This file is distributed in the hope that it will be useful, but
+ # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+ # License for more details.
+
+ # You should have received a copy of the GNU General Public License
+ # along with GCC; see the file COPYING. If not, write to the
+ # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ # MA 02110-1301, USA.
+
+#ifdef __ELF__
+.section .note.GNU-stack,""
+#endif
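+
+# Calling convention, as a sketch inferred from the register #defines
+# below and from how longlong.h uses this routine (not quoted from a
+# manual): $16 holds rem_ptr, $17:$18 hold the 128-bit numerator
+# n1:n0, $19 holds the divisor d; the 64-bit quotient is returned in
+# $0 and the remainder is stored at *rem_ptr.  As with GMP's
+# udiv_qrnnd, the caller must ensure n1 < d so the quotient fits.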
+
+ .set noreorder
+ .set noat
+
+ .text
+
+ .globl __udiv_qrnnd
+ .ent __udiv_qrnnd
+__udiv_qrnnd:
+ .frame $30,0,$26,0
+ .prologue 0
+
+#define cnt $2
+#define tmp $3
+#define rem_ptr $16
+#define n1 $17
+#define n0 $18
+#define d $19
+#define qb $20
+#define AT $at
+
+ ldiq cnt,16
+ blt d,$largedivisor
+
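+# Plain shift-and-subtract (restoring) division: 16 iterations of the
+# 4-step unrolled body below produce the 64 quotient bits.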
+$loop1: cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ subq cnt,1,cnt
+ bgt cnt,$loop1
+ stq n1,0(rem_ptr)
+ bis $31,n0,$0
+ ret $31,($26),1
+
+$largedivisor:
+ and n0,1,$4
+
+ srl n0,1,n0
+ sll n1,63,tmp
+ or tmp,n0,n0
+ srl n1,1,n1
+
+ and d,1,$6
+ srl d,1,$5
+ addq $5,$6,$5
+
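+# Large divisor (sign bit set): run the same 64-step loop on
+# floor(n/2) and ceil(d/2), then patch the result using the dropped
+# low bits saved in $4 and $6 (the $Odd fixup below).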
+$loop2: cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ subq cnt,1,cnt
+ bgt cnt,$loop2
+
+ addq n1,n1,n1
+ addq $4,n1,n1
+ bne $6,$Odd
+ stq n1,0(rem_ptr)
+ bis $31,n0,$0
+ ret $31,($26),1
+
+$Odd:
+ /* q' in n0. r' in n1 */
+ addq n1,n0,n1
+
+ cmpult n1,n0,tmp # tmp := carry from addq
+ subq n1,d,AT
+ addq n0,tmp,n0
+ cmovne tmp,AT,n1
+
+ cmpult n1,d,tmp
+ addq n0,1,AT
+ cmoveq tmp,AT,n0
+ subq n1,d,AT
+ cmoveq tmp,AT,n1
+
+ stq n1,0(rem_ptr)
+ bis $31,n0,$0
+ ret $31,($26),1
+
+ .end __udiv_qrnnd
diff --git a/gcc-4.2.1/gcc/config/alpha/sync.md b/gcc-4.2.1/gcc/config/alpha/sync.md
new file mode 100644
index 000000000..1c34ce54b
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/sync.md
@@ -0,0 +1,315 @@
+;; GCC machine description for Alpha synchronization instructions.
+;; Copyright (C) 2005
+;; Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+(define_mode_macro I12MODE [QI HI])
+(define_mode_macro I48MODE [SI DI])
+(define_mode_attr modesuffix [(SI "l") (DI "q")])
+
+(define_code_macro FETCHOP [plus minus ior xor and])
+(define_code_attr fetchop_name
+ [(plus "add") (minus "sub") (ior "ior") (xor "xor") (and "and")])
+(define_code_attr fetchop_pred
+ [(plus "add_operand") (minus "reg_or_8bit_operand")
+ (ior "or_operand") (xor "or_operand") (and "and_operand")])
+(define_code_attr fetchop_constr
+ [(plus "rKL") (minus "rI") (ior "rIN") (xor "rIN") (and "riNHM")])
+
+
+(define_expand "memory_barrier"
+ [(set (mem:BLK (match_dup 0))
+ (unspec:BLK [(mem:BLK (match_dup 0))] UNSPEC_MB))]
+ ""
+{
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (DImode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+})
+
+(define_insn "*mb_internal"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_operand:BLK 1 "" "")] UNSPEC_MB))]
+ ""
+ "mb"
+ [(set_attr "type" "mb")])
+
+(define_insn "load_locked_<mode>"
+ [(set (match_operand:I48MODE 0 "register_operand" "=r")
+ (unspec_volatile:I48MODE
+ [(match_operand:I48MODE 1 "memory_operand" "m")]
+ UNSPECV_LL))]
+ ""
+ "ld<modesuffix>_l %0,%1"
+ [(set_attr "type" "ld_l")])
+
+(define_insn "store_conditional_<mode>"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI [(const_int 0)] UNSPECV_SC))
+ (set (match_operand:I48MODE 1 "memory_operand" "=m")
+ (match_operand:I48MODE 2 "reg_or_0_operand" "0"))]
+ ""
+ "st<modesuffix>_c %0,%1"
+ [(set_attr "type" "st_c")])
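+
+;; Note (our annotation): st<modesuffix>_c overwrites its source
+;; register with the success flag, which is why operand 2 is tied to
+;; the DImode flag output through the "0" constraint above.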
+
+;; The Alpha Architecture Handbook says that it is UNPREDICTABLE whether
+;; the lock is cleared by a TAKEN branch. If we were to honor that, it
+;; would mean that we could not expand a ll/sc sequence until after the
+;; final basic-block reordering pass. Fortunately, it appears that no
+;; Alpha implementation ever built actually clears the lock on branches,
+;; taken or not.
+
+(define_insn_and_split "sync_<fetchop_name><mode>"
+ [(set (match_operand:I48MODE 0 "memory_operand" "+m")
+ (unspec:I48MODE
+ [(FETCHOP:I48MODE (match_dup 0)
+ (match_operand:I48MODE 1 "<fetchop_pred>" "<fetchop_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:I48MODE 2 "=&r"))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ alpha_split_atomic_op (<CODE>, operands[0], operands[1],
+ NULL, NULL, operands[2]);
+ DONE;
+}
+ [(set_attr "type" "multi")])
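+
+;; As a sketch (our annotation, with placeholder register names), the
+;; split above is expected to become a loop of roughly this shape for
+;; DImode addition:
+;;
+;;   1:   ldq_l   $scratch,0($addr)       ; load-locked old value
+;;        addq    $scratch,$val,$scratch  ; apply the FETCHOP
+;;        stq_c   $scratch,0($addr)       ; 1 if the store stuck, else 0
+;;        beq     $scratch,1b             ; reservation lost: retry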
+
+(define_insn_and_split "sync_nand<mode>"
+ [(set (match_operand:I48MODE 0 "memory_operand" "+m")
+ (unspec:I48MODE
+ [(and:I48MODE (not:I48MODE (match_dup 0))
+ (match_operand:I48MODE 1 "register_operand" "r"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:I48MODE 2 "=&r"))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ alpha_split_atomic_op (NOT, operands[0], operands[1],
+ NULL, NULL, operands[2]);
+ DONE;
+}
+ [(set_attr "type" "multi")])
+
+(define_insn_and_split "sync_old_<fetchop_name><mode>"
+ [(set (match_operand:I48MODE 0 "register_operand" "=&r")
+ (match_operand:I48MODE 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec:I48MODE
+ [(FETCHOP:I48MODE (match_dup 1)
+ (match_operand:I48MODE 2 "<fetchop_pred>" "<fetchop_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:I48MODE 3 "=&r"))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ alpha_split_atomic_op (<CODE>, operands[1], operands[2],
+ operands[0], NULL, operands[3]);
+ DONE;
+}
+ [(set_attr "type" "multi")])
+
+(define_insn_and_split "sync_old_nand<mode>"
+ [(set (match_operand:I48MODE 0 "register_operand" "=&r")
+ (match_operand:I48MODE 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec:I48MODE
+ [(and:I48MODE (not:I48MODE (match_dup 1))
+ (match_operand:I48MODE 2 "register_operand" "r"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:I48MODE 3 "=&r"))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ alpha_split_atomic_op (NOT, operands[1], operands[2],
+ operands[0], NULL, operands[3]);
+ DONE;
+}
+ [(set_attr "type" "multi")])
+
+(define_insn_and_split "sync_new_<fetchop_name><mode>"
+ [(set (match_operand:I48MODE 0 "register_operand" "=&r")
+ (FETCHOP:I48MODE
+ (match_operand:I48MODE 1 "memory_operand" "+m")
+ (match_operand:I48MODE 2 "<fetchop_pred>" "<fetchop_constr>")))
+ (set (match_dup 1)
+ (unspec:I48MODE
+ [(FETCHOP:I48MODE (match_dup 1) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:I48MODE 3 "=&r"))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ alpha_split_atomic_op (<CODE>, operands[1], operands[2],
+ NULL, operands[0], operands[3]);
+ DONE;
+}
+ [(set_attr "type" "multi")])
+
+(define_insn_and_split "sync_new_nand<mode>"
+ [(set (match_operand:I48MODE 0 "register_operand" "=&r")
+ (and:I48MODE
+ (not:I48MODE (match_operand:I48MODE 1 "memory_operand" "+m"))
+ (match_operand:I48MODE 2 "register_operand" "r")))
+ (set (match_dup 1)
+ (unspec:I48MODE
+ [(and:I48MODE (not:I48MODE (match_dup 1)) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:I48MODE 3 "=&r"))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ alpha_split_atomic_op (NOT, operands[1], operands[2],
+ NULL, operands[0], operands[3]);
+ DONE;
+}
+ [(set_attr "type" "multi")])
+
+(define_expand "sync_compare_and_swap<mode>"
+ [(match_operand:I12MODE 0 "register_operand" "")
+ (match_operand:I12MODE 1 "memory_operand" "")
+ (match_operand:I12MODE 2 "register_operand" "")
+ (match_operand:I12MODE 3 "add_operand" "")]
+ ""
+{
+ alpha_expand_compare_and_swap_12 (operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+})
+
+(define_insn_and_split "sync_compare_and_swap<mode>_1"
+ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
+ (zero_extend:DI
+ (mem:I12MODE (match_operand:DI 1 "register_operand" "r,r"))))
+ (set (mem:I12MODE (match_dup 1))
+ (unspec:I12MODE
+ [(match_operand:DI 2 "reg_or_8bit_operand" "J,rI")
+ (match_operand:DI 3 "register_operand" "r,r")
+ (match_operand:DI 4 "register_operand" "r,r")]
+ UNSPEC_CMPXCHG))
+ (clobber (match_scratch:DI 5 "=&r,&r"))
+ (clobber (match_scratch:DI 6 "=X,&r"))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ alpha_split_compare_and_swap_12 (<MODE>mode, operands[0], operands[1],
+ operands[2], operands[3], operands[4],
+ operands[5], operands[6]);
+ DONE;
+}
+ [(set_attr "type" "multi")])
+
+(define_expand "sync_compare_and_swap<mode>"
+ [(parallel
+ [(set (match_operand:I48MODE 0 "register_operand" "")
+ (match_operand:I48MODE 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec:I48MODE
+ [(match_operand:I48MODE 2 "reg_or_8bit_operand" "")
+ (match_operand:I48MODE 3 "add_operand" "rKL")]
+ UNSPEC_CMPXCHG))
+ (clobber (match_scratch:I48MODE 4 "=&r"))])]
+ ""
+{
+ if (<MODE>mode == SImode)
+ operands[2] = convert_modes (DImode, SImode, operands[2], 0);
+})
+
+(define_insn_and_split "*sync_compare_and_swap<mode>"
+ [(set (match_operand:I48MODE 0 "register_operand" "=&r")
+ (match_operand:I48MODE 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec:I48MODE
+ [(match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (match_operand:I48MODE 3 "add_operand" "rKL")]
+ UNSPEC_CMPXCHG))
+ (clobber (match_scratch:I48MODE 4 "=&r"))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ alpha_split_compare_and_swap (operands[0], operands[1], operands[2],
+ operands[3], operands[4]);
+ DONE;
+}
+ [(set_attr "type" "multi")])
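+
+;; Roughly (our sketch, placeholder register names), the split yields:
+;;
+;;   1:   ldq_l   $ret,0($mem)        ; load-locked current value
+;;        cmpeq   $ret,$old,$tmp
+;;        beq     $tmp,2f             ; values differ: fail, lock dies
+;;        mov     $new,$tmp
+;;        stq_c   $tmp,0($mem)        ; try to commit the new value
+;;        beq     $tmp,1b             ; reservation lost: retry
+;;   2: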
+
+(define_expand "sync_lock_test_and_set<mode>"
+ [(match_operand:I12MODE 0 "register_operand" "")
+ (match_operand:I12MODE 1 "memory_operand" "")
+ (match_operand:I12MODE 2 "register_operand" "")]
+ ""
+{
+ alpha_expand_lock_test_and_set_12 (operands[0], operands[1], operands[2]);
+ DONE;
+})
+
+(define_insn_and_split "sync_lock_test_and_set<mode>_1"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (zero_extend:DI
+ (mem:I12MODE (match_operand:DI 1 "register_operand" "r"))))
+ (set (mem:I12MODE (match_dup 1))
+ (unspec:I12MODE
+ [(match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (match_operand:DI 3 "register_operand" "r")]
+ UNSPEC_XCHG))
+ (clobber (match_scratch:DI 4 "=&r"))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ alpha_split_lock_test_and_set_12 (<MODE>mode, operands[0], operands[1],
+ operands[2], operands[3], operands[4]);
+ DONE;
+}
+ [(set_attr "type" "multi")])
+
+(define_insn_and_split "sync_lock_test_and_set<mode>"
+ [(set (match_operand:I48MODE 0 "register_operand" "=&r")
+ (match_operand:I48MODE 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec:I48MODE
+ [(match_operand:I48MODE 2 "add_operand" "rKL")]
+ UNSPEC_XCHG))
+ (clobber (match_scratch:I48MODE 3 "=&r"))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ alpha_split_lock_test_and_set (operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+}
+ [(set_attr "type" "multi")])
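+
+;; The exchange loop is the same idea without the compare (our sketch):
+;;
+;;   1:   ldq_l   $ret,0($mem)
+;;        mov     $val,$tmp
+;;        stq_c   $tmp,0($mem)
+;;        beq     $tmp,1b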
diff --git a/gcc-4.2.1/gcc/config/alpha/t-alpha b/gcc-4.2.1/gcc/config/alpha/t-alpha
new file mode 100644
index 000000000..d0b58d69a
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/t-alpha
@@ -0,0 +1,2 @@
+# This is a support routine for longlong.h, used by libgcc2.c.
+LIB2FUNCS_EXTRA = $(srcdir)/config/alpha/qrnnd.asm
diff --git a/gcc-4.2.1/gcc/config/alpha/t-crtfm b/gcc-4.2.1/gcc/config/alpha/t-crtfm
new file mode 100644
index 000000000..5ca8c3f74
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/t-crtfm
@@ -0,0 +1,5 @@
+EXTRA_PARTS += crtfastmath.o
+
+crtfastmath.o: $(srcdir)/config/alpha/crtfastmath.c $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -frandom-seed=gcc-crtfastmath -c \
+ -o crtfastmath.o $(srcdir)/config/alpha/crtfastmath.c
diff --git a/gcc-4.2.1/gcc/config/alpha/t-ieee b/gcc-4.2.1/gcc/config/alpha/t-ieee
new file mode 100644
index 000000000..fe549dfc9
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/t-ieee
@@ -0,0 +1,2 @@
+# All Alphas get an IEEE compliant set of libraries.
+TARGET_LIBGCC2_CFLAGS += -mieee
diff --git a/gcc-4.2.1/gcc/config/alpha/t-linux b/gcc-4.2.1/gcc/config/alpha/t-linux
new file mode 100644
index 000000000..fabf38f9c
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/t-linux
@@ -0,0 +1 @@
+SHLIB_MAPFILES += $(srcdir)/config/alpha/libgcc-alpha-ldbl.ver
diff --git a/gcc-4.2.1/gcc/config/alpha/t-osf-pthread b/gcc-4.2.1/gcc/config/alpha/t-osf-pthread
new file mode 100644
index 000000000..968e65cce
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/t-osf-pthread
@@ -0,0 +1,5 @@
+# Provide dummy POSIX threads functions
+LIB2FUNCS_EXTRA += $(srcdir)/gthr-posix.c
+
+# Compile libgcc2 with POSIX threads support
+TARGET_LIBGCC2_CFLAGS=-pthread
diff --git a/gcc-4.2.1/gcc/config/alpha/t-osf4 b/gcc-4.2.1/gcc/config/alpha/t-osf4
new file mode 100644
index 000000000..ead38d123
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/t-osf4
@@ -0,0 +1,32 @@
+# Compile crtbeginS.o and crtendS.o with pic.
+CRTSTUFF_T_CFLAGS_S = -fPIC
+
+# Compile libgcc2.a with pic.
+TARGET_LIBGCC2_CFLAGS = -fPIC
+
+# Build a shared libgcc library.
+SHLIB_EXT = .so
+SHLIB_NAME = @shlib_base_name@.so
+SHLIB_SONAME = @shlib_base_name@.so.1
+SHLIB_OBJS = @shlib_objs@
+
+# Hide all POSIX threads related symbols provided by gthr-posix.c. This
+# only has an effect if t-osf-pthread is in use.
+SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
+ -Wl,-hidden_symbol,pthread\* -Wl,-hidden_symbol,__pthread\* \
+ -Wl,-hidden_symbol,sched_get_\* -Wl,-hidden_symbol,sched_yield \
+ -Wl,-msym -Wl,-set_version,gcc.1 -Wl,-soname,$(SHLIB_SONAME) \
+ -o $(SHLIB_NAME).tmp @multilib_flags@ $(SHLIB_OBJS) -lc && \
+ rm -f $(SHLIB_SONAME) && \
+ if [ -f $(SHLIB_NAME) ]; then \
+ mv -f $(SHLIB_NAME) $(SHLIB_NAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_NAME).tmp $(SHLIB_NAME) && \
+ $(LN_S) $(SHLIB_NAME) $(SHLIB_SONAME)
+# $(slibdir) double quoted to protect it from expansion while building
+# libgcc.mk. We want this delayed until actual install time.
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir); \
+ $(INSTALL_DATA) $(SHLIB_NAME) $$(DESTDIR)$$(slibdir)/$(SHLIB_SONAME); \
+ rm -f $$(DESTDIR)$$(slibdir)/$(SHLIB_NAME); \
+ $(LN_S) $(SHLIB_SONAME) $$(DESTDIR)$$(slibdir)/$(SHLIB_NAME)
diff --git a/gcc-4.2.1/gcc/config/alpha/t-unicosmk b/gcc-4.2.1/gcc/config/alpha/t-unicosmk
new file mode 100644
index 000000000..9c52b9847
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/t-unicosmk
@@ -0,0 +1,2 @@
+# This file is empty for now.
+
diff --git a/gcc-4.2.1/gcc/config/alpha/t-vms b/gcc-4.2.1/gcc/config/alpha/t-vms
new file mode 100644
index 000000000..516d3ec6e
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/t-vms
@@ -0,0 +1,26 @@
+LIB2FUNCS_EXTRA = $(srcdir)/config/alpha/vms_tramp.asm
+
+# VMS_EXTRA_PARTS is defined in x-vms and represents object files that
+# are only needed for VMS targets, but can only be compiled on a VMS host
+# (because they need DEC C).
+EXTRA_PARTS = vms-dwarf2.o vms-dwarf2eh.o $(VMS_EXTRA_PARTS)
+
+# This object must be linked in to make the executable debuggable.
+# vms-ld handles it automatically when passed -g.
+vms-dwarf2.o : $(srcdir)/config/alpha/vms-dwarf2.asm
+ gcc -c -x assembler $< -o $@
+
+vms-dwarf2eh.o : $(srcdir)/config/alpha/vms-dwarf2eh.asm
+ gcc -c -x assembler $< -o $@
+
+# Assemble startup files.
+vcrt0.o: $(CRT0_S) $(GCC_PASSES)
+ decc -c /names=as_is $(srcdir)/config/alpha/vms-crt0.c -o vcrt0.o
+
+pcrt0.o: $(CRT0_S) $(GCC_PASSES)
+ decc -c /names=as_is $(srcdir)/config/alpha/vms-psxcrt0.c -o pcrt0.o
+
+MULTILIB_OPTIONS = mcpu=ev6
+MULTILIB_DIRNAMES = ev6
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc-4.2.1/gcc/config/alpha/t-vms64 b/gcc-4.2.1/gcc/config/alpha/t-vms64
new file mode 100644
index 000000000..e3fc3be64
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/t-vms64
@@ -0,0 +1,8 @@
+# Assemble startup files.
+vcrt0.o: $(CRT0_S) $(GCC_PASSES)
+ decc -c /names=as_is /pointer_size=64 \
+ $(srcdir)/config/alpha/vms-crt0-64.c -o vcrt0.o
+
+pcrt0.o: $(CRT0_S) $(GCC_PASSES)
+ decc -c /names=as_is /pointer_size=64 \
+ $(srcdir)/config/alpha/vms-psxcrt0-64.c -o pcrt0.o
diff --git a/gcc-4.2.1/gcc/config/alpha/unicosmk.h b/gcc-4.2.1/gcc/config/alpha/unicosmk.h
new file mode 100644
index 000000000..a05b33396
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/unicosmk.h
@@ -0,0 +1,441 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha on Cray
+ T3E running Unicos/Mk.
+ Copyright (C) 2001, 2002, 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by Roman Lechtchinsky (rl@cs.tu-berlin.de)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#undef TARGET_ABI_UNICOSMK
+#define TARGET_ABI_UNICOSMK 1
+
+/* CAM requires a slash before floating-point instruction suffixes. */
+
+#undef TARGET_AS_SLASH_BEFORE_SUFFIX
+#define TARGET_AS_SLASH_BEFORE_SUFFIX 1
+
+/* The following defines are necessary for the standard headers to work
+ correctly. */
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define ("__unix"); \
+ builtin_define ("_UNICOS=205"); \
+ builtin_define ("_CRAY"); \
+ builtin_define ("_CRAYT3E"); \
+ builtin_define ("_CRAYMPP"); \
+ builtin_define ("_CRAYIEEE"); \
+ builtin_define ("_ADDR64"); \
+ builtin_define ("_LD64"); \
+ builtin_define ("__UNICOSMK__"); \
+ } while (0)
+
+#define SHORT_TYPE_SIZE 32
+
+#undef INT_TYPE_SIZE
+#define INT_TYPE_SIZE 64
+
+/* This is consistent with the definition Cray CC uses. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 64
+
+/*
+#define SIZE_TYPE "unsigned int"
+#define PTRDIFF_TYPE "int"
+*/
+
+/* The Alpha operates in big-endian mode on the Cray T3E. */
+
+#undef BITS_BIG_ENDIAN
+#undef BYTES_BIG_ENDIAN
+#undef WORDS_BIG_ENDIAN
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN 1
+#define WORDS_BIG_ENDIAN 1
+
+
+/* Every structure's size must be a multiple of this. */
+
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 64
+
+/* No data type wants to be aligned rounder than this. */
+
+#undef BIGGEST_ALIGNMENT
+#define BIGGEST_ALIGNMENT 256
+
+/* Include the frame pointer in fixed_regs and call_used_regs as it can't be
+ used as a general-purpose register even in frameless functions.
+ ??? The global_regs hack is needed for now because -O2 sometimes tries to
+ eliminate $15 increments/decrements in frameless functions. */
+
+#undef CONDITIONAL_REGISTER_USAGE
+#define CONDITIONAL_REGISTER_USAGE \
+ do { \
+ fixed_regs[15] = 1; \
+ call_used_regs[15] = 1; \
+ global_regs[15] = 1; \
+ } while(0)
+
+/* The stack frame grows downward. */
+
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Define the offset between two registers, one to be eliminated, and the
+ other its replacement, at the start of a routine. This is somewhat
+ complicated on the T3E which is why we use a function. */
+
+#undef INITIAL_ELIMINATION_OFFSET
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ do { \
+ (OFFSET) = unicosmk_initial_elimination_offset ((FROM), (TO)); \
+ } while (0)
+
+
+/* Define this if stack space is still allocated for a parameter passed
+ in a register. On the T3E, stack space is preallocated for all outgoing
+ arguments, including those passed in registers. To avoid problems, we
+ assume that at least 48 bytes (i.e. enough space for all arguments passed
+ in registers) are allocated. */
+
+#define REG_PARM_STACK_SPACE(DECL) 48
+#define OUTGOING_REG_PARM_STACK_SPACE
+
+/* If an argument can't be passed in registers even though not all argument
+ registers have been used yet, it is passed on the stack in the space
+ preallocated for these registers. */
+
+#define STACK_PARMS_IN_REG_PARM_AREA
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On Unicos/Mk, this is a structure that contains various information for
+ the static subroutine information block (SSIB) and the call information
+ word (CIW). */
+
+typedef struct {
+
+ /* The overall number of arguments. */
+ int num_args;
+
+ /* The overall size of the arguments in words. */
+ int num_arg_words;
+
+ /* The number of words passed in registers. */
+ int num_reg_words;
+
+ /* If an argument must be passed in the stack, all subsequent arguments
+ must be passed there, too. This flag indicates whether this is the
+ case. */
+ int force_stack;
+
+  /* For each of the 6 register arguments, the corresponding flag in this
+     array indicates whether the argument is passed in an integer or a
+     floating point register.  */
+ int reg_args_type[6];
+
+} unicosmk_arg_info;
+
+#undef CUMULATIVE_ARGS
+#define CUMULATIVE_ARGS unicosmk_arg_info
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS for a call to a
+ function whose data type is FNTYPE. For a library call, FNTYPE is 0. */
+
+#undef INIT_CUMULATIVE_ARGS
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ do { (CUM).num_args = 0; \
+ (CUM).num_arg_words = 0; \
+ (CUM).num_reg_words = 0; \
+ (CUM).force_stack = 0; \
+ } while(0)
+
+/* Update the data in CUM to advance over an argument of mode MODE and data
+ type TYPE. (TYPE is null for libcalls where that information may not be
+ available.)
+
+ On Unicos/Mk, at most 6 words can be passed in registers. Structures
+ which fit in two words are passed in registers, larger structures are
+ passed on stack. */
+
+#undef FUNCTION_ARG_ADVANCE
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+do { \
+ int size; \
+ \
+ size = ALPHA_ARG_SIZE (MODE, TYPE, NAMED); \
+ \
+ if (size > 2 \
+ || (CUM).num_reg_words + size > 6 \
+ || targetm.calls.must_pass_in_stack (MODE, TYPE)) \
+ (CUM).force_stack = 1; \
+ \
+ if (! (CUM).force_stack) \
+ { \
+ int i; \
+ int isfloat; \
+ isfloat = (GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT); \
+ for (i = 0; i < size; i++) \
+ { \
+ (CUM).reg_args_type[(CUM).num_reg_words] = isfloat; \
+ ++(CUM).num_reg_words; \
+ } \
+ } \
+ (CUM).num_arg_words += size; \
+ ++(CUM).num_args; \
+} while(0)
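+
+/* For illustration (our example, not from the original sources):
+   advancing over the arguments of f (double x, long y) records one
+   float word (reg_args_type[0] == 1) and one integer word
+   (reg_args_type[1] == 0), leaving num_reg_words == 2 and
+   force_stack still clear.  */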
+
+/* This ensures that $15 increments/decrements in leaf functions won't get
+ eliminated. */
+
+#undef EPILOGUE_USES
+#define EPILOGUE_USES(REGNO) ((REGNO) == 26 || (REGNO) == 15)
+
+/* Would have worked, only the stack doesn't seem to be executable
+#undef TRAMPOLINE_TEMPLATE
+#define TRAMPOLINE_TEMPLATE(FILE) \
+do { fprintf (FILE, "\tbr $1,0\n"); \
+ fprintf (FILE, "\tldq $0,12($1)\n"); \
+ fprintf (FILE, "\tldq $1,20($1)\n"); \
+ fprintf (FILE, "\tjmp $31,(r0)\n"); \
+ fprintf (FILE, "\tbis $31,$31,$31\n"); \
+ fprintf (FILE, "\tbis $31,$31,$31\n"); \
+} while (0) */
+
+/* We don't support nested functions (yet). */
+
+#undef TRAMPOLINE_TEMPLATE
+#define TRAMPOLINE_TEMPLATE(FILE) gcc_unreachable ()
+
+/* Specify the machine mode that this machine uses for the index in the
+ tablejump instruction. On Unicos/Mk, we don't support relative case
+ vectors yet, thus the entries should be absolute addresses. */
+
+#undef CASE_VECTOR_MODE
+#define CASE_VECTOR_MODE DImode
+
+#undef CASE_VECTOR_PC_RELATIVE
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+/* #define DEFAULT_SIGNED_CHAR 1 */
+
+/* There are no read-only sections on Unicos/Mk. */
+
+#undef READONLY_DATA_SECTION_ASM_OP
+
+/* We take care of this in unicosmk_file_start. */
+
+#undef ASM_OUTPUT_SOURCE_FILENAME
+
+/* This is how to output a label for a jump table. Arguments are the same as
+ for (*targetm.asm_out.internal_label), except the insn for the jump table is
+ passed. */
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,TABLEINSN) \
+ (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM)
+
+/* CAM has some restrictions with respect to string literals. It won't
+   accept lines with more than 256 characters, which means that we have
+   to split long strings.  Moreover, it only accepts escape sequences of
+   the form \nnn in the range 0 to 127.  We generate .byte directives for
+   characters greater than 127.  And finally, ` must be escaped. */
+
+#undef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII(MYFILE, MYSTRING, MYLENGTH) \
+ do { \
+ FILE *_hide_asm_out_file = (MYFILE); \
+ const unsigned char *_hide_p = (const unsigned char *) (MYSTRING); \
+ int _hide_thissize = (MYLENGTH); \
+ int _size_so_far = 0; \
+ { \
+ FILE *asm_out_file = _hide_asm_out_file; \
+ const unsigned char *p = _hide_p; \
+ int thissize = _hide_thissize; \
+ int in_ascii = 0; \
+ int i; \
+ \
+ for (i = 0; i < thissize; i++) \
+ { \
+ register int c = p[i]; \
+ \
+ if (c > 127) \
+ { \
+ if (in_ascii) \
+ { \
+ fprintf (asm_out_file, "\"\n"); \
+ in_ascii = 0; \
+ } \
+ \
+ fprintf (asm_out_file, "\t.byte\t%d\n", c); \
+ } \
+ else \
+ { \
+ if (! in_ascii) \
+ { \
+ fprintf (asm_out_file, "\t.ascii\t\""); \
+ in_ascii = 1; \
+ _size_so_far = 0; \
+ } \
+ else if (_size_so_far >= 64) \
+ { \
+ fprintf (asm_out_file, "\"\n\t.ascii\t\""); \
+ _size_so_far = 0; \
+ } \
+ \
+ if (c == '\"' || c == '\\' || c == '`') \
+ putc ('\\', asm_out_file); \
+ if (c >= ' ') \
+ putc (c, asm_out_file); \
+ else \
+ fprintf (asm_out_file, "\\%.3o", c); \
+ ++ _size_so_far; \
+ } \
+ } \
+ if (in_ascii) \
+ fprintf (asm_out_file, "\"\n"); \
+ } \
+ } while(0)
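+
+/* As an example of the output (our sketch): the string "ab\377"
+   comes out as
+
+	.ascii	"ab"
+	.byte	255
+
+   since characters above 127 may not appear inside a CAM .ascii
+   directive.  */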
+
+/* This is how to output an element of a case-vector that is absolute. */
+
+#undef ASM_OUTPUT_ADDR_VEC_ELT
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "\t.quad $L%d\n", (VALUE))
+
+/* This is how to output an element of a case-vector that is relative.
+ (Unicos/Mk does not use such vectors yet). */
+
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) gcc_unreachable ()
+
+/* We can't output case vectors in the same section as the function code
+ because CAM doesn't allow data definitions in code sections. Thus, we
+ simply record the case vectors and put them in a separate section after
+ the function. */
+
+#define ASM_OUTPUT_ADDR_VEC(LAB,VEC) \
+ unicosmk_defer_case_vector ((LAB),(VEC))
+
+#define ASM_OUTPUT_ADDR_DIFF_VEC(LAB,VEC) gcc_unreachable ()
+
+/* This is how to output an assembler line that says to advance the location
+ counter to a multiple of 2**LOG bytes. Annoyingly, CAM always uses zeroes
+ to fill the unused space which does not work in code sections. We have to
+ be careful not to use the .align directive in code sections. */
+
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) unicosmk_output_align (STREAM, LOG)
+
+/* This is how to advance the location counter by SIZE bytes. */
+
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(STREAM,SIZE) \
+ fprintf ((STREAM), "\t.byte\t0:"HOST_WIDE_INT_PRINT_UNSIGNED"\n",\
+ (SIZE));
+
+/* This says how to output an assembler line to define a global common
+ symbol. We need the alignment information because it has to be supplied
+ in the section header. */
+
+#undef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+ unicosmk_output_common ((FILE), (NAME), (SIZE), (ALIGN))
+
+/* This says how to output an assembler line to define a local symbol. */
+
+#undef ASM_OUTPUT_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+ do { switch_to_section (data_section); \
+ fprintf (FILE, "\t.align\t%d\n", floor_log2 ((ALIGN) / BITS_PER_UNIT));\
+ ASM_OUTPUT_LABEL ((FILE), (NAME)); \
+ fprintf (FILE, "\t.byte 0:"HOST_WIDE_INT_PRINT_UNSIGNED"\n",(SIZE));\
+ } while (0)
+
+/* CAM does not allow us to declare a symbol as external first and then
+ define it in the same file later. Thus, we keep a list of all external
+ references, remove all symbols defined locally from it and output it at
+ the end of the asm file. */
+
+#define ASM_OUTPUT_EXTERNAL(FILE,DECL,NAME) \
+ unicosmk_add_extern ((NAME))
+
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(STREAM,SYMREF) \
+ unicosmk_add_extern (XSTR ((SYMREF), 0))
+
+/* This is how to declare an object. We don't have to output anything if
+ it is a global variable because those go into unique `common' sections
+ and the section name is globally visible. For local variables, we simply
+ output the label. In any case, we have to record that no extern
+ declaration should be generated for the symbol. */
+
+#define ASM_DECLARE_OBJECT_NAME(STREAM,NAME,DECL) \
+ do { tree name_tree; \
+ name_tree = get_identifier ((NAME)); \
+ TREE_ASM_WRITTEN (name_tree) = 1; \
+ if (!TREE_PUBLIC (DECL)) \
+ { \
+ assemble_name (STREAM, NAME); \
+ fputs (":\n", STREAM); \
+ } \
+ } while(0)
+
+/* Switch into a generic section. */
+#define TARGET_ASM_NAMED_SECTION unicosmk_asm_named_section
+#define TARGET_ASM_INIT_SECTIONS unicosmk_init_sections
+
+#undef ASM_OUTPUT_MAX_SKIP_ALIGN
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(STREAM,POWER,MAXSKIP)
+
+#undef NM_FLAGS
+
+#undef OBJECT_FORMAT_COFF
+
+/* We cannot generate debugging information on Unicos/Mk. */
+
+#undef SDB_DEBUGGING_INFO
+#undef MIPS_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+#undef DWARF2_DEBUGGING_INFO
+#undef DWARF2_UNWIND_INFO
+#undef INCOMING_RETURN_ADDR_RTX
+#undef PREFERRED_DEBUGGING_TYPE
+
+/* We don't need a start file. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+/* These are the libraries we have to link with.
+ ??? The Craylibs directory should be autoconfed. */
+#undef LIB_SPEC
+#define LIB_SPEC "-L/opt/ctl/craylibs/craylibs -lu -lm -lc -lsma"
+
+#undef EXPAND_BUILTIN_VA_START
+
+#define EH_FRAME_IN_DATA_SECTION 1
diff --git a/gcc-4.2.1/gcc/config/alpha/va_list.h b/gcc-4.2.1/gcc/config/alpha/va_list.h
new file mode 100644
index 000000000..df58ed0b5
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/va_list.h
@@ -0,0 +1,19 @@
+/* A replacement for Digital Unix's <va_list.h>. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef __builtin_va_list __gnuc_va_list;
+#endif
+
+#if !defined(_VA_LIST) && !defined(_HIDDEN_VA_LIST)
+#define _VA_LIST
+typedef __gnuc_va_list va_list;
+
+#elif defined(_HIDDEN_VA_LIST) && !defined(_HIDDEN_VA_LIST_DONE)
+#define _HIDDEN_VA_LIST_DONE
+typedef __gnuc_va_list __va_list;
+
+#elif defined(_HIDDEN_VA_LIST) && defined(_VA_LIST)
+#undef _HIDDEN_VA_LIST
+
+#endif
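+
+/* Usage sketch (our annotation): a system header that wants only the
+   hidden name defines _HIDDEN_VA_LIST before including this file and
+   gets the __va_list typedef; a plain inclusion defines the public
+   va_list typedef instead.  */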
diff --git a/gcc-4.2.1/gcc/config/alpha/vms-cc.c b/gcc-4.2.1/gcc/config/alpha/vms-cc.c
new file mode 100644
index 000000000..0dcf1508f
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms-cc.c
@@ -0,0 +1,356 @@
+/* VMS DEC C wrapper.
+ Copyright (C) 2001, 2003 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* This program is a wrapper around the VMS DEC C compiler.
+ It translates Unix style command line options into corresponding
+ VMS style qualifiers and then spawns the DEC C compiler. */
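+
+/* For example (illustrative only; the exact qualifiers come from
+   process_args below and the VMS file specs from decc$to_vms):
+
+     vms-cc -c -g -DTEST -o foo.obj foo.c
+
+   spawns something like
+
+     cc /obj=foo.obj /debug /define=("TEST") DEV:[dir]foo.c  */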
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+
+#undef PATH_SEPARATOR
+#undef PATH_SEPARATOR_STR
+#define PATH_SEPARATOR ','
+#define PATH_SEPARATOR_STR ","
+
+/* These can be set by command line arguments.  */
+static int verbose = 0;
+static int save_temps = 0;
+
+static int comp_arg_max = -1;
+static const char **comp_args = 0;
+static int comp_arg_index = -1;
+static char *objfilename = 0;
+
+static char *system_search_dirs = (char *) "";
+static char *search_dirs;
+
+static char *default_defines = (char *) "";
+static char *defines;
+
+/* Translate a Unix syntax directory specification into VMS syntax.
+ If indicators of VMS syntax found, return input string. */
+static char *to_host_dir_spec (char *);
+
+/* Translate a Unix syntax file specification into VMS syntax.
+ If indicators of VMS syntax found, return input string. */
+static char *to_host_file_spec (char *);
+
+/* Add a translated arg to the list to be passed to DEC CC. */
+static void addarg (const char *);
+
+/* Preprocess the number of args in P_ARGC and contained in ARGV.
+ Look for special flags, etc. that must be handled first. */
+static void preprocess_args (int *, char **);
+
+/* Process the number of args in P_ARGC and contained in ARGV. Look
+ for special flags, etc. that must be handled for the VMS compiler. */
+static void process_args (int *, char **);
+
+/* Action routine called by decc$to_vms */
+static int translate_unix (char *, int);
+
+/* Add the argument contained in STR to the list of arguments to pass to the
+ compiler. */
+
+static void
+addarg (const char *str)
+{
+ int i;
+
+ if (++comp_arg_index >= comp_arg_max)
+ {
+ const char **new_comp_args
+ = (const char **) xcalloc (comp_arg_max + 1000, sizeof (char *));
+
+ for (i = 0; i <= comp_arg_max; i++)
+ new_comp_args [i] = comp_args [i];
+
+ if (comp_args)
+ free (comp_args);
+
+ comp_arg_max += 1000;
+ comp_args = new_comp_args;
+ }
+
+ comp_args [comp_arg_index] = str;
+}
+
+static void
+preprocess_args (int *p_argc, char *argv[])
+{
+ int i;
+
+ for (i = 1; i < *p_argc; i++)
+ {
+ if (strcmp (argv[i], "-o") == 0)
+ {
+ char *buff, *ptr;
+
+ i++;
+ ptr = to_host_file_spec (argv[i]);
+ objfilename = xstrdup (ptr);
+ buff = concat ("/obj=", ptr, NULL);
+ addarg (buff);
+ }
+ }
+}
+
+static void
+process_args (int *p_argc, char *argv[])
+{
+ int i;
+
+ for (i = 1; i < *p_argc; i++)
+ {
+ if (strlen (argv[i]) < 2)
+ continue;
+
+ if (strncmp (argv[i], "-I", 2) == 0)
+ {
+ char *ptr;
+ int new_len, search_dirs_len;
+
+ ptr = to_host_dir_spec (&argv[i][2]);
+ new_len = strlen (ptr);
+ search_dirs_len = strlen (search_dirs);
+
+ search_dirs = xrealloc (search_dirs, search_dirs_len + new_len + 2);
+ if (search_dirs_len > 0)
+ strcat (search_dirs, PATH_SEPARATOR_STR);
+ strcat (search_dirs, ptr);
+ }
+ else if (strncmp (argv[i], "-D", 2) == 0)
+ {
+ char *ptr;
+ int new_len, defines_len;
+
+ ptr = &argv[i][2];
+ new_len = strlen (ptr);
+ defines_len = strlen (defines);
+
+ defines = xrealloc (defines, defines_len + new_len + 4);
+ if (defines_len > 0)
+ strcat (defines, ",");
+
+ strcat (defines, "\"");
+ strcat (defines, ptr);
+ strcat (defines, "\"");
+ }
+ else if (strcmp (argv[i], "-v") == 0)
+ verbose = 1;
+ else if (strcmp (argv[i], "-g0") == 0)
+ addarg ("/nodebug");
+ else if (strcmp (argv[i], "-O0") == 0)
+ addarg ("/noopt");
+ else if (strncmp (argv[i], "-g", 2) == 0)
+ addarg ("/debug");
+ else if (strcmp (argv[i], "-E") == 0)
+ addarg ("/preprocess");
+ else if (strcmp (argv[i], "-save-temps") == 0)
+ save_temps = 1;
+ }
+}
+
+/* The main program. Spawn the VMS DEC C compiler after fixing up the
+ Unix-like flags and args to be what VMS DEC C wants. */
+
+typedef struct dsc {unsigned short len, mbz; char *adr; } Descr;
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ char cwdev [128], *devptr;
+ int devlen;
+ char *cwd = getcwd (0, 1024);
+
+ devptr = strchr (cwd, ':');
+ devlen = (devptr - cwd) + 1;
+ strncpy (cwdev, cwd, devlen);
+ cwdev [devlen] = '\0';
+
+ search_dirs = xstrdup (system_search_dirs);
+ defines = xstrdup (default_defines);
+
+ addarg ("cc");
+ preprocess_args (&argc , argv);
+ process_args (&argc , argv);
+
+ if (strlen (search_dirs) > 0)
+ {
+ addarg ("/include=(");
+ addarg (search_dirs);
+ addarg (")");
+ }
+
+ if (strlen (defines) > 0)
+ {
+ addarg ("/define=(");
+ addarg (defines);
+ addarg (")");
+ }
+
+ for (i = 1; i < argc; i++)
+ {
+ int arg_len = strlen (argv[i]);
+
+ if (strcmp (argv[i], "-o") == 0)
+ i++;
+ else if (strcmp (argv[i], "-v" ) == 0
+ || strcmp (argv[i], "-E") == 0
+ || strcmp (argv[i], "-c") == 0
+ || strncmp (argv[i], "-g", 2 ) == 0
+ || strncmp (argv[i], "-O", 2 ) == 0
+ || strcmp (argv[i], "-save-temps") == 0
+ || (arg_len > 2 && strncmp (argv[i], "-I", 2) == 0)
+ || (arg_len > 2 && strncmp (argv[i], "-D", 2) == 0))
+ ;
+
+ /* Unix style file specs and VMS style switches look alike, so assume
+ an arg consisting of one and only one slash, and that being first, is
+ really a switch. */
+ else if ((argv[i][0] == '/') && (strchr (&argv[i][1], '/') == 0))
+ addarg (argv[i]);
+ else
+ {
+ /* Assume filename arg */
+ char buff [256], *ptr;
+
+ ptr = to_host_file_spec (argv[i]);
+ arg_len = strlen (ptr);
+
+ if (ptr[0] == '[')
+ sprintf (buff, "%s%s", cwdev, ptr);
+ else if (strchr (ptr, ':'))
+ sprintf (buff, "%s", ptr);
+ else
+ sprintf (buff, "%s%s", cwd, ptr);
+
+ ptr = xstrdup (buff);
+ addarg (ptr);
+ }
+ }
+
+ addarg (NULL);
+
+ if (verbose)
+ {
+ int i;
+
+ for (i = 0; i < comp_arg_index; i++)
+ printf ("%s ", comp_args [i]);
+
+ putchar ('\n');
+ }
+
+ {
+ int i;
+ int len = 0;
+
+ for (i = 0; comp_args[i]; i++)
+ len = len + strlen (comp_args[i]) + 1;
+
+ {
+ char *allargs = (char *) alloca (len + 1);
+ Descr cmd;
+ int status;
+ int status1 = 1;
+
+ for (i = 0; i < len + 1; i++)
+ allargs [i] = 0;
+
+ for (i = 0; comp_args [i]; i++)
+ {
+ strcat (allargs, comp_args [i]);
+ strcat (allargs, " ");
+ }
+
+ cmd.adr = allargs;
+ cmd.len = len;
+ cmd.mbz = 0;
+
+ i = LIB$SPAWN (&cmd, 0, 0, 0, 0, 0, &status);
+
+ if ((i & 1) != 1)
+ {
+ LIB$SIGNAL (i);
+ exit (1);
+ }
+
+ if ((status & 1) == 1 && (status1 & 1) == 1)
+ exit (0);
+
+ exit (1);
+ }
+ }
+}
+
+static char new_host_filespec [255];
+static char new_host_dirspec [255];
+static char filename_buff [256];
+
+static int
+translate_unix (char *name, int type ATTRIBUTE_UNUSED)
+{
+ strcpy (filename_buff, name);
+ return 0;
+}
+
+static char *
+to_host_dir_spec (char *dirspec)
+{
+ int len = strlen (dirspec);
+
+ strcpy (new_host_dirspec, dirspec);
+
+ if (strchr (new_host_dirspec, ']') || strchr (new_host_dirspec, ':'))
+ return new_host_dirspec;
+
+ while (len > 1 && new_host_dirspec [len-1] == '/')
+ {
+ new_host_dirspec [len-1] = 0;
+ len--;
+ }
+
+ decc$to_vms (new_host_dirspec, translate_unix, 1, 2);
+ strcpy (new_host_dirspec, filename_buff);
+
+ return new_host_dirspec;
+
+}
+
+static char *
+to_host_file_spec (char *filespec)
+{
+ strcpy (new_host_filespec, "");
+ if (strchr (filespec, ']') || strchr (filespec, ':'))
+ strcpy (new_host_filespec, filespec);
+ else
+ {
+ decc$to_vms (filespec, translate_unix, 1, 1);
+ strcpy (new_host_filespec, filename_buff);
+ }
+
+ return new_host_filespec;
+}
diff --git a/gcc-4.2.1/gcc/config/alpha/vms-crt0-64.c b/gcc-4.2.1/gcc/config/alpha/vms-crt0-64.c
new file mode 100644
index 000000000..2e87965e6
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms-crt0-64.c
@@ -0,0 +1,99 @@
+/* VMS 64-bit crt0 returning VMS-style condition codes.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#if !defined(__DECC)
+You Lose! This file can only be compiled with DEC C.
+#else
+
+/* This file can only be compiled with DEC C, due to the call to
+   lib$establish and the __pointer_size pragmas.  */
+
+#pragma __pointer_size short
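+
+/* Editorial note (not in the original source): under DEC C this pragma
+   sets the size of subsequently declared pointers.  argc/argv/envp below
+   are therefore declared with 32-bit pointers, while long_argv/long_envp,
+   declared under "#pragma __pointer_size long", use 64-bit pointers;
+   __main flips between the two while re-copying the vectors.  */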
+
+#include <stdlib.h>
+#include <string.h>
+#include <ssdef.h>
+
+extern void decc$main ();
+
+extern int main ();
+
+static int
+handler (sigargs, mechargs)
+ void *sigargs;
+ void *mechargs;
+{
+ return SS$_RESIGNAL;
+}
+
+int
+__main (arg1, arg2, arg3, image_file_desc, arg5, arg6)
+ void *arg1, *arg2, *arg3;
+ void *image_file_desc;
+ void *arg5, *arg6;
+{
+ int argc;
+ char **argv;
+ char **envp;
+
+#pragma __pointer_size long
+
+ int i;
+ char **long_argv;
+ char **long_envp;
+
+#pragma __pointer_size short
+
+ lib$establish (handler);
+ decc$main (arg1, arg2, arg3, image_file_desc,
+ arg5, arg6, &argc, &argv, &envp);
+
+#pragma __pointer_size long
+
+ /* Reallocate argv with 64 bit pointers. */
+ long_argv = (char **) malloc (sizeof (char *) * (argc + 1));
+
+ for (i = 0; i < argc; i++)
+ long_argv[i] = strdup (argv[i]);
+
+ long_argv[argc] = (char *) 0;
+
+  /* Count the environment entries first, so the 64 bit copy is sized
+     to fit; a fixed-size allocation here could overflow.  */
+  for (i = 0; envp[i]; i++)
+    ;
+
+  long_envp = (char **) malloc (sizeof (char *) * (i + 1));
+
+ for (i = 0; envp[i]; i++)
+ long_envp[i] = strdup (envp[i]);
+
+ long_envp[i] = (char *) 0;
+
+#pragma __pointer_size short
+
+ return main (argc, long_argv, long_envp);
+}
+#endif
diff --git a/gcc-4.2.1/gcc/config/alpha/vms-crt0.c b/gcc-4.2.1/gcc/config/alpha/vms-crt0.c
new file mode 100644
index 000000000..6161d1127
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms-crt0.c
@@ -0,0 +1,71 @@
+/* VMS crt0 returning VMS style condition codes.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#if !defined(__DECC)
+You Lose! This file can only be compiled with DEC C.
+#else
+
+/* This file can only be compiled with DEC C, due to the call to
+ lib$establish. */
+
+#include <stdlib.h>
+#include <string.h>
+#include <ssdef.h>
+
+extern void decc$main ();
+
+extern int main ();
+
+static int
+handler (sigargs, mechargs)
+ void *sigargs;
+ void *mechargs;
+{
+ return SS$_RESIGNAL;
+}
+
+int
+__main (arg1, arg2, arg3, image_file_desc, arg5, arg6)
+ void *arg1, *arg2, *arg3;
+ void *image_file_desc;
+ void *arg5, *arg6;
+{
+ int argc;
+ char **argv;
+ char **envp;
+
+ lib$establish (handler);
+
+ decc$main(arg1, arg2, arg3, image_file_desc, arg5, arg6,
+ &argc, &argv, &envp);
+
+ return main (argc, argv, envp);
+}
+#endif
diff --git a/gcc-4.2.1/gcc/config/alpha/vms-dwarf2.asm b/gcc-4.2.1/gcc/config/alpha/vms-dwarf2.asm
new file mode 100644
index 000000000..ea62051fa
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms-dwarf2.asm
@@ -0,0 +1,82 @@
+/* VMS dwarf2 section sequentializer.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Linking with this file forces Dwarf2 debug sections to be
+ sequentially loaded by the VMS linker, enabling GDB to read them. */
+
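+/* Editorial note (not in the original source): each block below simply
+   opens a debug section and plants a global label at its start.
+   vms-ld.c, later in this change, references these $dwarf2.debug_*
+   labels when building the linker options file (cluster/collect
+   directives and, for shareable images, symbol_vector entries).  */
+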
+.section .debug_abbrev,NOWRT
+ .align 0
+ .globl $dwarf2.debug_abbrev
+$dwarf2.debug_abbrev:
+
+.section .debug_aranges,NOWRT
+ .align 0
+ .globl $dwarf2.debug_aranges
+$dwarf2.debug_aranges:
+
+.section .debug_frame,NOWRT
+ .align 0
+ .globl $dwarf2.debug_frame
+$dwarf2.debug_frame:
+
+.section .debug_info,NOWRT
+ .align 0
+ .globl $dwarf2.debug_info
+$dwarf2.debug_info:
+
+.section .debug_line,NOWRT
+ .align 0
+ .globl $dwarf2.debug_line
+$dwarf2.debug_line:
+
+.section .debug_loc,NOWRT
+ .align 0
+ .globl $dwarf2.debug_loc
+$dwarf2.debug_loc:
+
+.section .debug_macinfo,NOWRT
+ .align 0
+ .globl $dwarf2.debug_macinfo
+$dwarf2.debug_macinfo:
+
+.section .debug_pubnames,NOWRT
+ .align 0
+ .globl $dwarf2.debug_pubnames
+$dwarf2.debug_pubnames:
+
+.section .debug_str,NOWRT
+ .align 0
+ .globl $dwarf2.debug_str
+$dwarf2.debug_str:
+
+.section .debug_zzzzzz,NOWRT
+ .align 0
+ .globl $dwarf2.debug_zzzzzz
+$dwarf2.debug_zzzzzz:
diff --git a/gcc-4.2.1/gcc/config/alpha/vms-dwarf2eh.asm b/gcc-4.2.1/gcc/config/alpha/vms-dwarf2eh.asm
new file mode 100644
index 000000000..d9067e9d2
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms-dwarf2eh.asm
@@ -0,0 +1,37 @@
+/* VMS dwarf2 exception handling section sequentializer.
+ Copyright (C) 2002 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Linking with this file forces the Dwarf2 EH section to be
+   individually loaded by the VMS linker, enabling the unwinder
+   to read it.  */
+
+.section .eh_frame,NOWRT
+ .align 0
+ .global __EH_FRAME_BEGIN__
+__EH_FRAME_BEGIN__:
diff --git a/gcc-4.2.1/gcc/config/alpha/vms-ld.c b/gcc-4.2.1/gcc/config/alpha/vms-ld.c
new file mode 100644
index 000000000..026b762be
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms-ld.c
@@ -0,0 +1,764 @@
+/* VMS linker wrapper.
+ Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003
+ Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* This program is a wrapper around the VMS linker.
+ It translates Unix style command line options into corresponding
+ VMS style qualifiers and then spawns the VMS linker. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+
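+/* Editorial note (not in the original source): Descr is a minimal
+   stand-in for a VMS string descriptor (struct dsc$descriptor_s from
+   <descrip.h>): a 16-bit length, a field kept zero here where the
+   descriptor's type and class bytes would live, and the address of the
+   text.  LIB$SPAWN in main below consumes one of these.  */
+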
+typedef struct dsc {unsigned short len, mbz; char *adr; } Descr;
+
+#undef PATH_SEPARATOR
+#undef PATH_SEPARATOR_STR
+#define PATH_SEPARATOR ','
+#define PATH_SEPARATOR_STR ","
+
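+/* Editorial illustration (not in the original source): the simplest of
+   the option translations performed by process_args below, shown in
+   isolation.  The real code also rewrites file specs, collects -L
+   search directories and builds a linker options file.  */
+#if 0
+static const char *
+translate_simple_option (const char *arg)
+{
+  if (strcmp (arg, "-g0") == 0)
+    return "/notraceback";	/* no traceback info at all */
+  if (strncmp (arg, "-g", 2) == 0)
+    return "/debug";		/* any other -g level: full debug */
+  if (strcmp (arg, "-map") == 0)
+    return "/map=";		/* followed by the map file name */
+  return 0;			/* handled elsewhere or passed through */
+}
+#endif
+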
+/* Local variable declarations. */
+
+/* File specification for vms-dwarf2.o. */
+static char *vmsdwarf2spec = 0;
+
+/* File specification for vms-dwarf2eh.o. */
+static char *vmsdwarf2ehspec = 0;
+
+/* verbose = 1 if -v passed. */
+static int verbose = 0;
+
+/* save_temps = 1 if -save-temps passed. */
+static int save_temps = 0;
+
+/* By default, don't generate an executable file if there are errors
+   in the link.  Override with --noinhibit-exec. */
+static int inhibit_exec = 1;
+
+/* debug = 1 if -g passed. */
+static int debug = 0;
+
+/* By default prefer to link with shareable image libraries.
+ Override with -static. */
+static int staticp = 0;
+
+/* By default generate an executable, not a shareable image library.
+ Override with -shared. */
+static int share = 0;
+
+/* Remember if IDENTIFICATION given on command line. */
+static int ident = 0;
+
+/* Keep track of arg translations. */
+static int link_arg_max = -1;
+static const char **link_args = 0;
+static int link_arg_index = -1;
+
+/* Keep track of filenames.  */
+static char optfilefullname [267];
+static char *sharefilename = 0;
+static char *exefilename = 0;
+
+/* System search dir list. Leave blank since link handles this
+ internally. */
+static char *system_search_dirs = "";
+
+/* Search dir list passed on command line (with -L). */
+static char *search_dirs;
+
+/* Local function declarations. */
+
+/* Add STR to the list of arguments to pass to the linker. Expand the list as
+ necessary to accommodate. */
+static void addarg (const char *);
+
+/* Check to see if NAME is a regular file, i.e. not a directory.  */
+static int is_regular_file (char *);
+
+/* Translate a Unix syntax file specification FILESPEC into VMS syntax.
+ If indicators of VMS syntax found, return input string. */
+static char *to_host_file_spec (char *);
+
+/* Locate the library named LIB_NAME in the set of paths PATH_VAL. */
+static char *locate_lib (char *, char *);
+
+/* Given a library name NAME, e.g. foo, look for libfoo.exe, then
+   libfoo.a and libfoo.olb, in the set of directories we are allowed
+   to search in. */
+static const char *expand_lib (char *);
+
+/* Preprocess the number of args P_ARGC in ARGV.
+ Look for special flags, etc. that must be handled first. */
+static void preprocess_args (int *, char **);
+
+/* Preprocess the number of args P_ARGC in ARGV. Look for
+ special flags, etc. that must be handled for the VMS linker. */
+static void process_args (int *, char **);
+
+/* Action routine called by decc$to_vms. NAME is a file name or
+ directory name. TYPE is unused. */
+static int translate_unix (char *, int);
+
+int main (int, char **);
+
+static void
+addarg (const char *str)
+{
+ int i;
+
+ if (++link_arg_index >= link_arg_max)
+ {
+ const char **new_link_args
+ = (const char **) xcalloc (link_arg_max + 1000, sizeof (char *));
+
+ for (i = 0; i <= link_arg_max; i++)
+ new_link_args [i] = link_args [i];
+
+ if (link_args)
+ free (link_args);
+
+ link_arg_max += 1000;
+ link_args = new_link_args;
+ }
+
+ link_args [link_arg_index] = str;
+}
+
+static char *
+locate_lib (char *lib_name, char *path_val)
+{
+ int lib_len = strlen (lib_name);
+ char *eptr, *sptr;
+
+ for (sptr = path_val; *sptr; sptr = eptr)
+ {
+ char *buf, *ptr;
+
+ while (*sptr == PATH_SEPARATOR)
+ sptr ++;
+
+ eptr = strchr (sptr, PATH_SEPARATOR);
+ if (eptr == 0)
+ eptr = strchr (sptr, 0);
+
+ buf = alloca ((eptr-sptr) + lib_len + 4 + 2);
+ strncpy (buf, sptr, eptr-sptr);
+ buf [eptr-sptr] = 0;
+ strcat (buf, "/");
+ strcat (buf, lib_name);
+ ptr = strchr (buf, 0);
+
+ if (debug || staticp)
+ {
+ /* For debug or static links, look for shareable image libraries
+ last. */
+ strcpy (ptr, ".a");
+ if (is_regular_file (buf))
+ return xstrdup (to_host_file_spec (buf));
+
+ strcpy (ptr, ".olb");
+ if (is_regular_file (buf))
+ return xstrdup (to_host_file_spec (buf));
+
+ strcpy (ptr, ".exe");
+ if (is_regular_file (buf))
+ return xstrdup (to_host_file_spec (buf));
+ }
+ else
+ {
+ /* Otherwise look for shareable image libraries first. */
+ strcpy (ptr, ".exe");
+ if (is_regular_file (buf))
+ return xstrdup (to_host_file_spec (buf));
+
+ strcpy (ptr, ".a");
+ if (is_regular_file (buf))
+ return xstrdup (to_host_file_spec (buf));
+
+ strcpy (ptr, ".olb");
+ if (is_regular_file (buf))
+ return xstrdup (to_host_file_spec (buf));
+ }
+ }
+
+ return 0;
+}
+
+static const char *
+expand_lib (char *name)
+{
+ char *lib, *lib_path;
+
+ if (strcmp (name, "c") == 0)
+ /* IEEE VAX C compatible library for non-prefixed (e.g. no DECC$)
+ C RTL functions. */
+ return "sys$library:vaxcrtltx.olb";
+
+ else if (strcmp (name, "m") == 0)
+    /* No separate library for math functions.  */
+ return "";
+
+ else
+ {
+ lib = xmalloc (strlen (name) + 14);
+
+ strcpy (lib, "lib");
+ strcat (lib, name);
+ lib_path = locate_lib (lib, search_dirs);
+
+ if (lib_path)
+ return lib_path;
+ }
+
+ fprintf (stderr,
+ "Couldn't locate library: lib%s.exe, lib%s.a or lib%s.olb\n",
+ name, name, name);
+
+ exit (1);
+}
+
+static int
+is_regular_file (char *name)
+{
+ int ret;
+ struct stat statbuf;
+
+ ret = stat (name, &statbuf);
+ return !ret && S_ISREG (statbuf.st_mode);
+}
+
+static void
+preprocess_args (int *p_argc, char **argv)
+{
+ int i;
+
+ for (i = 1; i < *p_argc; i++)
+    if (strlen (argv[i]) >= 7 && strncmp (argv[i], "-shared", 7) == 0)
+ share = 1;
+
+ for (i = 1; i < *p_argc; i++)
+ if (strcmp (argv[i], "-o") == 0)
+ {
+ char *buff, *ptr;
+ int out_len;
+ int len;
+
+ i++;
+ ptr = to_host_file_spec (argv[i]);
+ exefilename = xstrdup (ptr);
+ out_len = strlen (ptr);
+ buff = xmalloc (out_len + 18);
+
+ if (share)
+ strcpy (buff, "/share=");
+ else
+ strcpy (buff, "/exe=");
+
+ strcat (buff, ptr);
+ addarg (buff);
+
+ if (share)
+ {
+ sharefilename = xmalloc (out_len+5);
+ if (ptr == strchr (argv[i], ']'))
+ strcpy (sharefilename, ++ptr);
+ else if (ptr == strchr (argv[i], ':'))
+ strcpy (sharefilename, ++ptr);
+ else if (ptr == strrchr (argv[i], '/'))
+ strcpy (sharefilename, ++ptr);
+ else
+ strcpy (sharefilename, argv[i]);
+
+ len = strlen (sharefilename);
+ if (strncasecmp (&sharefilename[len-4], ".exe", 4) == 0)
+ sharefilename[len-4] = 0;
+
+ for (ptr = sharefilename; *ptr; ptr++)
+ *ptr = TOUPPER (*ptr);
+ }
+ }
+}
+
+static void
+process_args (int *p_argc, char **argv)
+{
+ int i;
+
+ for (i = 1; i < *p_argc; i++)
+ {
+ if (strlen (argv[i]) < 2)
+ continue;
+
+ if (strncmp (argv[i], "-L", 2) == 0)
+ {
+ char *nbuff, *ptr;
+ int new_len, search_dirs_len;
+
+ ptr = &argv[i][2];
+ new_len = strlen (ptr);
+ search_dirs_len = strlen (search_dirs);
+
+ nbuff = xmalloc (new_len + 1);
+ strcpy (nbuff, ptr);
+
+ /* Remove trailing slashes. */
+ while (new_len > 1 && nbuff [new_len - 1] == '/')
+ {
+ nbuff [new_len - 1] = 0;
+ new_len--;
+ }
+
+ search_dirs = xrealloc (search_dirs, search_dirs_len + new_len + 2);
+ if (search_dirs_len > 0)
+ strcat (search_dirs, PATH_SEPARATOR_STR);
+
+ strcat (search_dirs, nbuff);
+ free (nbuff);
+ }
+
+ /* -v turns on verbose option here and is passed on to gcc. */
+ else if (strcmp (argv[i], "-v") == 0)
+ verbose = 1;
+ else if (strcmp (argv[i], "-g0") == 0)
+ addarg ("/notraceback");
+ else if (strncmp (argv[i], "-g", 2) == 0)
+ {
+ addarg ("/debug");
+ debug = 1;
+ }
+ else if (strcmp (argv[i], "-static") == 0)
+ staticp = 1;
+ else if (strcmp (argv[i], "-map") == 0)
+ {
+ char *buff, *ptr;
+
+ buff = xmalloc (strlen (exefilename) + 5);
+ strcpy (buff, exefilename);
+ ptr = strchr (buff, '.');
+ if (ptr)
+ *ptr = 0;
+
+ strcat (buff, ".map");
+ addarg ("/map=");
+ addarg (buff);
+ addarg ("/full");
+ }
+ else if (strcmp (argv[i], "-save-temps") == 0)
+ save_temps = 1;
+ else if (strcmp (argv[i], "--noinhibit-exec") == 0)
+ inhibit_exec = 0;
+ }
+}
+
+/* The main program. Spawn the VMS linker after fixing up the Unix-like flags
+ and args to be what the VMS linker wants. */
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ char cwdev [128], *devptr;
+ int devlen;
+ int optfd;
+ FILE *optfile;
+ char *cwd = getcwd (0, 1024);
+ char *optfilename;
+
+ devptr = strchr (cwd, ':');
+ devlen = (devptr - cwd) + 1;
+ strncpy (cwdev, cwd, devlen);
+ cwdev [devlen] = '\0';
+
+ search_dirs = xstrdup (system_search_dirs);
+
+ addarg ("link");
+
+  /* Pass to find args that have to be appended first. */
+ preprocess_args (&argc , argv);
+
+ /* Pass to find the rest of the args. */
+ process_args (&argc , argv);
+
+ /* Create a temp file to hold args, otherwise we can easily exceed the VMS
+ command line length limits. */
+ optfilename = alloca (strlen ("LDXXXXXX") + 1);
+ strcpy (optfilename, "LDXXXXXX");
+ optfd = mkstemp (optfilename);
+ getcwd (optfilefullname, 256, 1); /* VMS style cwd. */
+ strcat (optfilefullname, optfilename);
+ strcat (optfilefullname, ".");
+ optfile = fdopen (optfd, "w");
+
+ /* Write out the IDENTIFICATION argument first so that it can be overridden
+ by an options file. */
+ for (i = 1; i < argc; i++)
+ {
+ int arg_len = strlen (argv[i]);
+
+ if (arg_len > 6 && strncasecmp (argv[i], "IDENT=", 6) == 0)
+ {
+	  /* Comes from the command line.  If present, it will always appear
+	     before IDENTIFICATION=... and will override it. */
+
+ if (!ident)
+ ident = 1;
+ }
+ else if (arg_len > 15
+ && strncasecmp (argv[i], "IDENTIFICATION=", 15) == 0)
+ {
+ /* Comes from pragma Ident (). */
+
+ if (!ident)
+ {
+ fprintf (optfile, "case_sensitive=yes\n");
+ fprintf (optfile, "IDENTIFICATION=\"%15.15s\"\n", &argv[i][15]);
+ fprintf (optfile, "case_sensitive=NO\n");
+ ident = 1;
+ }
+ }
+ }
+
+ for (i = 1; i < argc; i++)
+ {
+ int arg_len = strlen (argv[i]);
+
+ if (strcmp (argv[i], "-o") == 0)
+ i++;
+ else if (arg_len > 2 && strncmp (argv[i], "-l", 2) == 0)
+ {
+ const char *libname = expand_lib (&argv[i][2]);
+ const char *ext;
+ int len;
+
+ if ((len = strlen (libname)) > 0)
+ {
+ char buff [256];
+
+ if (len > 4 && strcasecmp (&libname [len-4], ".exe") == 0)
+ ext = "/shareable";
+ else
+ ext = "/library";
+
+ if (libname[0] == '[')
+ sprintf (buff, "%s%s", cwdev, libname);
+ else
+ sprintf (buff, "%s", libname);
+
+ fprintf (optfile, "%s%s\n", buff, ext);
+ }
+ }
+
+ else if (strcmp (argv[i], "-v" ) == 0
+ || strncmp (argv[i], "-g", 2 ) == 0
+ || strcmp (argv[i], "-static" ) == 0
+ || strcmp (argv[i], "-map" ) == 0
+ || strcmp (argv[i], "-save-temps") == 0
+ || strcmp (argv[i], "--noinhibit-exec") == 0
+ || (arg_len > 2 && strncmp (argv[i], "-L", 2) == 0)
+ || (arg_len >= 6 && strncmp (argv[i], "-share", 6) == 0))
+ ;
+ else if (arg_len > 1 && argv[i][0] == '@')
+ {
+ FILE *atfile;
+ char *ptr, *ptr1;
+ struct stat statbuf;
+ char *buff;
+ int len;
+
+ if (stat (&argv[i][1], &statbuf))
+ {
+ fprintf (stderr, "Couldn't open linker response file: %s\n",
+ &argv[i][1]);
+ exit (1);
+ }
+
+ buff = xmalloc (statbuf.st_size + 1);
+ atfile = fopen (&argv[i][1], "r");
+ fgets (buff, statbuf.st_size + 1, atfile);
+ fclose (atfile);
+
+ len = strlen (buff);
+ if (buff [len - 1] == '\n')
+ {
+ buff [len - 1] = 0;
+ len--;
+ }
+
+ ptr = buff;
+
+ do
+ {
+ ptr1 = strchr (ptr, ' ');
+ if (ptr1)
+ *ptr1 = 0;
+ ptr = to_host_file_spec (ptr);
+ if (ptr[0] == '[')
+ fprintf (optfile, "%s%s\n", cwdev, ptr);
+ else
+ fprintf (optfile, "%s\n", ptr);
+ ptr = ptr1 + 1;
+ } while (ptr1);
+ }
+
+      /* Unix style file specs and VMS style switches look alike, so assume
+	 that an arg whose only slash is its first character is really a
+	 switch. */
+ else if ((argv[i][0] == '/') && (strchr (&argv[i][1], '/') == 0))
+ addarg (argv[i]);
+ else if (arg_len > 4
+ && strncasecmp (&argv[i][arg_len-4], ".OPT", 4) == 0)
+ {
+ FILE *optfile1;
+ char buff [256];
+
+ optfile1 = fopen (argv[i], "r");
+ while (fgets (buff, 256, optfile1))
+ fputs (buff, optfile);
+
+ fclose (optfile1);
+ }
+ else if (arg_len > 7 && strncasecmp (argv[i], "GSMATCH", 7) == 0)
+ fprintf (optfile, "%s\n", argv[i]);
+ else if (arg_len > 6 && strncasecmp (argv[i], "IDENT=", 6) == 0)
+ {
+	  /* Comes from the command line and will override the pragma. */
+ fprintf (optfile, "case_sensitive=yes\n");
+ fprintf (optfile, "IDENT=\"%15.15s\"\n", &argv[i][6]);
+ fprintf (optfile, "case_sensitive=NO\n");
+ ident = 1;
+ }
+ else if (arg_len > 15
+ && strncasecmp (argv[i], "IDENTIFICATION=", 15) == 0)
+ ;
+ else
+ {
+ /* Assume filename arg. */
+ const char *addswitch = "";
+ char buff [256];
+ int buff_len;
+ int is_cld = 0;
+
+ argv[i] = to_host_file_spec (argv[i]);
+ arg_len = strlen (argv[i]);
+
+ if (arg_len > 4 && strcasecmp (&argv[i][arg_len-4], ".exe") == 0)
+ addswitch = "/shareable";
+
+ if (arg_len > 4 && strcasecmp (&argv[i][arg_len-4], ".cld") == 0)
+ {
+ addswitch = "/shareable";
+ is_cld = 1;
+ }
+
+ if (arg_len > 2 && strcasecmp (&argv[i][arg_len-2], ".a") == 0)
+ addswitch = "/lib";
+
+ if (arg_len > 4 && strcasecmp (&argv[i][arg_len-4], ".olb") == 0)
+ addswitch = "/lib";
+
+ if (argv[i][0] == '[')
+ sprintf (buff, "%s%s%s\n", cwdev, argv[i], addswitch);
+ else if (strchr (argv[i], ':'))
+ sprintf (buff, "%s%s\n", argv[i], addswitch);
+ else
+ sprintf (buff, "%s%s%s\n", cwd, argv[i], addswitch);
+
+ buff_len = strlen (buff);
+
+ if (buff_len >= 15
+ && strcasecmp (&buff[buff_len - 15], "vms-dwarf2eh.o\n") == 0)
+ vmsdwarf2ehspec = xstrdup (buff);
+ else if (buff_len >= 13
+ && strcasecmp (&buff[buff_len - 13],"vms-dwarf2.o\n") == 0)
+ vmsdwarf2spec = xstrdup (buff);
+ else if (is_cld)
+ {
+ addarg (buff);
+ addarg (",");
+ }
+ else
+	fprintf (optfile, "%s", buff);
+ }
+ }
+
+#if 0
+ if (share)
+ fprintf (optfile, "symbol_vector=(main=procedure)\n");
+#endif
+
+ if (vmsdwarf2ehspec)
+ {
+ fprintf (optfile, "case_sensitive=yes\n");
+ fprintf (optfile, "cluster=DWARF2eh,,,%s", vmsdwarf2ehspec);
+ fprintf (optfile, "collect=DWARF2eh,eh_frame\n");
+ fprintf (optfile, "case_sensitive=NO\n");
+ }
+
+ if (debug && vmsdwarf2spec)
+ {
+ fprintf (optfile, "case_sensitive=yes\n");
+ fprintf (optfile, "cluster=DWARF2debug,,,%s", vmsdwarf2spec);
+ fprintf (optfile, "collect=DWARF2debug,debug_abbrev,debug_aranges,-\n");
+ fprintf (optfile, " debug_frame,debug_info,debug_line,debug_loc,-\n");
+ fprintf (optfile, " debug_macinfo,debug_pubnames,debug_str,-\n");
+ fprintf (optfile, " debug_zzzzzz\n");
+ fprintf (optfile, "case_sensitive=NO\n");
+ }
+
+ if (debug && share)
+ {
+ fprintf (optfile, "case_sensitive=yes\n");
+ fprintf (optfile, "symbol_vector=(-\n");
+ fprintf (optfile,
+ "%s$DWARF2.DEBUG_ABBREV/$dwarf2.debug_abbrev=DATA,-\n",
+ sharefilename);
+ fprintf (optfile,
+ "%s$DWARF2.DEBUG_ARANGES/$dwarf2.debug_aranges=DATA,-\n",
+ sharefilename);
+ fprintf (optfile, "%s$DWARF2.DEBUG_FRAME/$dwarf2.debug_frame=DATA,-\n",
+ sharefilename);
+ fprintf (optfile, "%s$DWARF2.DEBUG_INFO/$dwarf2.debug_info=DATA,-\n",
+ sharefilename);
+ fprintf (optfile, "%s$DWARF2.DEBUG_LINE/$dwarf2.debug_line=DATA,-\n",
+ sharefilename);
+ fprintf (optfile, "%s$DWARF2.DEBUG_LOC/$dwarf2.debug_loc=DATA,-\n",
+ sharefilename);
+ fprintf (optfile,
+ "%s$DWARF2.DEBUG_MACINFO/$dwarf2.debug_macinfo=DATA,-\n",
+ sharefilename);
+ fprintf (optfile,
+ "%s$DWARF2.DEBUG_PUBNAMES/$dwarf2.debug_pubnames=DATA,-\n",
+ sharefilename);
+ fprintf (optfile, "%s$DWARF2.DEBUG_STR/$dwarf2.debug_str=DATA,-\n",
+ sharefilename);
+ fprintf (optfile, "%s$DWARF2.DEBUG_ZZZZZZ/$dwarf2.debug_zzzzzz=DATA)\n",
+ sharefilename);
+ fprintf (optfile, "case_sensitive=NO\n");
+ }
+
+ fclose (optfile);
+ addarg (optfilefullname);
+ addarg ("/opt");
+
+ addarg (NULL);
+
+ if (verbose)
+ {
+ int i;
+
+ for (i = 0; i < link_arg_index; i++)
+ printf ("%s ", link_args [i]);
+ putchar ('\n');
+ }
+
+ {
+ int i;
+ int len = 0;
+
+ for (i = 0; link_args[i]; i++)
+ len = len + strlen (link_args[i]) + 1;
+
+ {
+ char *allargs = (char *) alloca (len + 1);
+ Descr cmd;
+ int status;
+ int status1 = 1;
+
+ for (i = 0; i < len + 1; i++)
+ allargs [i] = 0;
+
+ for (i = 0; link_args [i]; i++)
+ {
+ strcat (allargs, link_args [i]);
+ strcat (allargs, " ");
+ }
+
+ cmd.adr = allargs;
+ cmd.len = len;
+ cmd.mbz = 0;
+
+ i = LIB$SPAWN (&cmd, 0, 0, 0, 0, 0, &status);
+ if ((i & 1) != 1)
+ {
+ LIB$SIGNAL (i);
+ exit (1);
+ }
+
+ if (debug && !share)
+ {
+ strcpy (allargs, "@gnu:[bin]set_exe ");
+ strcat (allargs, exefilename);
+ strcat (allargs, " /nodebug /silent");
+ len = strlen (allargs);
+ cmd.adr = allargs;
+ cmd.len = len;
+ cmd.mbz = 0;
+
+ if (verbose)
+	    printf ("%s", allargs);
+
+ i = LIB$SPAWN (&cmd, 0, 0, 0, 0, 0, &status1);
+
+ if ((i & 1) != 1)
+ {
+ LIB$SIGNAL (i);
+ exit (1);
+ }
+ }
+
+ if (!save_temps)
+ remove (optfilefullname);
+
+ if ((status & 1) == 1 && (status1 & 1) == 1)
+ exit (0);
+
+ if (exefilename && inhibit_exec == 1)
+ remove (exefilename);
+
+ exit (1);
+ }
+ }
+}
+
+static char new_host_filespec [255];
+static char filename_buff [256];
+
+static int
+translate_unix (char *name, int type ATTRIBUTE_UNUSED)
+{
+ strcpy (filename_buff, name);
+ return 0;
+}
+
+static char *
+to_host_file_spec (char *filespec)
+{
+ strcpy (new_host_filespec, "");
+ if (strchr (filespec, ']') || strchr (filespec, ':'))
+ strcpy (new_host_filespec, filespec);
+ else
+ {
+ decc$to_vms (filespec, translate_unix, 1, 1);
+ strcpy (new_host_filespec, filename_buff);
+ }
+
+ return new_host_filespec;
+}
diff --git a/gcc-4.2.1/gcc/config/alpha/vms-psxcrt0-64.c b/gcc-4.2.1/gcc/config/alpha/vms-psxcrt0-64.c
new file mode 100644
index 000000000..02c77019a
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms-psxcrt0-64.c
@@ -0,0 +1,128 @@
+/* VMS 64-bit crt0 returning Unix style condition codes.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#if !defined(__DECC)
+You Lose! This file can only be compiled with DEC C.
+#else
+
+/* This file can only be compiled with DEC C, due to the call to
+ lib$establish and the pragmas pointer_size. */
+
+#pragma __pointer_size short
+
+#include <stdlib.h>
+#include <string.h>
+#include <ssdef.h>
+#include <stsdef.h>
+#include <errnodef.h>
+
+extern void decc$main ();
+extern int main ();
+
+static int
+handler (sigargs, mechargs)
+ void *sigargs;
+ void *mechargs;
+{
+ return SS$_RESIGNAL;
+}
+
+int
+__main (arg1, arg2, arg3, image_file_desc, arg5, arg6)
+ void *arg1, *arg2, *arg3;
+ void *image_file_desc;
+     void *arg5, *arg6;
+{
+ int argc;
+ char **argv;
+ char **envp;
+
+#pragma __pointer_size long
+
+ int i;
+ char **long_argv;
+ char **long_envp;
+ int status;
+
+#pragma __pointer_size short
+
+ lib$establish (handler);
+ decc$main (arg1, arg2, arg3, image_file_desc,
+ arg5, arg6, &argc, &argv, &envp);
+
+#pragma __pointer_size long
+
+ /* Reallocate argv with 64 bit pointers. */
+ long_argv = (char **) malloc (sizeof (char *) * (argc + 1));
+
+ for (i = 0; i < argc; i++)
+ long_argv[i] = strdup (argv[i]);
+
+ long_argv[argc] = (char *) 0;
+
+  /* Count the environment entries first, so the 64 bit copy is sized
+     to fit; a fixed-size allocation here could overflow.  */
+  for (i = 0; envp[i]; i++)
+    ;
+
+  long_envp = (char **) malloc (sizeof (char *) * (i + 1));
+
+ for (i = 0; envp[i]; i++)
+ long_envp[i] = strdup (envp[i]);
+
+ long_envp[i] = (char *) 0;
+
+#pragma __pointer_size short
+
+ status = main (argc, long_argv, long_envp);
+
+ /* Map into a range of 0 - 255. */
+ status = status & 255;
+
+ if (status > 0)
+ {
+ int save_status = status;
+
+ status = C$_EXIT1 + ((status - 1) << STS$V_MSG_NO);
+
+ /* An exit failure status requires a "severe" error. All status values
+ are defined in errno with a successful (1) severity but can be
+ changed to an error (2) severity by adding 1. In addition for
+ compatibility with UNIX exit() routines we inhibit a run-time error
+ message from being generated on exit(1). */
+
+ if (save_status == 1)
+ {
+ status++;
+ status |= STS$M_INHIB_MSG;
+ }
+ }
+
+ if (status == 0)
+ status = SS$_NORMAL;
+
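+  /* Editorial example (not in the original source): with the mapping
+     above, exit (0) returns SS$_NORMAL; exit (1) returns C$_EXIT1 + 1
+     (error severity) with STS$M_INHIB_MSG set, so no message is
+     printed; exit (2) returns C$_EXIT1 + (1 << STS$V_MSG_NO), a
+     distinct failure code that does report a message.  */
+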
+ return status;
+}
+#endif
diff --git a/gcc-4.2.1/gcc/config/alpha/vms-psxcrt0.c b/gcc-4.2.1/gcc/config/alpha/vms-psxcrt0.c
new file mode 100644
index 000000000..a4c2349ba
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms-psxcrt0.c
@@ -0,0 +1,99 @@
+/* VMS crt0 returning Unix style condition codes.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#if !defined(__DECC)
+You Lose! This file can only be compiled with DEC C.
+#else
+
+/* This file can only be compiled with DEC C, due to the call to
+ lib$establish. */
+
+#include <stdlib.h>
+#include <string.h>
+#include <ssdef.h>
+#include <stsdef.h>
+#include <errnodef.h>
+
+extern void decc$main ();
+extern int main ();
+
+static int
+handler (sigargs, mechargs)
+ void *sigargs;
+ void *mechargs;
+{
+ return SS$_RESIGNAL;
+}
+
+int
+__main (arg1, arg2, arg3, image_file_desc, arg5, arg6)
+ void *arg1, *arg2, *arg3;
+ void *image_file_desc;
+ void *arg5, *arg6;
+{
+ int argc;
+ char **argv;
+ char **envp;
+ int status;
+
+ lib$establish (handler);
+ decc$main (arg1, arg2, arg3, image_file_desc, arg5, arg6,
+ &argc, &argv, &envp);
+
+ status = main (argc, argv, envp);
+
+ /* Map into a range of 0 - 255. */
+ status = status & 255;
+
+ if (status > 0)
+ {
+ int save_status = status;
+
+ status = C$_EXIT1 + ((status - 1) << STS$V_MSG_NO);
+
+ /* An exit failure status requires a "severe" error
+ All status values are defined in errno with a successful
+ (1) severity but can be changed to an error (2) severity by adding 1.
+ In addition for compatibility with UNIX exit() routines we inhibit
+ a run-time error message from being generated on exit(1). */
+
+ if (save_status == 1)
+ {
+ status++;
+ status |= STS$M_INHIB_MSG;
+ }
+ }
+
+ if (status == 0)
+ status = SS$_NORMAL;
+
+ return status;
+}
+#endif
diff --git a/gcc-4.2.1/gcc/config/alpha/vms-unwind.h b/gcc-4.2.1/gcc/config/alpha/vms-unwind.h
new file mode 100644
index 000000000..4fb6a198f
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms-unwind.h
@@ -0,0 +1,72 @@
+/* DWARF2 EH unwinding support for Alpha VMS.
+ Copyright (C) 2004 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include <pdscdef.h>
+
+#define MD_FALLBACK_FRAME_STATE_FOR alpha_fallback_frame_state
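+
+/* Editorial note (not in the original source): the generic unwinder
+   calls this hook when it finds no DWARF2 FDE for a PC; the routine
+   below reconstructs the frame state from the VMS procedure descriptor
+   (PDSC) instead, handling the stack-frame and register-frame kinds.  */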
+
+static _Unwind_Reason_Code
+alpha_fallback_frame_state (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ PDSCDEF *pv = *((PDSCDEF **) context->reg [29]);
+
+ if (pv && ((long) pv & 0x7) == 0) /* low bits 0 means address */
+ pv = *(PDSCDEF **) pv;
+
+ if (pv && ((pv->pdsc$w_flags & 0xf) == PDSC$K_KIND_FP_STACK))
+ {
+ int i, j;
+
+ fs->cfa_offset = pv->pdsc$l_size;
+ fs->cfa_reg = pv->pdsc$w_flags & PDSC$M_BASE_REG_IS_FP ? 29 : 30;
+ fs->retaddr_column = 26;
+ fs->cfa_how = CFA_REG_OFFSET;
+ fs->regs.reg[27].loc.offset = -pv->pdsc$l_size;
+ fs->regs.reg[27].how = REG_SAVED_OFFSET;
+ fs->regs.reg[26].loc.offset
+ = -(pv->pdsc$l_size - pv->pdsc$w_rsa_offset);
+ fs->regs.reg[26].how = REG_SAVED_OFFSET;
+
+ for (i = 0, j = 0; i < 32; i++)
+ if (1<<i & pv->pdsc$l_ireg_mask)
+ {
+ fs->regs.reg[i].loc.offset
+ = -(pv->pdsc$l_size - pv->pdsc$w_rsa_offset - 8 * ++j);
+ fs->regs.reg[i].how = REG_SAVED_OFFSET;
+ }
+
+ return _URC_NO_REASON;
+ }
+ else if (pv && ((pv->pdsc$w_flags & 0xf) == PDSC$K_KIND_FP_REGISTER))
+ {
+ fs->cfa_offset = pv->pdsc$l_size;
+ fs->cfa_reg = pv->pdsc$w_flags & PDSC$M_BASE_REG_IS_FP ? 29 : 30;
+ fs->retaddr_column = 26;
+ fs->cfa_how = CFA_REG_OFFSET;
+ fs->regs.reg[26].loc.reg = pv->pdsc$b_save_ra;
+ fs->regs.reg[26].how = REG_SAVED_REG;
+ fs->regs.reg[29].loc.reg = pv->pdsc$b_save_fp;
+ fs->regs.reg[29].how = REG_SAVED_REG;
+
+ return _URC_NO_REASON;
+ }
+ return _URC_END_OF_STACK;
+}
diff --git a/gcc-4.2.1/gcc/config/alpha/vms.h b/gcc-4.2.1/gcc/config/alpha/vms.h
new file mode 100644
index 000000000..835458771
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms.h
@@ -0,0 +1,369 @@
+/* Output variables, constants and external declarations, for GNU compiler.
+ Copyright (C) 1996, 1997, 1998, 2000, 2001, 2002, 2004, 2005
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#define TARGET_OBJECT_SUFFIX ".obj"
+#define TARGET_EXECUTABLE_SUFFIX ".exe"
+
+/* This enables certain macros in alpha.h, which will make an indirect
+ reference to an external symbol an invalid address. This needs to be
+ defined before we include alpha.h, since it determines which macros
+ are used for GO_IF_*. */
+
+#define NO_EXTERNAL_INDIRECT_ADDRESS
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define_std ("vms"); \
+ builtin_define_std ("VMS"); \
+ builtin_define ("__ALPHA"); \
+ builtin_assert ("system=vms"); \
+ if (TARGET_FLOAT_VAX) \
+ builtin_define ("__G_FLOAT"); \
+ else \
+ builtin_define ("__IEEE_FLOAT"); \
+ } while (0)
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_FPREGS|MASK_GAS)
+#undef TARGET_ABI_OPEN_VMS
+#define TARGET_ABI_OPEN_VMS 1
+
+#undef TARGET_NAME
+#define TARGET_NAME "OpenVMS/Alpha"
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (%s)", TARGET_NAME);
+
+#undef PCC_STATIC_STRUCT_RETURN
+
+/* "long" is 32 bits, but 64 bits for Ada. */
+#undef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE 32
+#define ADA_LONG_TYPE_SIZE 64
+
+/* Pointer is 32 bits but the hardware has 64-bit addresses, sign extended. */
+#undef POINTER_SIZE
+#define POINTER_SIZE 32
+#define POINTERS_EXTEND_UNSIGNED 0
+
+#define MAX_OFILE_ALIGNMENT 524288 /* 8 x 2^16 by DEC Ada Test CD40VRA */
+
+#undef FIXED_REGISTERS
+#define FIXED_REGISTERS \
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 }
+
+#undef CALL_USED_REGISTERS
+#define CALL_USED_REGISTERS \
+ {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }
+
+/* List the order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS.
+
+ We allocate in the following order:
+ $f1 (nonsaved floating-point register)
+ $f10-$f15 (likewise)
+ $f22-$f30 (likewise)
+ $f21-$f16 (likewise, but input args)
+ $f0 (nonsaved, but return value)
+ $f2-$f9 (saved floating-point registers)
+ $1 (nonsaved integer registers)
+ $22-$25 (likewise)
+ $28 (likewise)
+ $0 (likewise, but return value)
+ $21-$16 (likewise, but input args)
+ $27 (procedure value in OSF, nonsaved in NT)
+ $2-$8 (saved integer registers)
+ $9-$14 (saved integer registers)
+ $26 (return PC)
+ $15 (frame pointer)
+ $29 (global pointer)
+ $30, $31, $f31 (stack pointer and always zero/ap & fp) */
+
+#undef REG_ALLOC_ORDER
+#define REG_ALLOC_ORDER \
+ {33, \
+ 42, 43, 44, 45, 46, 47, \
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, \
+ 53, 52, 51, 50, 49, 48, \
+ 32, \
+ 34, 35, 36, 37, 38, 39, 40, 41, \
+ 1, \
+ 22, 23, 24, 25, \
+ 28, \
+ 0, \
+ 21, 20, 19, 18, 17, 16, \
+ 27, \
+ 2, 3, 4, 5, 6, 7, 8, \
+ 9, 10, 11, 12, 13, 14, \
+ 26, \
+ 15, \
+ 29, \
+ 30, 31, 63 }
+
+#undef HARD_FRAME_POINTER_REGNUM
+#define HARD_FRAME_POINTER_REGNUM 29
+
+/* Define registers used by the epilogue and return instruction. */
+#undef EPILOGUE_USES
+#define EPILOGUE_USES(REGNO) ((REGNO) == 26 || (REGNO) == 29)
+
+#undef CAN_ELIMINATE
+#define CAN_ELIMINATE(FROM, TO) \
+((TO) != STACK_POINTER_REGNUM || ! alpha_using_fp ())
+
+#undef INITIAL_ELIMINATION_OFFSET
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ switch (FROM) \
+ { \
+ case FRAME_POINTER_REGNUM: \
+ (OFFSET) = alpha_sa_size () + alpha_pv_save_size (); \
+ break; \
+ case ARG_POINTER_REGNUM: \
+ (OFFSET) = (ALPHA_ROUND (alpha_sa_size () + alpha_pv_save_size () \
+ + get_frame_size () \
+ + current_function_pretend_args_size) \
+ - current_function_pretend_args_size); \
+ break; \
+ default: \
+ gcc_unreachable (); \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += ALPHA_ROUND (current_function_outgoing_args_size); \
+}
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On Alpha/VMS, this is a structure that contains the number of
+ arguments and, for each argument, the datatype of that argument.
+
+   The number of arguments is the number of words of arguments scanned so far.
+ Thus 6 or more means all following args should go on the stack. */
+
+enum avms_arg_type {I64, FF, FD, FG, FS, FT};
+typedef struct {int num_args; enum avms_arg_type atypes[6];} avms_arg_info;
+
+#undef CUMULATIVE_ARGS
+#define CUMULATIVE_ARGS avms_arg_info
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+#undef INIT_CUMULATIVE_ARGS
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ (CUM).num_args = 0; \
+ (CUM).atypes[0] = (CUM).atypes[1] = (CUM).atypes[2] = I64; \
+ (CUM).atypes[3] = (CUM).atypes[4] = (CUM).atypes[5] = I64;
+
+#undef FUNCTION_ARG_ADVANCE
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ if (targetm.calls.must_pass_in_stack (MODE, TYPE)) \
+ (CUM).num_args += 6; \
+ else \
+ { \
+ if ((CUM).num_args < 6) \
+ (CUM).atypes[(CUM).num_args] = alpha_arg_type (MODE); \
+ \
+ (CUM).num_args += ALPHA_ARG_SIZE (MODE, TYPE, NAMED); \
+ }
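+
+/* Editorial illustration (not in the original source): how this
+   bookkeeping evolves for a hypothetical call f (int, double, float)
+   under IEEE floating point, assuming alpha_arg_type classifies double
+   as FT and float as FS (an assumption about that helper in alpha.c):
+
+     start:         num_args == 0, atypes == { I64, I64, I64, ... }
+     after int:     num_args == 1, atypes == { I64, I64, I64, ... }
+     after double:  num_args == 2, atypes == { I64, FT,  I64, ... }
+     after float:   num_args == 3, atypes == { I64, FT,  FS,  ... }
+
+   Once num_args reaches 6, all following args go on the stack.  */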
+
+/* ABI has stack checking, but it's broken. */
+#undef STACK_CHECK_BUILTIN
+#define STACK_CHECK_BUILTIN 0
+
+#define READONLY_DATA_SECTION_ASM_OP "\t.rdata"
+#define CTORS_SECTION_ASM_OP "\t.ctors"
+#define DTORS_SECTION_ASM_OP "\t.dtors"
+
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) gcc_unreachable ()
+
+#undef ASM_OUTPUT_ADDR_VEC_ELT
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "\t.quad $L%d\n", (VALUE))
+
+#undef CASE_VECTOR_MODE
+#define CASE_VECTOR_MODE DImode
+#undef CASE_VECTOR_PC_RELATIVE
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,TABLEINSN) \
+{ ASM_OUTPUT_ALIGN (FILE, 3); (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); }
+
+/* This says how to output assembler code to declare an
+ uninitialized external linkage data object. */
+
+#define COMMON_ASM_OP "\t.comm\t"
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n", (SIZE), (ALIGN) / BITS_PER_UNIT); \
+} while (0)
+
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ The trampoline should set the static chain pointer to value placed
+ into the trampoline and should branch to the specified routine.
+ Note that $27 has been set to the address of the trampoline, so we can
+ use it for addressability of the two data items. */
+
+#undef TRAMPOLINE_TEMPLATE
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf (FILE, "\t.quad 0\n"); \
+ fprintf (FILE, "\t.linkage __tramp\n"); \
+ fprintf (FILE, "\t.quad 0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#undef TRAMPOLINE_SIZE
+#define TRAMPOLINE_SIZE 32
+
+/* The alignment of a trampoline, in bits. */
+
+#undef TRAMPOLINE_ALIGNMENT
+#define TRAMPOLINE_ALIGNMENT 64
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#undef INITIALIZE_TRAMPOLINE
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+ alpha_initialize_trampoline (TRAMP, FNADDR, CXT, 16, 24, -1)
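+
+/* Editorial note (not in the original source): the offsets 16 and 24
+   passed above are where the function address and the static chain are
+   patched into the 32-byte template; they match the
+   "ldq $27,16($27)" and "ldq $1,24($27)" loads in vms_tramp.asm later
+   in this change.  */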
+
+/* Control how constructors and destructors are emitted. */
+#define TARGET_ASM_CONSTRUCTOR vms_asm_out_constructor
+#define TARGET_ASM_DESTRUCTOR vms_asm_out_destructor
+
+#undef SDB_DEBUGGING_INFO
+#undef MIPS_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+
+#define DWARF2_DEBUGGING_INFO 1
+#define VMS_DEBUGGING_INFO 1
+
+#define DWARF2_UNWIND_INFO 1
+
+#undef EH_RETURN_HANDLER_RTX
+#define EH_RETURN_HANDLER_RTX \
+ gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 8))
+
+#define LINK_EH_SPEC "vms-dwarf2eh.o%s "
+
+#define MD_UNWIND_SUPPORT "config/alpha/vms-unwind.h"
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ fprintf (FILE, "\t.align %d\n", LOG);
+
+/* Switch into a generic section. */
+#define TARGET_ASM_NAMED_SECTION vms_asm_named_section
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t.literals\n"); \
+ in_section = NULL; \
+ fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE VMS_AND_DWARF2_DEBUG
+
+#define ASM_PN_FORMAT "%s___%lu"
+
+/* ??? VMS uses different linkage. */
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+
+#undef ASM_SPEC
+#undef ASM_FINAL_SPEC
+
+/* The VMS convention is to always provide minimal debug info
+ for a traceback unless specifically overridden. Defaulting this here
+ is a kludge. */
+
+#define OPTIMIZATION_OPTIONS(OPTIMIZE, OPTIMIZE_SIZE) \
+{ \
+ write_symbols = VMS_DEBUG; \
+ debug_info_level = (enum debug_info_level) 1; \
+}
+
+/* Override traceback debug info on -g0. */
+#undef OVERRIDE_OPTIONS
+#define OVERRIDE_OPTIONS \
+{ \
+ if (write_symbols == NO_DEBUG) \
+ debug_info_level = (enum debug_info_level) 0; \
+ override_options (); \
+}
+
+/* Link with vms-dwarf2.o if -g (except -g0). This causes the
+ VMS link to pull all the dwarf2 debug sections together. */
+#undef LINK_SPEC
+#define LINK_SPEC "%{g:-g vms-dwarf2.o%s} %{g0} %{g1:-g1 vms-dwarf2.o%s} \
+%{g2:-g2 vms-dwarf2.o%s} %{g3:-g3 vms-dwarf2.o%s} %{shared} %{v} %{map}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:%{mvms-return-codes:vcrt0.o%s} \
+%{!mvms-return-codes:pcrt0.o%s}}"
+
+#undef LIB_SPEC
+#define LIB_SPEC "-lc"
+
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define MD_EXEC_PREFIX "/gnu/lib/gcc-lib/"
+#define MD_STARTFILE_PREFIX "/gnu/lib/gcc-lib/"
+
+/* Specify the list of include file directories. */
+#define INCLUDE_DEFAULTS \
+{ \
+ { "/gnu/lib/gcc-lib/include", 0, 0, 0 }, \
+ { "/gnu_gxx_include", 0, 1, 1 }, \
+ { "/gnu_cc_include", 0, 0, 0 }, \
+ { "/gnu/include", 0, 0, 0 }, \
+ { 0, 0, 0, 0 } \
+}
+
+#define LONGLONG_STANDALONE 1
diff --git a/gcc-4.2.1/gcc/config/alpha/vms64.h b/gcc-4.2.1/gcc/config/alpha/vms64.h
new file mode 100644
index 000000000..6fd30cdc1
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms64.h
@@ -0,0 +1,32 @@
+/* Output variables, constants and external declarations, for GNU compiler.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Douglas Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Defaults to BITS_PER_WORD, i.e. 64, which is what is wanted.
+   This is incompatible with DEC C, but matches DEC Ada.  */
+#undef LONG_TYPE_SIZE
+
+/* Defaults to "long int" */
+#undef SIZE_TYPE
+#undef PTRDIFF_TYPE
+
+#undef POINTERS_EXTEND_UNSIGNED
+#undef POINTER_SIZE
+#define POINTER_SIZE 64
diff --git a/gcc-4.2.1/gcc/config/alpha/vms_tramp.asm b/gcc-4.2.1/gcc/config/alpha/vms_tramp.asm
new file mode 100644
index 000000000..d197521a8
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/vms_tramp.asm
@@ -0,0 +1,52 @@
+/* VMS trampoline for nested functions
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+;# Alpha OpenVMS trampoline
+;#
+ .set noreorder
+ .set volatile
+ .set noat
+ .file 1 "vms_tramp.asm"
+.text
+ .align 3
+ .globl __tramp
+ .ent __tramp
+__tramp..en:
+
+.link
+ .align 3
+__tramp:
+ .pdesc __tramp..en,null
+.text
+ ldq $1,24($27)
+ ldq $27,16($27)
+ ldq $28,8($27)
+ jmp $31,($28),0
+ .end __tramp
diff --git a/gcc-4.2.1/gcc/config/alpha/x-vms b/gcc-4.2.1/gcc/config/alpha/x-vms
new file mode 100644
index 000000000..053263a60
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/x-vms
@@ -0,0 +1,21 @@
+# Under VMS, directory names cannot contain dots.
+version:=$(shell echo $(BASEVER_c) | sed -e 's/\./_/g')
+
+libsubdir=$(libdir)/gcc-lib
+
+# Rules for linker and compiler wrappers. These are only useful on
+# a VMS host.
+EXTRA_PROGRAMS=ld.exe decc.exe
+vms-ld.o : $(srcdir)/config/alpha/vms-ld.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< $(OUTPUT_OPTION)
+ld.exe : vms-ld.o
+ $(CC) -o $@ vms-ld.o ../libiberty/libiberty.a
+
+vms-cc.o : $(srcdir)/config/alpha/vms-cc.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< $(OUTPUT_OPTION)
+decc.exe : vms-cc.o
+ $(CC) -o $@ vms-cc.o ../libiberty/libiberty.a
+
+# These extra parts can only be compiled on a VMS host and are only needed
+# on a VMS target. The rules are in t-vms.
+VMS_EXTRA_PARTS=vcrt0.o pcrt0.o
diff --git a/gcc-4.2.1/gcc/config/alpha/xm-vms.h b/gcc-4.2.1/gcc/config/alpha/xm-vms.h
new file mode 100644
index 000000000..6c418985d
--- /dev/null
+++ b/gcc-4.2.1/gcc/config/alpha/xm-vms.h
@@ -0,0 +1,57 @@
+/* Configuration for GNU C-compiler for openVMS/Alpha.
+ Copyright (C) 1996, 1997, 2001, 2004 Free Software Foundation, Inc.
+ Contributed by Klaus Kaempf (kkaempf@progis.de).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* A couple of conditionals for the execution machine are controlled here. */
+#ifndef VMS
+#define VMS
+#endif
+
+/* Define a local equivalent (sort of) for unlink */
+#define unlink remove
+
+/* Causes exit() to be redefined to __posix_exit() and
+ Posix compatible failure and success codes to be used */
+#define _POSIX_EXIT 1
+
+/* Open files in stream mode if not otherwise explicitly specified */
+#define __UNIX_FOPEN 1
+
+/* Write to stdout using fputc to avoid record terminators in pipes */
+#define __UNIX_FWRITE 1
+
+#define STDC_HEADERS 1
+
+#define HOST_EXECUTABLE_SUFFIX ".exe"
+#define HOST_OBJECT_SUFFIX ".obj"
+
+#define DUMPFILE_FORMAT "_%02d_"
+
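+/* Editorial note (not in the original source): the loop below exists
+   because VMS keeps multiple numbered versions of a file (NAME;1,
+   NAME;2, ...); each unlink removes only the highest version, so the
+   macro keeps deleting until stat no longer finds a regular file.  */
+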
+#define DELETE_IF_ORDINARY(NAME,ST,VERBOSE_FLAG) \
+do \
+ { \
+ while (stat (NAME, &ST) >= 0 && S_ISREG (ST.st_mode)) \
+ if (unlink (NAME) < 0) \
+ { \
+ if (VERBOSE_FLAG) \
+ perror_with_name (NAME); \
+ break; \
+ } \
+ } while (0)