Diffstat (limited to 'gcc-4.9/gcc/config/alpha')
24 files changed, 19978 insertions, 0 deletions
diff --git a/gcc-4.9/gcc/config/alpha/alpha-modes.def b/gcc-4.9/gcc/config/alpha/alpha-modes.def
new file mode 100644
index 000000000..dbfbed0b7
--- /dev/null
+++ b/gcc-4.9/gcc/config/alpha/alpha-modes.def
@@ -0,0 +1,27 @@
+/* Alpha extra machine modes.
+   Copyright (C) 2003-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+/* 128-bit floating point.  This gets reset in alpha_option_override
+   if VAX float format is in use.  */
+FLOAT_MODE (TF, 16, ieee_quad_format);
+
+/* Vector modes.  */
+VECTOR_MODES (INT, 8);      /* V8QI V4HI V2SI */
+VECTOR_MODE (INT, QI, 4);   /*           V4QI */
+VECTOR_MODE (INT, QI, 2);   /*           V2QI */
diff --git a/gcc-4.9/gcc/config/alpha/alpha-protos.h b/gcc-4.9/gcc/config/alpha/alpha-protos.h
new file mode 100644
index 000000000..753a762a5
--- /dev/null
+++ b/gcc-4.9/gcc/config/alpha/alpha-protos.h
@@ -0,0 +1,117 @@
+/* Prototypes for alpha.c functions used in the md file & elsewhere.
+   Copyright (C) 1999-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+extern int alpha_next_sequence_number;
+
+extern void literal_section (void);
+extern int zap_mask (HOST_WIDE_INT);
+extern int direct_return (void);
+
+extern int alpha_sa_size (void);
+extern HOST_WIDE_INT alpha_initial_elimination_offset (unsigned int,
+						       unsigned int);
+extern void alpha_expand_prologue (void);
+extern void alpha_expand_epilogue (void);
+extern void alpha_output_filename (FILE *, const char *);
+
+extern bool alpha_legitimate_constant_p (enum machine_mode, rtx);
+extern rtx alpha_legitimize_reload_address (rtx, enum machine_mode,
+					    int, int, int);
+
+extern rtx split_small_symbolic_operand (rtx);
+
+extern void get_aligned_mem (rtx, rtx *, rtx *);
+extern rtx get_unaligned_address (rtx);
+extern rtx get_unaligned_offset (rtx, HOST_WIDE_INT);
+extern enum reg_class alpha_preferred_reload_class (rtx, enum reg_class);
+
+extern void alpha_set_memflags (rtx, rtx);
+extern bool alpha_split_const_mov (enum machine_mode, rtx *);
+extern bool alpha_expand_mov (enum machine_mode, rtx *);
+extern bool alpha_expand_mov_nobwx (enum machine_mode, rtx *);
+extern void alpha_expand_movmisalign (enum machine_mode, rtx *);
+extern void alpha_emit_floatuns (rtx[]);
+extern rtx alpha_emit_conditional_move (rtx, enum machine_mode);
+extern void alpha_split_tmode_pair (rtx[], enum machine_mode, bool);
+extern void alpha_split_tfmode_frobsign (rtx[], rtx (*)(rtx, rtx, rtx));
+extern void alpha_expand_unaligned_load (rtx, rtx, HOST_WIDE_INT,
+					 HOST_WIDE_INT, int);
+extern void alpha_expand_unaligned_store (rtx, rtx, HOST_WIDE_INT,
+					  HOST_WIDE_INT);
+extern int alpha_expand_block_move (rtx []);
+extern int alpha_expand_block_clear (rtx []);
+extern rtx alpha_expand_zap_mask (HOST_WIDE_INT);
+extern void alpha_expand_builtin_vector_binop (rtx (*)(rtx, rtx, rtx),
+					       enum machine_mode,
+					       rtx, rtx, rtx);
+extern void alpha_expand_builtin_establish_vms_condition_handler (rtx, rtx);
+extern void alpha_expand_builtin_revert_vms_condition_handler (rtx);
+
+extern rtx alpha_return_addr (int, rtx);
+extern rtx alpha_gp_save_rtx (void);
+extern void print_operand (FILE *, rtx, int);
+extern void print_operand_address (FILE *, rtx);
+extern void alpha_initialize_trampoline (rtx, rtx, rtx, int, int, int);
+
+extern rtx alpha_va_arg (tree, tree);
+extern rtx function_value (const_tree, const_tree, enum machine_mode);
+
+extern void alpha_start_function (FILE *, const char *, tree);
+extern void alpha_end_function (FILE *, const char *, tree);
+
+extern int alpha_find_lo_sum_using_gp (rtx);
+
+#ifdef REAL_VALUE_TYPE
+extern int check_float_value (enum machine_mode, REAL_VALUE_TYPE *, int);
+#endif
+
+#ifdef RTX_CODE
+extern void alpha_emit_conditional_branch (rtx[], enum machine_mode);
+extern bool alpha_emit_setcc (rtx[], enum machine_mode);
+extern int alpha_split_conditional_move (enum rtx_code, rtx, rtx, rtx, rtx);
+extern void alpha_emit_xfloating_arith (enum rtx_code, rtx[]);
+extern void alpha_emit_xfloating_cvt (enum rtx_code, rtx[]);
+extern void alpha_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx,
+				   enum memmodel);
+extern void alpha_split_compare_and_swap (rtx op[]);
+extern void alpha_expand_compare_and_swap_12 (rtx op[]);
+extern void alpha_split_compare_and_swap_12 (rtx op[]);
+extern void alpha_split_atomic_exchange (rtx op[]);
+extern void alpha_expand_atomic_exchange_12 (rtx op[]);
+extern void alpha_split_atomic_exchange_12 (rtx op[]);
+#endif
+
+extern rtx alpha_use_linkage (rtx, bool, bool);
+
+#if TARGET_ABI_OPEN_VMS
+extern enum avms_arg_type alpha_arg_type (enum machine_mode);
+extern rtx alpha_arg_info_reg_val (CUMULATIVE_ARGS);
+extern void avms_asm_output_external (FILE *, tree, const char *);
+extern void vms_output_aligned_decl_common (FILE *, tree, const char *,
+					    unsigned HOST_WIDE_INT,
+					    unsigned int);
+extern HOST_WIDE_INT alpha_vms_initial_elimination_offset (unsigned int,
+							   unsigned int);
+#endif
+
+extern rtx unicosmk_add_call_info_word (rtx);
+
+extern int some_small_symbolic_operand_int (rtx *, void *);
+extern int tls_symbolic_operand_1 (rtx, int, int);
+extern rtx resolve_reload_operand (rtx);
diff --git a/gcc-4.9/gcc/config/alpha/alpha.c b/gcc-4.9/gcc/config/alpha/alpha.c
new file mode 100644
index 000000000..df4cc1b1c
--- /dev/null
+++ b/gcc-4.9/gcc/config/alpha/alpha.c
@@ -0,0 +1,9898 @@
+/* Subroutines used for code generation on the DEC Alpha.
+   Copyright (C) 1992-2014 Free Software Foundation, Inc.
+   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "recog.h"
+#include "expr.h"
+#include "optabs.h"
+#include "reload.h"
+#include "obstack.h"
+#include "except.h"
+#include "function.h"
+#include "diagnostic-core.h"
+#include "ggc.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "common/common-target.h"
+#include "debug.h"
+#include "langhooks.h"
+#include "splay-tree.h"
+#include "pointer-set.h"
+#include "hash-table.h"
+#include "vec.h"
+#include "basic-block.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "gimple-fold.h"
+#include "tree-eh.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "gimple.h"
+#include "gimplify.h"
+#include "gimple-ssa.h"
+#include "stringpool.h"
+#include "tree-ssanames.h"
+#include "tree-stdarg.h"
+#include "tm-constrs.h"
+#include "df.h"
+#include "libfuncs.h"
+#include "opts.h"
+#include "params.h"
+
+/* Specify which cpu to schedule for.  */
+enum processor_type alpha_tune;
+
+/* Which cpu we're generating code for.  */
+enum processor_type alpha_cpu;
+
+static const char * const alpha_cpu_name[] =
+{
+  "ev4", "ev5", "ev6"
+};
+
+/* Specify how accurate floating-point traps need to be.  */
+
+enum alpha_trap_precision alpha_tp;
+
+/* Specify the floating-point rounding mode.  */
+
+enum alpha_fp_rounding_mode alpha_fprm;
+
+/* Specify which things cause traps.  */
+
+enum alpha_fp_trap_mode alpha_fptm;
+
+/* Nonzero if inside of a function, because the Alpha asm can't
+   handle .files inside of functions.  */
+
+static int inside_function = FALSE;
+
+/* The number of cycles of latency we should assume on memory reads.
*/ + +int alpha_memory_latency = 3; + +/* Whether the function needs the GP. */ + +static int alpha_function_needs_gp; + +/* The assembler name of the current function. */ + +static const char *alpha_fnname; + +/* The next explicit relocation sequence number. */ +extern GTY(()) int alpha_next_sequence_number; +int alpha_next_sequence_number = 1; + +/* The literal and gpdisp sequence numbers for this insn, as printed + by %# and %* respectively. */ +extern GTY(()) int alpha_this_literal_sequence_number; +extern GTY(()) int alpha_this_gpdisp_sequence_number; +int alpha_this_literal_sequence_number; +int alpha_this_gpdisp_sequence_number; + +/* Costs of various operations on the different architectures. */ + +struct alpha_rtx_cost_data +{ + unsigned char fp_add; + unsigned char fp_mult; + unsigned char fp_div_sf; + unsigned char fp_div_df; + unsigned char int_mult_si; + unsigned char int_mult_di; + unsigned char int_shift; + unsigned char int_cmov; + unsigned short int_div; +}; + +static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] = +{ + { /* EV4 */ + COSTS_N_INSNS (6), /* fp_add */ + COSTS_N_INSNS (6), /* fp_mult */ + COSTS_N_INSNS (34), /* fp_div_sf */ + COSTS_N_INSNS (63), /* fp_div_df */ + COSTS_N_INSNS (23), /* int_mult_si */ + COSTS_N_INSNS (23), /* int_mult_di */ + COSTS_N_INSNS (2), /* int_shift */ + COSTS_N_INSNS (2), /* int_cmov */ + COSTS_N_INSNS (97), /* int_div */ + }, + { /* EV5 */ + COSTS_N_INSNS (4), /* fp_add */ + COSTS_N_INSNS (4), /* fp_mult */ + COSTS_N_INSNS (15), /* fp_div_sf */ + COSTS_N_INSNS (22), /* fp_div_df */ + COSTS_N_INSNS (8), /* int_mult_si */ + COSTS_N_INSNS (12), /* int_mult_di */ + COSTS_N_INSNS (1) + 1, /* int_shift */ + COSTS_N_INSNS (1), /* int_cmov */ + COSTS_N_INSNS (83), /* int_div */ + }, + { /* EV6 */ + COSTS_N_INSNS (4), /* fp_add */ + COSTS_N_INSNS (4), /* fp_mult */ + COSTS_N_INSNS (12), /* fp_div_sf */ + COSTS_N_INSNS (15), /* fp_div_df */ + COSTS_N_INSNS (7), /* int_mult_si */ + COSTS_N_INSNS (7), /* int_mult_di */ + COSTS_N_INSNS (1), /* int_shift */ + COSTS_N_INSNS (2), /* int_cmov */ + COSTS_N_INSNS (86), /* int_div */ + }, +}; + +/* Similar but tuned for code size instead of execution latency. The + extra +N is fractional cost tuning based on latency. It's used to + encourage use of cheaper insns like shift, but only if there's just + one of them. */ + +static struct alpha_rtx_cost_data const alpha_rtx_cost_size = +{ + COSTS_N_INSNS (1), /* fp_add */ + COSTS_N_INSNS (1), /* fp_mult */ + COSTS_N_INSNS (1), /* fp_div_sf */ + COSTS_N_INSNS (1) + 1, /* fp_div_df */ + COSTS_N_INSNS (1) + 1, /* int_mult_si */ + COSTS_N_INSNS (1) + 2, /* int_mult_di */ + COSTS_N_INSNS (1), /* int_shift */ + COSTS_N_INSNS (1), /* int_cmov */ + COSTS_N_INSNS (6), /* int_div */ +}; + +/* Get the number of args of a function in one of two ways. */ +#if TARGET_ABI_OPEN_VMS +#define NUM_ARGS crtl->args.info.num_args +#else +#define NUM_ARGS crtl->args.info +#endif + +#define REG_PV 27 +#define REG_RA 26 + +/* Declarations of static functions. */ +static struct machine_function *alpha_init_machine_status (void); +static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx); + +#if TARGET_ABI_OPEN_VMS +static void alpha_write_linkage (FILE *, const char *); +static bool vms_valid_pointer_mode (enum machine_mode); +#else +#define vms_patch_builtins() gcc_unreachable() +#endif + +#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING +/* Implement TARGET_MANGLE_TYPE. 
*/ + +static const char * +alpha_mangle_type (const_tree type) +{ + if (TYPE_MAIN_VARIANT (type) == long_double_type_node + && TARGET_LONG_DOUBLE_128) + return "g"; + + /* For all other types, use normal C++ mangling. */ + return NULL; +} +#endif + +/* Parse target option strings. */ + +static void +alpha_option_override (void) +{ + static const struct cpu_table { + const char *const name; + const enum processor_type processor; + const int flags; + const unsigned short line_size; /* in bytes */ + const unsigned short l1_size; /* in kb. */ + const unsigned short l2_size; /* in kb. */ + } cpu_table[] = { + /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches. + EV4/EV45 had 128k to 16M 32-byte direct Bcache. LCA45 + had 64k to 8M 8-byte direct Bcache. */ + { "ev4", PROCESSOR_EV4, 0, 32, 8, 8*1024 }, + { "21064", PROCESSOR_EV4, 0, 32, 8, 8*1024 }, + { "ev45", PROCESSOR_EV4, 0, 32, 16, 16*1024 }, + + /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2, + and 1M to 16M 64 byte L3 (not modeled). + PCA56 had 16k 64-byte cache; PCA57 had 32k Icache. + PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache. */ + { "ev5", PROCESSOR_EV5, 0, 32, 8, 96 }, + { "21164", PROCESSOR_EV5, 0, 32, 8, 96 }, + { "ev56", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 }, + { "21164a", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 }, + { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 }, + { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 }, + { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 }, + + /* EV6 had 64k 64 byte L1, 1M to 16M Bcache. */ + { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 }, + { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 }, + { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX, + 64, 64, 16*1024 }, + { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX, + 64, 64, 16*1024 } + }; + + int const ct_size = ARRAY_SIZE (cpu_table); + int line_size = 0, l1_size = 0, l2_size = 0; + int i; + +#ifdef SUBTARGET_OVERRIDE_OPTIONS + SUBTARGET_OVERRIDE_OPTIONS; +#endif + + /* Default to full IEEE compliance mode for Go language. */ + if (strcmp (lang_hooks.name, "GNU Go") == 0 + && !(target_flags_explicit & MASK_IEEE)) + target_flags |= MASK_IEEE; + + alpha_fprm = ALPHA_FPRM_NORM; + alpha_tp = ALPHA_TP_PROG; + alpha_fptm = ALPHA_FPTM_N; + + if (TARGET_IEEE) + { + alpha_tp = ALPHA_TP_INSN; + alpha_fptm = ALPHA_FPTM_SU; + } + if (TARGET_IEEE_WITH_INEXACT) + { + alpha_tp = ALPHA_TP_INSN; + alpha_fptm = ALPHA_FPTM_SUI; + } + + if (alpha_tp_string) + { + if (! strcmp (alpha_tp_string, "p")) + alpha_tp = ALPHA_TP_PROG; + else if (! strcmp (alpha_tp_string, "f")) + alpha_tp = ALPHA_TP_FUNC; + else if (! strcmp (alpha_tp_string, "i")) + alpha_tp = ALPHA_TP_INSN; + else + error ("bad value %qs for -mtrap-precision switch", alpha_tp_string); + } + + if (alpha_fprm_string) + { + if (! strcmp (alpha_fprm_string, "n")) + alpha_fprm = ALPHA_FPRM_NORM; + else if (! strcmp (alpha_fprm_string, "m")) + alpha_fprm = ALPHA_FPRM_MINF; + else if (! strcmp (alpha_fprm_string, "c")) + alpha_fprm = ALPHA_FPRM_CHOP; + else if (! 
strcmp (alpha_fprm_string,"d")) + alpha_fprm = ALPHA_FPRM_DYN; + else + error ("bad value %qs for -mfp-rounding-mode switch", + alpha_fprm_string); + } + + if (alpha_fptm_string) + { + if (strcmp (alpha_fptm_string, "n") == 0) + alpha_fptm = ALPHA_FPTM_N; + else if (strcmp (alpha_fptm_string, "u") == 0) + alpha_fptm = ALPHA_FPTM_U; + else if (strcmp (alpha_fptm_string, "su") == 0) + alpha_fptm = ALPHA_FPTM_SU; + else if (strcmp (alpha_fptm_string, "sui") == 0) + alpha_fptm = ALPHA_FPTM_SUI; + else + error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string); + } + + if (alpha_cpu_string) + { + for (i = 0; i < ct_size; i++) + if (! strcmp (alpha_cpu_string, cpu_table [i].name)) + { + alpha_tune = alpha_cpu = cpu_table[i].processor; + line_size = cpu_table[i].line_size; + l1_size = cpu_table[i].l1_size; + l2_size = cpu_table[i].l2_size; + target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX); + target_flags |= cpu_table[i].flags; + break; + } + if (i == ct_size) + error ("bad value %qs for -mcpu switch", alpha_cpu_string); + } + + if (alpha_tune_string) + { + for (i = 0; i < ct_size; i++) + if (! strcmp (alpha_tune_string, cpu_table [i].name)) + { + alpha_tune = cpu_table[i].processor; + line_size = cpu_table[i].line_size; + l1_size = cpu_table[i].l1_size; + l2_size = cpu_table[i].l2_size; + break; + } + if (i == ct_size) + error ("bad value %qs for -mtune switch", alpha_tune_string); + } + + if (line_size) + maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size, + global_options.x_param_values, + global_options_set.x_param_values); + if (l1_size) + maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size, + global_options.x_param_values, + global_options_set.x_param_values); + if (l2_size) + maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size, + global_options.x_param_values, + global_options_set.x_param_values); + + /* Do some sanity checks on the above options. */ + + if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI) + && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6) + { + warning (0, "fp software completion requires -mtrap-precision=i"); + alpha_tp = ALPHA_TP_INSN; + } + + if (alpha_cpu == PROCESSOR_EV6) + { + /* Except for EV6 pass 1 (not released), we always have precise + arithmetic traps. Which means we can do software completion + without minding trap shadows. */ + alpha_tp = ALPHA_TP_PROG; + } + + if (TARGET_FLOAT_VAX) + { + if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN) + { + warning (0, "rounding mode not supported for VAX floats"); + alpha_fprm = ALPHA_FPRM_NORM; + } + if (alpha_fptm == ALPHA_FPTM_SUI) + { + warning (0, "trap mode not supported for VAX floats"); + alpha_fptm = ALPHA_FPTM_SU; + } + if (target_flags_explicit & MASK_LONG_DOUBLE_128) + warning (0, "128-bit long double not supported for VAX floats"); + target_flags &= ~MASK_LONG_DOUBLE_128; + } + + { + char *end; + int lat; + + if (!alpha_mlat_string) + alpha_mlat_string = "L1"; + + if (ISDIGIT ((unsigned char)alpha_mlat_string[0]) + && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0')) + ; + else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l') + && ISDIGIT ((unsigned char)alpha_mlat_string[1]) + && alpha_mlat_string[2] == '\0') + { + static int const cache_latency[][4] = + { + { 3, 30, -1 }, /* ev4 -- Bcache is a guess */ + { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */ + { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. 
*/ + }; + + lat = alpha_mlat_string[1] - '0'; + if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1) + { + warning (0, "L%d cache latency unknown for %s", + lat, alpha_cpu_name[alpha_tune]); + lat = 3; + } + else + lat = cache_latency[alpha_tune][lat-1]; + } + else if (! strcmp (alpha_mlat_string, "main")) + { + /* Most current memories have about 370ns latency. This is + a reasonable guess for a fast cpu. */ + lat = 150; + } + else + { + warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string); + lat = 3; + } + + alpha_memory_latency = lat; + } + + /* Default the definition of "small data" to 8 bytes. */ + if (!global_options_set.x_g_switch_value) + g_switch_value = 8; + + /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */ + if (flag_pic == 1) + target_flags |= MASK_SMALL_DATA; + else if (flag_pic == 2) + target_flags &= ~MASK_SMALL_DATA; + + /* Align labels and loops for optimal branching. */ + /* ??? Kludge these by not doing anything if we don't optimize. */ + if (optimize > 0) + { + if (align_loops <= 0) + align_loops = 16; + if (align_jumps <= 0) + align_jumps = 16; + } + if (align_functions <= 0) + align_functions = 16; + + /* Register variables and functions with the garbage collector. */ + + /* Set up function hooks. */ + init_machine_status = alpha_init_machine_status; + + /* Tell the compiler when we're using VAX floating point. */ + if (TARGET_FLOAT_VAX) + { + REAL_MODE_FORMAT (SFmode) = &vax_f_format; + REAL_MODE_FORMAT (DFmode) = &vax_g_format; + REAL_MODE_FORMAT (TFmode) = NULL; + } + +#ifdef TARGET_DEFAULT_LONG_DOUBLE_128 + if (!(target_flags_explicit & MASK_LONG_DOUBLE_128)) + target_flags |= MASK_LONG_DOUBLE_128; +#endif +} + +/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */ + +int +zap_mask (HOST_WIDE_INT value) +{ + int i; + + for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR; + i++, value >>= 8) + if ((value & 0xff) != 0 && (value & 0xff) != 0xff) + return 0; + + return 1; +} + +/* Return true if OP is valid for a particular TLS relocation. + We are already guaranteed that OP is a CONST. */ + +int +tls_symbolic_operand_1 (rtx op, int size, int unspec) +{ + op = XEXP (op, 0); + + if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec) + return 0; + op = XVECEXP (op, 0, 0); + + if (GET_CODE (op) != SYMBOL_REF) + return 0; + + switch (SYMBOL_REF_TLS_MODEL (op)) + { + case TLS_MODEL_LOCAL_DYNAMIC: + return unspec == UNSPEC_DTPREL && size == alpha_tls_size; + case TLS_MODEL_INITIAL_EXEC: + return unspec == UNSPEC_TPREL && size == 64; + case TLS_MODEL_LOCAL_EXEC: + return unspec == UNSPEC_TPREL && size == alpha_tls_size; + default: + gcc_unreachable (); + } +} + +/* Used by aligned_memory_operand and unaligned_memory_operand to + resolve what reload is going to do with OP if it's a register. */ + +rtx +resolve_reload_operand (rtx op) +{ + if (reload_in_progress) + { + rtx tmp = op; + if (GET_CODE (tmp) == SUBREG) + tmp = SUBREG_REG (tmp); + if (REG_P (tmp) + && REGNO (tmp) >= FIRST_PSEUDO_REGISTER) + { + op = reg_equiv_memory_loc (REGNO (tmp)); + if (op == 0) + return 0; + } + } + return op; +} + +/* The scalar modes supported differs from the default check-what-c-supports + version in that sometimes TFmode is available even when long double + indicates only DFmode. 
*/ + +static bool +alpha_scalar_mode_supported_p (enum machine_mode mode) +{ + switch (mode) + { + case QImode: + case HImode: + case SImode: + case DImode: + case TImode: /* via optabs.c */ + return true; + + case SFmode: + case DFmode: + return true; + + case TFmode: + return TARGET_HAS_XFLOATING_LIBS; + + default: + return false; + } +} + +/* Alpha implements a couple of integer vector mode operations when + TARGET_MAX is enabled. We do not check TARGET_MAX here, however, + which allows the vectorizer to operate on e.g. move instructions, + or when expand_vector_operations can do something useful. */ + +static bool +alpha_vector_mode_supported_p (enum machine_mode mode) +{ + return mode == V8QImode || mode == V4HImode || mode == V2SImode; +} + +/* Return 1 if this function can directly return via $26. */ + +int +direct_return (void) +{ + return (TARGET_ABI_OSF + && reload_completed + && alpha_sa_size () == 0 + && get_frame_size () == 0 + && crtl->outgoing_args_size == 0 + && crtl->args.pretend_args_size == 0); +} + +/* Return the TLS model to use for SYMBOL. */ + +static enum tls_model +tls_symbolic_operand_type (rtx symbol) +{ + enum tls_model model; + + if (GET_CODE (symbol) != SYMBOL_REF) + return TLS_MODEL_NONE; + model = SYMBOL_REF_TLS_MODEL (symbol); + + /* Local-exec with a 64-bit size is the same code as initial-exec. */ + if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64) + model = TLS_MODEL_INITIAL_EXEC; + + return model; +} + +/* Return true if the function DECL will share the same GP as any + function in the current unit of translation. */ + +static bool +decl_has_samegp (const_tree decl) +{ + /* Functions that are not local can be overridden, and thus may + not share the same gp. */ + if (!(*targetm.binds_local_p) (decl)) + return false; + + /* If -msmall-data is in effect, assume that there is only one GP + for the module, and so any local symbol has this property. We + need explicit relocations to be able to enforce this for symbols + not defined in this unit of translation, however. */ + if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA) + return true; + + /* Functions that are not external are defined in this UoT. */ + /* ??? Irritatingly, static functions not yet emitted are still + marked "external". Apply this to non-static functions only. */ + return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl); +} + +/* Return true if EXP should be placed in the small data section. */ + +static bool +alpha_in_small_data_p (const_tree exp) +{ + /* We want to merge strings, so we never consider them small data. */ + if (TREE_CODE (exp) == STRING_CST) + return false; + + /* Functions are never in the small data area. Duh. */ + if (TREE_CODE (exp) == FUNCTION_DECL) + return false; + + if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp)) + { + const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp)); + if (strcmp (section, ".sdata") == 0 + || strcmp (section, ".sbss") == 0) + return true; + } + else + { + HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp)); + + /* If this is an incomplete type with size 0, then we can't put it + in sdata because it might be too big when completed. 
*/ + if (size > 0 && size <= g_switch_value) + return true; + } + + return false; +} + +#if TARGET_ABI_OPEN_VMS +static bool +vms_valid_pointer_mode (enum machine_mode mode) +{ + return (mode == SImode || mode == DImode); +} + +static bool +alpha_linkage_symbol_p (const char *symname) +{ + int symlen = strlen (symname); + + if (symlen > 4) + return strcmp (&symname [symlen - 4], "..lk") == 0; + + return false; +} + +#define LINKAGE_SYMBOL_REF_P(X) \ + ((GET_CODE (X) == SYMBOL_REF \ + && alpha_linkage_symbol_p (XSTR (X, 0))) \ + || (GET_CODE (X) == CONST \ + && GET_CODE (XEXP (X, 0)) == PLUS \ + && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \ + && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0)))) +#endif + +/* legitimate_address_p recognizes an RTL expression that is a valid + memory address for an instruction. The MODE argument is the + machine mode for the MEM expression that wants to use this address. + + For Alpha, we have either a constant address or the sum of a + register and a constant address, or just a register. For DImode, + any of those forms can be surrounded with an AND that clear the + low-order three bits; this is an "unaligned" access. */ + +static bool +alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict) +{ + /* If this is an ldq_u type address, discard the outer AND. */ + if (mode == DImode + && GET_CODE (x) == AND + && CONST_INT_P (XEXP (x, 1)) + && INTVAL (XEXP (x, 1)) == -8) + x = XEXP (x, 0); + + /* Discard non-paradoxical subregs. */ + if (GET_CODE (x) == SUBREG + && (GET_MODE_SIZE (GET_MODE (x)) + < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))) + x = SUBREG_REG (x); + + /* Unadorned general registers are valid. */ + if (REG_P (x) + && (strict + ? STRICT_REG_OK_FOR_BASE_P (x) + : NONSTRICT_REG_OK_FOR_BASE_P (x))) + return true; + + /* Constant addresses (i.e. +/- 32k) are valid. */ + if (CONSTANT_ADDRESS_P (x)) + return true; + +#if TARGET_ABI_OPEN_VMS + if (LINKAGE_SYMBOL_REF_P (x)) + return true; +#endif + + /* Register plus a small constant offset is valid. */ + if (GET_CODE (x) == PLUS) + { + rtx ofs = XEXP (x, 1); + x = XEXP (x, 0); + + /* Discard non-paradoxical subregs. */ + if (GET_CODE (x) == SUBREG + && (GET_MODE_SIZE (GET_MODE (x)) + < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))) + x = SUBREG_REG (x); + + if (REG_P (x)) + { + if (! strict + && NONSTRICT_REG_OK_FP_BASE_P (x) + && CONST_INT_P (ofs)) + return true; + if ((strict + ? STRICT_REG_OK_FOR_BASE_P (x) + : NONSTRICT_REG_OK_FOR_BASE_P (x)) + && CONSTANT_ADDRESS_P (ofs)) + return true; + } + } + + /* If we're managing explicit relocations, LO_SUM is valid, as are small + data symbols. Avoid explicit relocations of modes larger than word + mode since i.e. $LC0+8($1) can fold around +/- 32k offset. */ + else if (TARGET_EXPLICIT_RELOCS + && GET_MODE_SIZE (mode) <= UNITS_PER_WORD) + { + if (small_symbolic_operand (x, Pmode)) + return true; + + if (GET_CODE (x) == LO_SUM) + { + rtx ofs = XEXP (x, 1); + x = XEXP (x, 0); + + /* Discard non-paradoxical subregs. */ + if (GET_CODE (x) == SUBREG + && (GET_MODE_SIZE (GET_MODE (x)) + < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))) + x = SUBREG_REG (x); + + /* Must have a valid base register. */ + if (! (REG_P (x) + && (strict + ? STRICT_REG_OK_FOR_BASE_P (x) + : NONSTRICT_REG_OK_FOR_BASE_P (x)))) + return false; + + /* The symbol must be local. 
*/ + if (local_symbolic_operand (ofs, Pmode) + || dtp32_symbolic_operand (ofs, Pmode) + || tp32_symbolic_operand (ofs, Pmode)) + return true; + } + } + + return false; +} + +/* Build the SYMBOL_REF for __tls_get_addr. */ + +static GTY(()) rtx tls_get_addr_libfunc; + +static rtx +get_tls_get_addr (void) +{ + if (!tls_get_addr_libfunc) + tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr"); + return tls_get_addr_libfunc; +} + +/* Try machine-dependent ways of modifying an illegitimate address + to be legitimate. If we find one, return the new, valid address. */ + +static rtx +alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode) +{ + HOST_WIDE_INT addend; + + /* If the address is (plus reg const_int) and the CONST_INT is not a + valid offset, compute the high part of the constant and add it to + the register. Then our address is (plus temp low-part-const). */ + if (GET_CODE (x) == PLUS + && REG_P (XEXP (x, 0)) + && CONST_INT_P (XEXP (x, 1)) + && ! CONSTANT_ADDRESS_P (XEXP (x, 1))) + { + addend = INTVAL (XEXP (x, 1)); + x = XEXP (x, 0); + goto split_addend; + } + + /* If the address is (const (plus FOO const_int)), find the low-order + part of the CONST_INT. Then load FOO plus any high-order part of the + CONST_INT into a register. Our address is (plus reg low-part-const). + This is done to reduce the number of GOT entries. */ + if (can_create_pseudo_p () + && GET_CODE (x) == CONST + && GET_CODE (XEXP (x, 0)) == PLUS + && CONST_INT_P (XEXP (XEXP (x, 0), 1))) + { + addend = INTVAL (XEXP (XEXP (x, 0), 1)); + x = force_reg (Pmode, XEXP (XEXP (x, 0), 0)); + goto split_addend; + } + + /* If we have a (plus reg const), emit the load as in (2), then add + the two registers, and finally generate (plus reg low-part-const) as + our address. */ + if (can_create_pseudo_p () + && GET_CODE (x) == PLUS + && REG_P (XEXP (x, 0)) + && GET_CODE (XEXP (x, 1)) == CONST + && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS + && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1))) + { + addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1)); + x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0), + XEXP (XEXP (XEXP (x, 1), 0), 0), + NULL_RTX, 1, OPTAB_LIB_WIDEN); + goto split_addend; + } + + /* If this is a local symbol, split the address into HIGH/LO_SUM parts. + Avoid modes larger than word mode since i.e. $LC0+8($1) can fold + around +/- 32k offset. 
*/ + if (TARGET_EXPLICIT_RELOCS + && GET_MODE_SIZE (mode) <= UNITS_PER_WORD + && symbolic_operand (x, Pmode)) + { + rtx r0, r16, eqv, tga, tp, insn, dest, seq; + + switch (tls_symbolic_operand_type (x)) + { + case TLS_MODEL_NONE: + break; + + case TLS_MODEL_GLOBAL_DYNAMIC: + start_sequence (); + + r0 = gen_rtx_REG (Pmode, 0); + r16 = gen_rtx_REG (Pmode, 16); + tga = get_tls_get_addr (); + dest = gen_reg_rtx (Pmode); + seq = GEN_INT (alpha_next_sequence_number++); + + emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq)); + insn = gen_call_value_osf_tlsgd (r0, tga, seq); + insn = emit_call_insn (insn); + RTL_CONST_CALL_P (insn) = 1; + use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16); + + insn = get_insns (); + end_sequence (); + + emit_libcall_block (insn, dest, r0, x); + return dest; + + case TLS_MODEL_LOCAL_DYNAMIC: + start_sequence (); + + r0 = gen_rtx_REG (Pmode, 0); + r16 = gen_rtx_REG (Pmode, 16); + tga = get_tls_get_addr (); + scratch = gen_reg_rtx (Pmode); + seq = GEN_INT (alpha_next_sequence_number++); + + emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq)); + insn = gen_call_value_osf_tlsldm (r0, tga, seq); + insn = emit_call_insn (insn); + RTL_CONST_CALL_P (insn) = 1; + use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16); + + insn = get_insns (); + end_sequence (); + + eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), + UNSPEC_TLSLDM_CALL); + emit_libcall_block (insn, scratch, r0, eqv); + + eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL); + eqv = gen_rtx_CONST (Pmode, eqv); + + if (alpha_tls_size == 64) + { + dest = gen_reg_rtx (Pmode); + emit_insn (gen_rtx_SET (VOIDmode, dest, eqv)); + emit_insn (gen_adddi3 (dest, dest, scratch)); + return dest; + } + if (alpha_tls_size == 32) + { + insn = gen_rtx_HIGH (Pmode, eqv); + insn = gen_rtx_PLUS (Pmode, scratch, insn); + scratch = gen_reg_rtx (Pmode); + emit_insn (gen_rtx_SET (VOIDmode, scratch, insn)); + } + return gen_rtx_LO_SUM (Pmode, scratch, eqv); + + case TLS_MODEL_INITIAL_EXEC: + eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL); + eqv = gen_rtx_CONST (Pmode, eqv); + tp = gen_reg_rtx (Pmode); + scratch = gen_reg_rtx (Pmode); + dest = gen_reg_rtx (Pmode); + + emit_insn (gen_get_thread_pointerdi (tp)); + emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv)); + emit_insn (gen_adddi3 (dest, tp, scratch)); + return dest; + + case TLS_MODEL_LOCAL_EXEC: + eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL); + eqv = gen_rtx_CONST (Pmode, eqv); + tp = gen_reg_rtx (Pmode); + + emit_insn (gen_get_thread_pointerdi (tp)); + if (alpha_tls_size == 32) + { + insn = gen_rtx_HIGH (Pmode, eqv); + insn = gen_rtx_PLUS (Pmode, tp, insn); + tp = gen_reg_rtx (Pmode); + emit_insn (gen_rtx_SET (VOIDmode, tp, insn)); + } + return gen_rtx_LO_SUM (Pmode, tp, eqv); + + default: + gcc_unreachable (); + } + + if (local_symbolic_operand (x, Pmode)) + { + if (small_symbolic_operand (x, Pmode)) + return x; + else + { + if (can_create_pseudo_p ()) + scratch = gen_reg_rtx (Pmode); + emit_insn (gen_rtx_SET (VOIDmode, scratch, + gen_rtx_HIGH (Pmode, x))); + return gen_rtx_LO_SUM (Pmode, scratch, x); + } + } + } + + return NULL; + + split_addend: + { + HOST_WIDE_INT low, high; + + low = ((addend & 0xffff) ^ 0x8000) - 0x8000; + addend -= low; + high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000; + addend -= high; + + if (addend) + x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend), + (!can_create_pseudo_p () ? 
scratch : NULL_RTX), + 1, OPTAB_LIB_WIDEN); + if (high) + x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high), + (!can_create_pseudo_p () ? scratch : NULL_RTX), + 1, OPTAB_LIB_WIDEN); + + return plus_constant (Pmode, x, low); + } +} + + +/* Try machine-dependent ways of modifying an illegitimate address + to be legitimate. Return X or the new, valid address. */ + +static rtx +alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, + enum machine_mode mode) +{ + rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode); + return new_x ? new_x : x; +} + +/* Return true if ADDR has an effect that depends on the machine mode it + is used for. On the Alpha this is true only for the unaligned modes. + We can simplify the test since we know that the address must be valid. */ + +static bool +alpha_mode_dependent_address_p (const_rtx addr, + addr_space_t as ATTRIBUTE_UNUSED) +{ + return GET_CODE (addr) == AND; +} + +/* Primarily this is required for TLS symbols, but given that our move + patterns *ought* to be able to handle any symbol at any time, we + should never be spilling symbolic operands to the constant pool, ever. */ + +static bool +alpha_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x) +{ + enum rtx_code code = GET_CODE (x); + return code == SYMBOL_REF || code == LABEL_REF || code == CONST; +} + +/* We do not allow indirect calls to be optimized into sibling calls, nor + can we allow a call to a function with a different GP to be optimized + into a sibcall. */ + +static bool +alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED) +{ + /* Can't do indirect tail calls, since we don't know if the target + uses the same GP. */ + if (!decl) + return false; + + /* Otherwise, we can make a tail call if the target function shares + the same GP. */ + return decl_has_samegp (decl); +} + +int +some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED) +{ + rtx x = *px; + + /* Don't re-split. */ + if (GET_CODE (x) == LO_SUM) + return -1; + + return small_symbolic_operand (x, Pmode) != 0; +} + +static int +split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED) +{ + rtx x = *px; + + /* Don't re-split. */ + if (GET_CODE (x) == LO_SUM) + return -1; + + if (small_symbolic_operand (x, Pmode)) + { + x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x); + *px = x; + return -1; + } + + return 0; +} + +rtx +split_small_symbolic_operand (rtx x) +{ + x = copy_insn (x); + for_each_rtx (&x, split_small_symbolic_operand_1, NULL); + return x; +} + +/* Indicate that INSN cannot be duplicated. This is true for any insn + that we've marked with gpdisp relocs, since those have to stay in + 1-1 correspondence with one another. + + Technically we could copy them if we could set up a mapping from one + sequence number to another, across the set of insns to be duplicated. + This seems overly complicated and error-prone since interblock motion + from sched-ebb could move one of the pair of insns to a different block. + + Also cannot allow jsr insns to be duplicated. If they throw exceptions, + then they'll be in a different block from their ldgp. Which could lead + the bb reorder code to think that it would be ok to copy just the block + containing the call and branch to the block containing the ldgp. 
*/ + +static bool +alpha_cannot_copy_insn_p (rtx insn) +{ + if (!reload_completed || !TARGET_EXPLICIT_RELOCS) + return false; + if (recog_memoized (insn) >= 0) + return get_attr_cannot_copy (insn); + else + return false; +} + + +/* Try a machine-dependent way of reloading an illegitimate address + operand. If we find one, push the reload and return the new rtx. */ + +rtx +alpha_legitimize_reload_address (rtx x, + enum machine_mode mode ATTRIBUTE_UNUSED, + int opnum, int type, + int ind_levels ATTRIBUTE_UNUSED) +{ + /* We must recognize output that we have already generated ourselves. */ + if (GET_CODE (x) == PLUS + && GET_CODE (XEXP (x, 0)) == PLUS + && REG_P (XEXP (XEXP (x, 0), 0)) + && CONST_INT_P (XEXP (XEXP (x, 0), 1)) + && CONST_INT_P (XEXP (x, 1))) + { + push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL, + BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0, + opnum, (enum reload_type) type); + return x; + } + + /* We wish to handle large displacements off a base register by + splitting the addend across an ldah and the mem insn. This + cuts number of extra insns needed from 3 to 1. */ + if (GET_CODE (x) == PLUS + && REG_P (XEXP (x, 0)) + && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER + && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0))) + && GET_CODE (XEXP (x, 1)) == CONST_INT) + { + HOST_WIDE_INT val = INTVAL (XEXP (x, 1)); + HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000; + HOST_WIDE_INT high + = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000; + + /* Check for 32-bit overflow. */ + if (high + low != val) + return NULL_RTX; + + /* Reload the high part into a base reg; leave the low part + in the mem directly. */ + x = gen_rtx_PLUS (GET_MODE (x), + gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0), + GEN_INT (high)), + GEN_INT (low)); + + push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL, + BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0, + opnum, (enum reload_type) type); + return x; + } + + return NULL_RTX; +} + +/* Compute a (partial) cost for rtx X. Return true if the complete + cost has been computed, and false if subexpressions should be + scanned. In either case, *TOTAL contains the cost result. */ + +static bool +alpha_rtx_costs (rtx x, int code, int outer_code, int opno, int *total, + bool speed) +{ + enum machine_mode mode = GET_MODE (x); + bool float_mode_p = FLOAT_MODE_P (mode); + const struct alpha_rtx_cost_data *cost_data; + + if (!speed) + cost_data = &alpha_rtx_cost_size; + else + cost_data = &alpha_rtx_cost_data[alpha_tune]; + + switch (code) + { + case CONST_INT: + /* If this is an 8-bit constant, return zero since it can be used + nearly anywhere with no cost. If it is a valid operand for an + ADD or AND, likewise return 0 if we know it will be used in that + context. Otherwise, return 2 since it might be used there later. + All other constants take at least two insns. 
*/ + if (INTVAL (x) >= 0 && INTVAL (x) < 256) + { + *total = 0; + return true; + } + /* FALLTHRU */ + + case CONST_DOUBLE: + if (x == CONST0_RTX (mode)) + *total = 0; + else if ((outer_code == PLUS && add_operand (x, VOIDmode)) + || (outer_code == AND && and_operand (x, VOIDmode))) + *total = 0; + else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode)) + *total = 2; + else + *total = COSTS_N_INSNS (2); + return true; + + case CONST: + case SYMBOL_REF: + case LABEL_REF: + if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode)) + *total = COSTS_N_INSNS (outer_code != MEM); + else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode)) + *total = COSTS_N_INSNS (1 + (outer_code != MEM)); + else if (tls_symbolic_operand_type (x)) + /* Estimate of cost for call_pal rduniq. */ + /* ??? How many insns do we emit here? More than one... */ + *total = COSTS_N_INSNS (15); + else + /* Otherwise we do a load from the GOT. */ + *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency); + return true; + + case HIGH: + /* This is effectively an add_operand. */ + *total = 2; + return true; + + case PLUS: + case MINUS: + if (float_mode_p) + *total = cost_data->fp_add; + else if (GET_CODE (XEXP (x, 0)) == MULT + && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode)) + { + *total = (rtx_cost (XEXP (XEXP (x, 0), 0), + (enum rtx_code) outer_code, opno, speed) + + rtx_cost (XEXP (x, 1), + (enum rtx_code) outer_code, opno, speed) + + COSTS_N_INSNS (1)); + return true; + } + return false; + + case MULT: + if (float_mode_p) + *total = cost_data->fp_mult; + else if (mode == DImode) + *total = cost_data->int_mult_di; + else + *total = cost_data->int_mult_si; + return false; + + case ASHIFT: + if (CONST_INT_P (XEXP (x, 1)) + && INTVAL (XEXP (x, 1)) <= 3) + { + *total = COSTS_N_INSNS (1); + return false; + } + /* FALLTHRU */ + + case ASHIFTRT: + case LSHIFTRT: + *total = cost_data->int_shift; + return false; + + case IF_THEN_ELSE: + if (float_mode_p) + *total = cost_data->fp_add; + else + *total = cost_data->int_cmov; + return false; + + case DIV: + case UDIV: + case MOD: + case UMOD: + if (!float_mode_p) + *total = cost_data->int_div; + else if (mode == SFmode) + *total = cost_data->fp_div_sf; + else + *total = cost_data->fp_div_df; + return false; + + case MEM: + *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency); + return true; + + case NEG: + if (! float_mode_p) + { + *total = COSTS_N_INSNS (1); + return false; + } + /* FALLTHRU */ + + case ABS: + if (! float_mode_p) + { + *total = COSTS_N_INSNS (1) + cost_data->int_cmov; + return false; + } + /* FALLTHRU */ + + case FLOAT: + case UNSIGNED_FLOAT: + case FIX: + case UNSIGNED_FIX: + case FLOAT_TRUNCATE: + *total = cost_data->fp_add; + return false; + + case FLOAT_EXTEND: + if (MEM_P (XEXP (x, 0))) + *total = 0; + else + *total = cost_data->fp_add; + return false; + + default: + return false; + } +} + +/* REF is an alignable memory location. Place an aligned SImode + reference into *PALIGNED_MEM and the number of bits to shift into + *PBITNUM. SCRATCH is a free register for use in reloading out + of range stack slots. */ + +void +get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum) +{ + rtx base; + HOST_WIDE_INT disp, offset; + + gcc_assert (MEM_P (ref)); + + if (reload_in_progress + && ! 
memory_address_p (GET_MODE (ref), XEXP (ref, 0))) + { + base = find_replacement (&XEXP (ref, 0)); + gcc_assert (memory_address_p (GET_MODE (ref), base)); + } + else + base = XEXP (ref, 0); + + if (GET_CODE (base) == PLUS) + disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0); + else + disp = 0; + + /* Find the byte offset within an aligned word. If the memory itself is + claimed to be aligned, believe it. Otherwise, aligned_memory_operand + will have examined the base register and determined it is aligned, and + thus displacements from it are naturally alignable. */ + if (MEM_ALIGN (ref) >= 32) + offset = 0; + else + offset = disp & 3; + + /* The location should not cross aligned word boundary. */ + gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref)) + <= GET_MODE_SIZE (SImode)); + + /* Access the entire aligned word. */ + *paligned_mem = widen_memory_access (ref, SImode, -offset); + + /* Convert the byte offset within the word to a bit offset. */ + offset *= BITS_PER_UNIT; + *pbitnum = GEN_INT (offset); +} + +/* Similar, but just get the address. Handle the two reload cases. + Add EXTRA_OFFSET to the address we return. */ + +rtx +get_unaligned_address (rtx ref) +{ + rtx base; + HOST_WIDE_INT offset = 0; + + gcc_assert (MEM_P (ref)); + + if (reload_in_progress + && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0))) + { + base = find_replacement (&XEXP (ref, 0)); + + gcc_assert (memory_address_p (GET_MODE (ref), base)); + } + else + base = XEXP (ref, 0); + + if (GET_CODE (base) == PLUS) + offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0); + + return plus_constant (Pmode, base, offset); +} + +/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7. + X is always returned in a register. */ + +rtx +get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs) +{ + if (GET_CODE (addr) == PLUS) + { + ofs += INTVAL (XEXP (addr, 1)); + addr = XEXP (addr, 0); + } + + return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7), + NULL_RTX, 1, OPTAB_LIB_WIDEN); +} + +/* On the Alpha, all (non-symbolic) constants except zero go into + a floating-point register via memory. Note that we cannot + return anything that is not a subset of RCLASS, and that some + symbolic constants cannot be dropped to memory. */ + +enum reg_class +alpha_preferred_reload_class(rtx x, enum reg_class rclass) +{ + /* Zero is present in any register class. */ + if (x == CONST0_RTX (GET_MODE (x))) + return rclass; + + /* These sorts of constants we can easily drop to memory. */ + if (CONST_INT_P (x) + || GET_CODE (x) == CONST_DOUBLE + || GET_CODE (x) == CONST_VECTOR) + { + if (rclass == FLOAT_REGS) + return NO_REGS; + if (rclass == ALL_REGS) + return GENERAL_REGS; + return rclass; + } + + /* All other kinds of constants should not (and in the case of HIGH + cannot) be dropped to memory -- instead we use a GENERAL_REGS + secondary reload. */ + if (CONSTANT_P (x)) + return (rclass == ALL_REGS ? GENERAL_REGS : rclass); + + return rclass; +} + +/* Inform reload about cases where moving X with a mode MODE to a register in + RCLASS requires an extra scratch or immediate register. Return the class + needed for the immediate register. */ + +static reg_class_t +alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i, + enum machine_mode mode, secondary_reload_info *sri) +{ + enum reg_class rclass = (enum reg_class) rclass_i; + + /* Loading and storing HImode or QImode values to and from memory + usually requires a scratch register. 
*/ + if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode)) + { + if (any_memory_operand (x, mode)) + { + if (in_p) + { + if (!aligned_memory_operand (x, mode)) + sri->icode = direct_optab_handler (reload_in_optab, mode); + } + else + sri->icode = direct_optab_handler (reload_out_optab, mode); + return NO_REGS; + } + } + + /* We also cannot do integral arithmetic into FP regs, as might result + from register elimination into a DImode fp register. */ + if (rclass == FLOAT_REGS) + { + if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND) + return GENERAL_REGS; + if (in_p && INTEGRAL_MODE_P (mode) + && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x)) + return GENERAL_REGS; + } + + return NO_REGS; +} + +/* Subfunction of the following function. Update the flags of any MEM + found in part of X. */ + +static int +alpha_set_memflags_1 (rtx *xp, void *data) +{ + rtx x = *xp, orig = (rtx) data; + + if (!MEM_P (x)) + return 0; + + MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig); + MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig); + MEM_READONLY_P (x) = MEM_READONLY_P (orig); + + /* Sadly, we cannot use alias sets because the extra aliasing + produced by the AND interferes. Given that two-byte quantities + are the only thing we would be able to differentiate anyway, + there does not seem to be any point in convoluting the early + out of the alias check. */ + + return -1; +} + +/* Given SEQ, which is an INSN list, look for any MEMs in either + a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and + volatile flags from REF into each of the MEMs found. If REF is not + a MEM, don't do anything. */ + +void +alpha_set_memflags (rtx seq, rtx ref) +{ + rtx insn; + + if (!MEM_P (ref)) + return; + + /* This is only called from alpha.md, after having had something + generated from one of the insn patterns. So if everything is + zero, the pattern is already up-to-date. */ + if (!MEM_VOLATILE_P (ref) + && !MEM_NOTRAP_P (ref) + && !MEM_READONLY_P (ref)) + return; + + for (insn = seq; insn; insn = NEXT_INSN (insn)) + if (INSN_P (insn)) + for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref); + else + gcc_unreachable (); +} + +static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT, + int, bool); + +/* Internal routine for alpha_emit_set_const to check for N or below insns. + If NO_OUTPUT is true, then we only check to see if N insns are possible, + and return pc_rtx if successful. */ + +static rtx +alpha_emit_set_const_1 (rtx target, enum machine_mode mode, + HOST_WIDE_INT c, int n, bool no_output) +{ + HOST_WIDE_INT new_const; + int i, bits; + /* Use a pseudo if highly optimizing and still generating RTL. */ + rtx subtarget + = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target); + rtx temp, insn; + + /* If this is a sign-extended 32-bit constant, we can do this in at most + three insns, so do it if we have enough insns left. We always have + a sign-extended 32-bit constant when compiling on a narrow machine. */ + + if (HOST_BITS_PER_WIDE_INT != 64 + || c >> 31 == -1 || c >> 31 == 0) + { + HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000; + HOST_WIDE_INT tmp1 = c - low; + HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000; + HOST_WIDE_INT extra = 0; + + /* If HIGH will be interpreted as negative but the constant is + positive, we must adjust it to do two ldha insns. 
*/ + + if ((high & 0x8000) != 0 && c >= 0) + { + extra = 0x4000; + tmp1 -= 0x40000000; + high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000); + } + + if (c == low || (low == 0 && extra == 0)) + { + /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode) + but that meant that we can't handle INT_MIN on 32-bit machines + (like NT/Alpha), because we recurse indefinitely through + emit_move_insn to gen_movdi. So instead, since we know exactly + what we want, create it explicitly. */ + + if (no_output) + return pc_rtx; + if (target == NULL) + target = gen_reg_rtx (mode); + emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c))); + return target; + } + else if (n >= 2 + (extra != 0)) + { + if (no_output) + return pc_rtx; + if (!can_create_pseudo_p ()) + { + emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16))); + temp = target; + } + else + temp = copy_to_suggested_reg (GEN_INT (high << 16), + subtarget, mode); + + /* As of 2002-02-23, addsi3 is only available when not optimizing. + This means that if we go through expand_binop, we'll try to + generate extensions, etc, which will require new pseudos, which + will fail during some split phases. The SImode add patterns + still exist, but are not named. So build the insns by hand. */ + + if (extra != 0) + { + if (! subtarget) + subtarget = gen_reg_rtx (mode); + insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16)); + insn = gen_rtx_SET (VOIDmode, subtarget, insn); + emit_insn (insn); + temp = subtarget; + } + + if (target == NULL) + target = gen_reg_rtx (mode); + insn = gen_rtx_PLUS (mode, temp, GEN_INT (low)); + insn = gen_rtx_SET (VOIDmode, target, insn); + emit_insn (insn); + return target; + } + } + + /* If we couldn't do it that way, try some other methods. But if we have + no instructions left, don't bother. Likewise, if this is SImode and + we can't make pseudos, we can't do anything since the expand_binop + and expand_unop calls will widen and try to make pseudos. */ + + if (n == 1 || (mode == SImode && !can_create_pseudo_p ())) + return 0; + + /* Next, see if we can load a related constant and then shift and possibly + negate it to get the constant we want. Try this once each increasing + numbers of insns. */ + + for (i = 1; i < n; i++) + { + /* First, see if minus some low bits, we've an easy load of + high bits. */ + + new_const = ((c & 0xffff) ^ 0x8000) - 0x8000; + if (new_const != 0) + { + temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output); + if (temp) + { + if (no_output) + return temp; + return expand_binop (mode, add_optab, temp, GEN_INT (new_const), + target, 0, OPTAB_WIDEN); + } + } + + /* Next try complementing. */ + temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output); + if (temp) + { + if (no_output) + return temp; + return expand_unop (mode, one_cmpl_optab, temp, target, 0); + } + + /* Next try to form a constant and do a left shift. We can do this + if some low-order bits are zero; the exact_log2 call below tells + us that information. The bits we are shifting out could be any + value, but here we'll just try the 0- and sign-extended forms of + the constant. To try to increase the chance of having the same + constant in more than one insn, start at the highest number of + bits to shift, but try all possibilities in case a ZAPNOT will + be useful. 
*/ + + bits = exact_log2 (c & -c); + if (bits > 0) + for (; bits > 0; bits--) + { + new_const = c >> bits; + temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output); + if (!temp && c < 0) + { + new_const = (unsigned HOST_WIDE_INT)c >> bits; + temp = alpha_emit_set_const (subtarget, mode, new_const, + i, no_output); + } + if (temp) + { + if (no_output) + return temp; + return expand_binop (mode, ashl_optab, temp, GEN_INT (bits), + target, 0, OPTAB_WIDEN); + } + } + + /* Now try high-order zero bits. Here we try the shifted-in bits as + all zero and all ones. Be careful to avoid shifting outside the + mode and to avoid shifting outside the host wide int size. */ + /* On narrow hosts, don't shift a 1 into the high bit, since we'll + confuse the recursive call and set all of the high 32 bits. */ + + bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8) + - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64)); + if (bits > 0) + for (; bits > 0; bits--) + { + new_const = c << bits; + temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output); + if (!temp) + { + new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1); + temp = alpha_emit_set_const (subtarget, mode, new_const, + i, no_output); + } + if (temp) + { + if (no_output) + return temp; + return expand_binop (mode, lshr_optab, temp, GEN_INT (bits), + target, 1, OPTAB_WIDEN); + } + } + + /* Now try high-order 1 bits. We get that with a sign-extension. + But one bit isn't enough here. Be careful to avoid shifting outside + the mode and to avoid shifting outside the host wide int size. */ + + bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8) + - floor_log2 (~ c) - 2); + if (bits > 0) + for (; bits > 0; bits--) + { + new_const = c << bits; + temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output); + if (!temp) + { + new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1); + temp = alpha_emit_set_const (subtarget, mode, new_const, + i, no_output); + } + if (temp) + { + if (no_output) + return temp; + return expand_binop (mode, ashr_optab, temp, GEN_INT (bits), + target, 0, OPTAB_WIDEN); + } + } + } + +#if HOST_BITS_PER_WIDE_INT == 64 + /* Finally, see if can load a value into the target that is the same as the + constant except that all bytes that are 0 are changed to be 0xff. If we + can, then we can do a ZAPNOT to obtain the desired constant. */ + + new_const = c; + for (i = 0; i < 64; i += 8) + if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0) + new_const |= (HOST_WIDE_INT) 0xff << i; + + /* We are only called for SImode and DImode. If this is SImode, ensure that + we are sign extended to a full word. */ + + if (mode == SImode) + new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000; + + if (new_const != c) + { + temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output); + if (temp) + { + if (no_output) + return temp; + return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const), + target, 0, OPTAB_WIDEN); + } + } +#endif + + return 0; +} + +/* Try to output insns to set TARGET equal to the constant C if it can be + done in less than N insns. Do all computations in MODE. Returns the place + where the output has been placed if it can be done and the insns have been + emitted. If it would take more than N insns, zero is returned and no + insns and emitted. 
*/ + +static rtx +alpha_emit_set_const (rtx target, enum machine_mode mode, + HOST_WIDE_INT c, int n, bool no_output) +{ + enum machine_mode orig_mode = mode; + rtx orig_target = target; + rtx result = 0; + int i; + + /* If we can't make any pseudos, TARGET is an SImode hard register, and we + can't load this constant in one insn, do this in DImode. */ + if (!can_create_pseudo_p () && mode == SImode + && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER) + { + result = alpha_emit_set_const_1 (target, mode, c, 1, no_output); + if (result) + return result; + + target = no_output ? NULL : gen_lowpart (DImode, target); + mode = DImode; + } + else if (mode == V8QImode || mode == V4HImode || mode == V2SImode) + { + target = no_output ? NULL : gen_lowpart (DImode, target); + mode = DImode; + } + + /* Try 1 insn, then 2, then up to N. */ + for (i = 1; i <= n; i++) + { + result = alpha_emit_set_const_1 (target, mode, c, i, no_output); + if (result) + { + rtx insn, set; + + if (no_output) + return result; + + insn = get_last_insn (); + set = single_set (insn); + if (! CONSTANT_P (SET_SRC (set))) + set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c)); + break; + } + } + + /* Allow for the case where we changed the mode of TARGET. */ + if (result) + { + if (result == target) + result = orig_target; + else if (mode != orig_mode) + result = gen_lowpart (orig_mode, result); + } + + return result; +} + +/* Having failed to find a 3 insn sequence in alpha_emit_set_const, + fall back to a straightforward decomposition. We do this to avoid + exponential run times encountered when looking for longer sequences + with alpha_emit_set_const. */ + +static rtx +alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2) +{ + HOST_WIDE_INT d1, d2, d3, d4; + + /* Decompose the entire word. */ +#if HOST_BITS_PER_WIDE_INT >= 64 + gcc_assert (c2 == -(c1 < 0)); + d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000; + c1 -= d1; + d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000; + c1 = (c1 - d2) >> 32; + d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000; + c1 -= d3; + d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000; + gcc_assert (c1 == d4); +#else + d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000; + c1 -= d1; + d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000; + gcc_assert (c1 == d2); + c2 += (d2 < 0); + d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000; + c2 -= d3; + d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000; + gcc_assert (c2 == d4); +#endif + + /* Construct the high word. */ + if (d4) + { + emit_move_insn (target, GEN_INT (d4)); + if (d3) + emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3))); + } + else + emit_move_insn (target, GEN_INT (d3)); + + /* Shift it into place. */ + emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32))); + + /* Add in the low bits. */ + if (d2) + emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2))); + if (d1) + emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1))); + + return target; +} + +/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return + the low 64 bits.
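+
+    A host-side sketch of the decomposition used above (constant chosen
+    arbitrarily; d1..d4 are the sign-extended pieces that the emitted
+    LDA/LDAH/SLL sequence adds back together):
+
+      #include <assert.h>
+      #include <stdint.h>
+      int main (void)
+      {
+        int64_t c = 0x123456789abcdef0LL, x = c;
+        int64_t d1 = ((x & 0xffff) ^ 0x8000) - 0x8000;
+        x -= d1;
+        int64_t d2 = ((x & 0xffffffff) ^ 0x80000000) - 0x80000000;
+        x = (x - d2) >> 32;
+        int64_t d3 = ((x & 0xffff) ^ 0x8000) - 0x8000;
+        x -= d3;
+        int64_t d4 = x;                     // high 32 bits, sign-extended
+        assert ((((d4 + d3) << 32) + d2 + d1) == c);
+        return 0;
+      }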
*/ + +static void +alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1) +{ + HOST_WIDE_INT i0, i1; + + if (GET_CODE (x) == CONST_VECTOR) + x = simplify_subreg (DImode, x, GET_MODE (x), 0); + + if (CONST_INT_P (x)) + { + i0 = INTVAL (x); + i1 = -(i0 < 0); + } + else if (HOST_BITS_PER_WIDE_INT >= 64) + { + i0 = CONST_DOUBLE_LOW (x); + i1 = -(i0 < 0); + } + else + { + i0 = CONST_DOUBLE_LOW (x); + i1 = CONST_DOUBLE_HIGH (x); + } + + *p0 = i0; + *p1 = i1; +} + +/* Implement TARGET_LEGITIMATE_CONSTANT_P. This is all constants for which + we are willing to load the value into a register via a move pattern. + Normally this is all symbolic constants, integral constants that + take three or fewer instructions, and floating-point zero. */ + +bool +alpha_legitimate_constant_p (enum machine_mode mode, rtx x) +{ + HOST_WIDE_INT i0, i1; + + switch (GET_CODE (x)) + { + case LABEL_REF: + case HIGH: + return true; + + case CONST: + if (GET_CODE (XEXP (x, 0)) == PLUS + && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) + x = XEXP (XEXP (x, 0), 0); + else + return true; + + if (GET_CODE (x) != SYMBOL_REF) + return true; + + /* FALLTHRU */ + + case SYMBOL_REF: + /* TLS symbols are never valid. */ + return SYMBOL_REF_TLS_MODEL (x) == 0; + + case CONST_DOUBLE: + if (x == CONST0_RTX (mode)) + return true; + if (FLOAT_MODE_P (mode)) + return false; + goto do_integer; + + case CONST_VECTOR: + if (x == CONST0_RTX (mode)) + return true; + if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT) + return false; + if (GET_MODE_SIZE (mode) != 8) + return false; + goto do_integer; + + case CONST_INT: + do_integer: + if (TARGET_BUILD_CONSTANTS) + return true; + alpha_extract_integer (x, &i0, &i1); + if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0)) + return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL; + return false; + + default: + return false; + } +} + +/* Operand 1 is known to be a constant, and should require more than one + instruction to load. Emit that multi-part load. */ + +bool +alpha_split_const_mov (enum machine_mode mode, rtx *operands) +{ + HOST_WIDE_INT i0, i1; + rtx temp = NULL_RTX; + + alpha_extract_integer (operands[1], &i0, &i1); + + if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0)) + temp = alpha_emit_set_const (operands[0], mode, i0, 3, false); + + if (!temp && TARGET_BUILD_CONSTANTS) + temp = alpha_emit_set_long_const (operands[0], i0, i1); + + if (temp) + { + if (!rtx_equal_p (operands[0], temp)) + emit_move_insn (operands[0], temp); + return true; + } + + return false; +} + +/* Expand a move instruction; return true if all work is done. + We don't handle non-bwx subword loads here. */ + +bool +alpha_expand_mov (enum machine_mode mode, rtx *operands) +{ + rtx tmp; + + /* If the output is not a register, the input must be. */ + if (MEM_P (operands[0]) + && ! reg_or_0_operand (operands[1], mode)) + operands[1] = force_reg (mode, operands[1]); + + /* Allow legitimize_address to perform some simplifications. */ + if (mode == Pmode && symbolic_operand (operands[1], mode)) + { + tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode); + if (tmp) + { + if (tmp == operands[0]) + return true; + operands[1] = tmp; + return false; + } + } + + /* Early out for non-constants and valid constants. */ + if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode)) + return false; + + /* Split large integers.
*/ + if (CONST_INT_P (operands[1]) + || GET_CODE (operands[1]) == CONST_DOUBLE + || GET_CODE (operands[1]) == CONST_VECTOR) + { + if (alpha_split_const_mov (mode, operands)) + return true; + } + + /* Otherwise we've nothing left but to drop the thing to memory. */ + tmp = force_const_mem (mode, operands[1]); + + if (tmp == NULL_RTX) + return false; + + if (reload_in_progress) + { + emit_move_insn (operands[0], XEXP (tmp, 0)); + operands[1] = replace_equiv_address (tmp, operands[0]); + } + else + operands[1] = validize_mem (tmp); + return false; +} + +/* Expand a non-bwx QImode or HImode move instruction; + return true if all work is done. */ + +bool +alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands) +{ + rtx seq; + + /* If the output is not a register, the input must be. */ + if (MEM_P (operands[0])) + operands[1] = force_reg (mode, operands[1]); + + /* Handle four memory cases, unaligned and aligned for either the input + or the output. The only case where we can be called during reload is + for aligned loads; all other cases require temporaries. */ + + if (any_memory_operand (operands[1], mode)) + { + if (aligned_memory_operand (operands[1], mode)) + { + if (reload_in_progress) + { + if (mode == QImode) + seq = gen_reload_inqi_aligned (operands[0], operands[1]); + else + seq = gen_reload_inhi_aligned (operands[0], operands[1]); + emit_insn (seq); + } + else + { + rtx aligned_mem, bitnum; + rtx scratch = gen_reg_rtx (SImode); + rtx subtarget; + bool copyout; + + get_aligned_mem (operands[1], &aligned_mem, &bitnum); + + subtarget = operands[0]; + if (REG_P (subtarget)) + subtarget = gen_lowpart (DImode, subtarget), copyout = false; + else + subtarget = gen_reg_rtx (DImode), copyout = true; + + if (mode == QImode) + seq = gen_aligned_loadqi (subtarget, aligned_mem, + bitnum, scratch); + else + seq = gen_aligned_loadhi (subtarget, aligned_mem, + bitnum, scratch); + emit_insn (seq); + + if (copyout) + emit_move_insn (operands[0], gen_lowpart (mode, subtarget)); + } + } + else + { + /* Don't pass these as parameters since that makes the generated + code depend on parameter evaluation order which will cause + bootstrap failures. 
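+
+    (The hazard is plain C: in a call such as
+
+       gen_unaligned_loadqi (subtarget, get_unaligned_address (operands[1]),
+                             gen_reg_rtx (DImode), gen_reg_rtx (DImode));
+
+     the two gen_reg_rtx calls may run in either order, so the pseudo
+     numbering -- and with it the generated RTL -- could differ from one
+     host compiler to another.)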
*/ + + rtx temp1, temp2, subtarget, ua; + bool copyout; + + temp1 = gen_reg_rtx (DImode); + temp2 = gen_reg_rtx (DImode); + + subtarget = operands[0]; + if (REG_P (subtarget)) + subtarget = gen_lowpart (DImode, subtarget), copyout = false; + else + subtarget = gen_reg_rtx (DImode), copyout = true; + + ua = get_unaligned_address (operands[1]); + if (mode == QImode) + seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2); + else + seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2); + + alpha_set_memflags (seq, operands[1]); + emit_insn (seq); + + if (copyout) + emit_move_insn (operands[0], gen_lowpart (mode, subtarget)); + } + return true; + } + + if (any_memory_operand (operands[0], mode)) + { + if (aligned_memory_operand (operands[0], mode)) + { + rtx aligned_mem, bitnum; + rtx temp1 = gen_reg_rtx (SImode); + rtx temp2 = gen_reg_rtx (SImode); + + get_aligned_mem (operands[0], &aligned_mem, &bitnum); + + emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum, + temp1, temp2)); + } + else + { + rtx temp1 = gen_reg_rtx (DImode); + rtx temp2 = gen_reg_rtx (DImode); + rtx temp3 = gen_reg_rtx (DImode); + rtx ua = get_unaligned_address (operands[0]); + + if (mode == QImode) + seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3); + else + seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3); + + alpha_set_memflags (seq, operands[0]); + emit_insn (seq); + } + return true; + } + + return false; +} + +/* Implement the movmisalign patterns. One of the operands is a memory + that is not naturally aligned. Emit instructions to load it. */ + +void +alpha_expand_movmisalign (enum machine_mode mode, rtx *operands) +{ + /* Honor misaligned loads, for those we promised to do so. */ + if (MEM_P (operands[1])) + { + rtx tmp; + + if (register_operand (operands[0], mode)) + tmp = operands[0]; + else + tmp = gen_reg_rtx (mode); + + alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0); + if (tmp != operands[0]) + emit_move_insn (operands[0], tmp); + } + else if (MEM_P (operands[0])) + { + if (!reg_or_0_operand (operands[1], mode)) + operands[1] = force_reg (mode, operands[1]); + alpha_expand_unaligned_store (operands[0], operands[1], 8, 0); + } + else + gcc_unreachable (); +} + +/* Generate an unsigned DImode to FP conversion. This is the same code + optabs would emit if we didn't have TFmode patterns. + + For SFmode, this is the only construction I've found that can pass + gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode + intermediates will work, because you'll get intermediate rounding + that ruins the end result. Some of this could be fixed by turning + on round-to-positive-infinity, but that requires diddling the fpsr, + which kills performance. I tried turning this around and converting + to a negative number, so that I could turn on /m, but either I did + it wrong or there's something else cause I wound up with the exact + same single-bit error. There is a branch-less form of this same code: + + srl $16,1,$1 + and $16,1,$2 + cmplt $16,0,$3 + or $1,$2,$2 + cmovge $16,$16,$2 + itoft $3,$f10 + itoft $2,$f11 + cvtqs $f11,$f11 + adds $f11,$f11,$f0 + fcmoveq $f10,$f11,$f0 + + I'm not using it because it's the same number of instructions as + this branch-full form, and it has more serialized long latency + instructions on the critical path. 
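+
+    In C terms, the negative path emitted below computes the following
+    (a host-side sketch of the same trick):
+
+      #include <stdint.h>
+      float u64_to_float (uint64_t x)
+      {
+        if ((int64_t) x >= 0)
+          return (float) (int64_t) x;
+        // Halve, folding the lost bit back in so round-to-nearest
+        // still sees it, then double the converted result.
+        uint64_t half = (x >> 1) | (x & 1);
+        float f = (float) (int64_t) half;
+        return f + f;
+      }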
+ + For DFmode, we can avoid rounding errors by breaking up the word + into two pieces, converting them separately, and adding them back: + + LC0: .long 0,0x5f800000 + + itoft $16,$f11 + lda $2,LC0 + cmplt $16,0,$1 + cpyse $f11,$f31,$f10 + cpyse $f31,$f11,$f11 + s4addq $1,$2,$1 + lds $f12,0($1) + cvtqt $f10,$f10 + cvtqt $f11,$f11 + addt $f12,$f10,$f0 + addt $f0,$f11,$f0 + + This doesn't seem to be a clear-cut win over the optabs form. + It probably all depends on the distribution of numbers being + converted -- in the optabs form, all but high-bit-set has a + much lower minimum execution time. */ + +void +alpha_emit_floatuns (rtx operands[2]) +{ + rtx neglab, donelab, i0, i1, f0, in, out; + enum machine_mode mode; + + out = operands[0]; + in = force_reg (DImode, operands[1]); + mode = GET_MODE (out); + neglab = gen_label_rtx (); + donelab = gen_label_rtx (); + i0 = gen_reg_rtx (DImode); + i1 = gen_reg_rtx (DImode); + f0 = gen_reg_rtx (mode); + + emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab); + + emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in))); + emit_jump_insn (gen_jump (donelab)); + emit_barrier (); + + emit_label (neglab); + + emit_insn (gen_lshrdi3 (i0, in, const1_rtx)); + emit_insn (gen_anddi3 (i1, in, const1_rtx)); + emit_insn (gen_iordi3 (i0, i0, i1)); + emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0))); + emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0))); + + emit_label (donelab); +} + +/* Generate the comparison for a conditional branch. */ + +void +alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode) +{ + enum rtx_code cmp_code, branch_code; + enum machine_mode branch_mode = VOIDmode; + enum rtx_code code = GET_CODE (operands[0]); + rtx op0 = operands[1], op1 = operands[2]; + rtx tem; + + if (cmp_mode == TFmode) + { + op0 = alpha_emit_xfloating_compare (&code, op0, op1); + op1 = const0_rtx; + cmp_mode = DImode; + } + + /* The general case: fold the comparison code to the types of compares + that we have, choosing the branch as necessary. */ + switch (code) + { + case EQ: case LE: case LT: case LEU: case LTU: + case UNORDERED: + /* We have these compares. */ + cmp_code = code, branch_code = NE; + break; + + case NE: + case ORDERED: + /* These must be reversed. */ + cmp_code = reverse_condition (code), branch_code = EQ; + break; + + case GE: case GT: case GEU: case GTU: + /* For FP, we swap them, for INT, we reverse them. */ + if (cmp_mode == DFmode) + { + cmp_code = swap_condition (code); + branch_code = NE; + tem = op0, op0 = op1, op1 = tem; + } + else + { + cmp_code = reverse_condition (code); + branch_code = EQ; + } + break; + + default: + gcc_unreachable (); + } + + if (cmp_mode == DFmode) + { + if (flag_unsafe_math_optimizations && cmp_code != UNORDERED) + { + /* When we are not as concerned about non-finite values, and we + are comparing against zero, we can branch directly. */ + if (op1 == CONST0_RTX (DFmode)) + cmp_code = UNKNOWN, branch_code = code; + else if (op0 == CONST0_RTX (DFmode)) + { + /* Undo the swap we probably did just above. */ + tem = op0, op0 = op1, op1 = tem; + branch_code = swap_condition (cmp_code); + cmp_code = UNKNOWN; + } + } + else + { + /* ??? We mark the branch mode to be CCmode to prevent the + compare and branch from being combined, since the compare + insn follows IEEE rules that the branch does not. */ + branch_mode = CCmode; + } + } + else + { + /* The following optimizations are only for signed compares. 
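+
+    For example (a sketch with made-up values): x == 0x12340000 cannot be
+    tested with a CMPEQ immediate, but the negated constant is a single
+    LDAH displacement, so the EQ/NE rewrite below turns it into an
+    add-and-branch:
+
+      ldah $1,-0x1234($16)      # $1 = x - 0x12340000
+      beq  $1,taken             # taken iff x == 0x12340000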
*/ + if (code != LEU && code != LTU && code != GEU && code != GTU) + { + /* Whee. Compare and branch against 0 directly. */ + if (op1 == const0_rtx) + cmp_code = UNKNOWN, branch_code = code; + + /* If the constant doesn't fit into an immediate, but can + be generated by lda/ldah, we adjust the argument and + compare against zero, so we can use beq/bne directly. */ + /* ??? Don't do this when comparing against symbols, otherwise + we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will + be declared false out of hand (at least for non-weak). */ + else if (CONST_INT_P (op1) + && (code == EQ || code == NE) + && !(symbolic_operand (op0, VOIDmode) + || (REG_P (op0) && REG_POINTER (op0)))) + { + rtx n_op1 = GEN_INT (-INTVAL (op1)); + + if (! satisfies_constraint_I (op1) + && (satisfies_constraint_K (n_op1) + || satisfies_constraint_L (n_op1))) + cmp_code = PLUS, branch_code = code, op1 = n_op1; + } + } + + if (!reg_or_0_operand (op0, DImode)) + op0 = force_reg (DImode, op0); + if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode)) + op1 = force_reg (DImode, op1); + } + + /* Emit an initial compare instruction, if necessary. */ + tem = op0; + if (cmp_code != UNKNOWN) + { + tem = gen_reg_rtx (cmp_mode); + emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)); + } + + /* Emit the branch instruction. */ + tem = gen_rtx_SET (VOIDmode, pc_rtx, + gen_rtx_IF_THEN_ELSE (VOIDmode, + gen_rtx_fmt_ee (branch_code, + branch_mode, tem, + CONST0_RTX (cmp_mode)), + gen_rtx_LABEL_REF (VOIDmode, + operands[3]), + pc_rtx)); + emit_jump_insn (tem); +} + +/* Certain simplifications can be done to make invalid setcc operations + valid. Return true if we succeed, false if we can't work. */ + +bool +alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode) +{ + enum rtx_code cmp_code; + enum rtx_code code = GET_CODE (operands[1]); + rtx op0 = operands[2], op1 = operands[3]; + rtx tmp; + + if (cmp_mode == TFmode) + { + op0 = alpha_emit_xfloating_compare (&code, op0, op1); + op1 = const0_rtx; + cmp_mode = DImode; + } + + if (cmp_mode == DFmode && !TARGET_FIX) + return false; + + /* The general case: fold the comparison code to the types of compares + that we have, choosing the branch as necessary. */ + + cmp_code = UNKNOWN; + switch (code) + { + case EQ: case LE: case LT: case LEU: case LTU: + case UNORDERED: + /* We have these compares. */ + if (cmp_mode == DFmode) + cmp_code = code, code = NE; + break; + + case NE: + if (cmp_mode == DImode && op1 == const0_rtx) + break; + /* FALLTHRU */ + + case ORDERED: + cmp_code = reverse_condition (code); + code = EQ; + break; + + case GE: case GT: case GEU: case GTU: + /* These normally need swapping, but for integer zero we have + special patterns that recognize swapped operands. */ + if (cmp_mode == DImode && op1 == const0_rtx) + break; + code = swap_condition (code); + if (cmp_mode == DFmode) + cmp_code = code, code = NE; + tmp = op0, op0 = op1, op1 = tmp; + break; + + default: + gcc_unreachable (); + } + + if (cmp_mode == DImode) + { + if (!register_operand (op0, DImode)) + op0 = force_reg (DImode, op0); + if (!reg_or_8bit_operand (op1, DImode)) + op1 = force_reg (DImode, op1); + } + + /* Emit an initial compare instruction, if necessary. */ + if (cmp_code != UNKNOWN) + { + tmp = gen_reg_rtx (cmp_mode); + emit_insn (gen_rtx_SET (VOIDmode, tmp, + gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1))); + + op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp; + op1 = const0_rtx; + } + + /* Emit the setcc instruction.
*/ + emit_insn (gen_rtx_SET (VOIDmode, operands[0], + gen_rtx_fmt_ee (code, DImode, op0, op1))); + return true; +} + + +/* Rewrite a comparison against zero CMP of the form + (CODE (cc0) (const_int 0)) so it can be written validly in + a conditional move (if_then_else CMP ...). + If both of the operands that set cc0 are nonzero we must emit + an insn to perform the compare (it can't be done within + the conditional move). */ + +rtx +alpha_emit_conditional_move (rtx cmp, enum machine_mode mode) +{ + enum rtx_code code = GET_CODE (cmp); + enum rtx_code cmov_code = NE; + rtx op0 = XEXP (cmp, 0); + rtx op1 = XEXP (cmp, 1); + enum machine_mode cmp_mode + = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0)); + enum machine_mode cmov_mode = VOIDmode; + int local_fast_math = flag_unsafe_math_optimizations; + rtx tem; + + if (cmp_mode == TFmode) + { + op0 = alpha_emit_xfloating_compare (&code, op0, op1); + op1 = const0_rtx; + cmp_mode = DImode; + } + + gcc_assert (cmp_mode == DFmode || cmp_mode == DImode); + + if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode)) + { + enum rtx_code cmp_code; + + if (! TARGET_FIX) + return 0; + + /* If we have fp<->int register move instructions, do a cmov by + performing the comparison in fp registers, and move the + zero/nonzero value to integer registers, where we can then + use a normal cmov, or vice-versa. */ + + switch (code) + { + case EQ: case LE: case LT: case LEU: case LTU: + case UNORDERED: + /* We have these compares. */ + cmp_code = code, code = NE; + break; + + case NE: + case ORDERED: + /* These must be reversed. */ + cmp_code = reverse_condition (code), code = EQ; + break; + + case GE: case GT: case GEU: case GTU: + /* These normally need swapping, but for integer zero we have + special patterns that recognize swapped operands. */ + if (cmp_mode == DImode && op1 == const0_rtx) + cmp_code = code, code = NE; + else + { + cmp_code = swap_condition (code); + code = NE; + tem = op0, op0 = op1, op1 = tem; + } + break; + + default: + gcc_unreachable (); + } + + if (cmp_mode == DImode) + { + if (!reg_or_0_operand (op0, DImode)) + op0 = force_reg (DImode, op0); + if (!reg_or_8bit_operand (op1, DImode)) + op1 = force_reg (DImode, op1); + } + + tem = gen_reg_rtx (cmp_mode); + emit_insn (gen_rtx_SET (VOIDmode, tem, + gen_rtx_fmt_ee (cmp_code, cmp_mode, + op0, op1))); + + cmp_mode = cmp_mode == DImode ? DFmode : DImode; + op0 = gen_lowpart (cmp_mode, tem); + op1 = CONST0_RTX (cmp_mode); + cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1); + local_fast_math = 1; + } + + if (cmp_mode == DImode) + { + if (!reg_or_0_operand (op0, DImode)) + op0 = force_reg (DImode, op0); + if (!reg_or_8bit_operand (op1, DImode)) + op1 = force_reg (DImode, op1); + } + + /* We may be able to use a conditional move directly. + This avoids emitting spurious compares. */ + if (signed_comparison_operator (cmp, VOIDmode) + && (cmp_mode == DImode || local_fast_math) + && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode))) + return gen_rtx_fmt_ee (code, VOIDmode, op0, op1); + + /* We can't put the comparison inside the conditional move; + emit a compare instruction and put that inside the + conditional move. Make sure we emit only comparisons we have; + swap or reverse as necessary. */ + + if (!can_create_pseudo_p ()) + return NULL_RTX; + + switch (code) + { + case EQ: case LE: case LT: case LEU: case LTU: + case UNORDERED: + /* We have these compares: */ + break; + + case NE: + case ORDERED: + /* These must be reversed. 
*/ + code = reverse_condition (code); + cmov_code = EQ; + break; + + case GE: case GT: case GEU: case GTU: + /* These normally need swapping, but for integer zero we have + special patterns that recognize swapped operands. */ + if (cmp_mode == DImode && op1 == const0_rtx) + break; + code = swap_condition (code); + tem = op0, op0 = op1, op1 = tem; + break; + + default: + gcc_unreachable (); + } + + if (cmp_mode == DImode) + { + if (!reg_or_0_operand (op0, DImode)) + op0 = force_reg (DImode, op0); + if (!reg_or_8bit_operand (op1, DImode)) + op1 = force_reg (DImode, op1); + } + + /* ??? We mark the branch mode to be CCmode to prevent the compare + and cmov from being combined, since the compare insn follows IEEE + rules that the cmov does not. */ + if (cmp_mode == DFmode && !local_fast_math) + cmov_mode = CCmode; + + tem = gen_reg_rtx (cmp_mode); + emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1)); + return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode)); +} + +/* Simplify a conditional move of two constants into a setcc with + arithmetic. This is done with a splitter since combine would + just undo the work if done during code generation. It also catches + cases we wouldn't have before cse. */ + +int +alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond, + rtx t_rtx, rtx f_rtx) +{ + HOST_WIDE_INT t, f, diff; + enum machine_mode mode; + rtx target, subtarget, tmp; + + mode = GET_MODE (dest); + t = INTVAL (t_rtx); + f = INTVAL (f_rtx); + diff = t - f; + + if (((code == NE || code == EQ) && diff < 0) + || (code == GE || code == GT)) + { + code = reverse_condition (code); + diff = t, t = f, f = diff; + diff = t - f; + } + + subtarget = target = dest; + if (mode != DImode) + { + target = gen_lowpart (DImode, dest); + if (can_create_pseudo_p ()) + subtarget = gen_reg_rtx (DImode); + else + subtarget = target; + } + /* Below, we must be careful to use copy_rtx on target and subtarget + in intermediate insns, as they may be a subreg rtx, which may not + be shared. */ + + if (f == 0 && exact_log2 (diff) > 0 + /* On EV6, we've got enough shifters to make non-arithmetic shifts + viable over a longer latency cmove. On EV5, the E0 slot is a + scarce resource, and on EV4 shift has the same latency as a cmove. */ + && (diff <= 8 || alpha_tune == PROCESSOR_EV6)) + { + tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx); + emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp)); + + tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget), + GEN_INT (exact_log2 (t))); + emit_insn (gen_rtx_SET (VOIDmode, target, tmp)); + } + else if (f == 0 && t == -1) + { + tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx); + emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp)); + + emit_insn (gen_negdi2 (target, copy_rtx (subtarget))); + } + else if (diff == 1 || diff == 4 || diff == 8) + { + rtx add_op; + + tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx); + emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp)); + + if (diff == 1) + emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f))); + else + { + add_op = GEN_INT (f); + if (sext_add_operand (add_op, mode)) + { + tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget), + GEN_INT (diff)); + tmp = gen_rtx_PLUS (DImode, tmp, add_op); + emit_insn (gen_rtx_SET (VOIDmode, target, tmp)); + } + else + return 0; + } + } + else + return 0; + + return 1; +} + +/* Look up the function X_floating library function name for the + given operation. 
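+
+    (For example, a TFmode addition becomes a call to _OtsAddX under the
+    OSF/Tru64 naming and to OTS$ADD_X under VMS; the tables below map
+    each rtx code to that pair of names, and the libcall rtx is created
+    lazily on first use.)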
*/ + +struct GTY(()) xfloating_op +{ + const enum rtx_code code; + const char *const GTY((skip)) osf_func; + const char *const GTY((skip)) vms_func; + rtx libcall; +}; + +static GTY(()) struct xfloating_op xfloating_ops[] = +{ + { PLUS, "_OtsAddX", "OTS$ADD_X", 0 }, + { MINUS, "_OtsSubX", "OTS$SUB_X", 0 }, + { MULT, "_OtsMulX", "OTS$MUL_X", 0 }, + { DIV, "_OtsDivX", "OTS$DIV_X", 0 }, + { EQ, "_OtsEqlX", "OTS$EQL_X", 0 }, + { NE, "_OtsNeqX", "OTS$NEQ_X", 0 }, + { LT, "_OtsLssX", "OTS$LSS_X", 0 }, + { LE, "_OtsLeqX", "OTS$LEQ_X", 0 }, + { GT, "_OtsGtrX", "OTS$GTR_X", 0 }, + { GE, "_OtsGeqX", "OTS$GEQ_X", 0 }, + { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 }, + { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 }, + { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 }, + { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 }, + { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 } +}; + +static GTY(()) struct xfloating_op vax_cvt_ops[] = +{ + { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 }, + { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 } +}; + +static rtx +alpha_lookup_xfloating_lib_func (enum rtx_code code) +{ + struct xfloating_op *ops = xfloating_ops; + long n = ARRAY_SIZE (xfloating_ops); + long i; + + gcc_assert (TARGET_HAS_XFLOATING_LIBS); + + /* How irritating. Nothing to key off for the main table. */ + if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE)) + { + ops = vax_cvt_ops; + n = ARRAY_SIZE (vax_cvt_ops); + } + + for (i = 0; i < n; ++i, ++ops) + if (ops->code == code) + { + rtx func = ops->libcall; + if (!func) + { + func = init_one_libfunc (TARGET_ABI_OPEN_VMS + ? ops->vms_func : ops->osf_func); + ops->libcall = func; + } + return func; + } + + gcc_unreachable (); +} + +/* Most X_floating operations take the rounding mode as an argument. + Compute that here. */ + +static int +alpha_compute_xfloating_mode_arg (enum rtx_code code, + enum alpha_fp_rounding_mode round) +{ + int mode; + + switch (round) + { + case ALPHA_FPRM_NORM: + mode = 2; + break; + case ALPHA_FPRM_MINF: + mode = 1; + break; + case ALPHA_FPRM_CHOP: + mode = 0; + break; + case ALPHA_FPRM_DYN: + mode = 4; + break; + default: + gcc_unreachable (); + + /* XXX For reference, round to +inf is mode = 3. */ + } + + if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N) + mode |= 0x10000; + + return mode; +} + +/* Emit an X_floating library function call. + + Note that these functions do not follow normal calling conventions: + TFmode arguments are passed in two integer registers (as opposed to + indirect); TFmode return values appear in R16+R17. + + FUNC is the function to call. + TARGET is where the output belongs. + OPERANDS are the inputs. + NOPERANDS is the count of inputs. + EQUIV is the expression equivalent for the function. 
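+
+   For instance, a two-operand TFmode operation such as _OtsAddX is
+   marshalled as follows (a sketch of the loop below):
+
+     $16:$17 = operand 0 (TFmode)
+     $18:$19 = operand 1 (TFmode)
+     $20     = rounding-mode argument (DImode)
+     result returned in $16:$17 (TFmode)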
+*/ + +static void +alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[], + int noperands, rtx equiv) +{ + rtx usage = NULL_RTX, tmp, reg; + int regno = 16, i; + + start_sequence (); + + for (i = 0; i < noperands; ++i) + { + switch (GET_MODE (operands[i])) + { + case TFmode: + reg = gen_rtx_REG (TFmode, regno); + regno += 2; + break; + + case DFmode: + reg = gen_rtx_REG (DFmode, regno + 32); + regno += 1; + break; + + case VOIDmode: + gcc_assert (CONST_INT_P (operands[i])); + /* FALLTHRU */ + case DImode: + reg = gen_rtx_REG (DImode, regno); + regno += 1; + break; + + default: + gcc_unreachable (); + } + + emit_move_insn (reg, operands[i]); + use_reg (&usage, reg); + } + + switch (GET_MODE (target)) + { + case TFmode: + reg = gen_rtx_REG (TFmode, 16); + break; + case DFmode: + reg = gen_rtx_REG (DFmode, 32); + break; + case DImode: + reg = gen_rtx_REG (DImode, 0); + break; + default: + gcc_unreachable (); + } + + tmp = gen_rtx_MEM (QImode, func); + tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx, + const0_rtx, const0_rtx)); + CALL_INSN_FUNCTION_USAGE (tmp) = usage; + RTL_CONST_CALL_P (tmp) = 1; + + tmp = get_insns (); + end_sequence (); + + emit_libcall_block (tmp, target, reg, equiv); +} + +/* Emit an X_floating library function call for arithmetic (+,-,*,/). */ + +void +alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[]) +{ + rtx func; + int mode; + rtx out_operands[3]; + + func = alpha_lookup_xfloating_lib_func (code); + mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm); + + out_operands[0] = operands[1]; + out_operands[1] = operands[2]; + out_operands[2] = GEN_INT (mode); + alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3, + gen_rtx_fmt_ee (code, TFmode, operands[1], + operands[2])); +} + +/* Emit an X_floating library function call for a comparison. */ + +static rtx +alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1) +{ + enum rtx_code cmp_code, res_code; + rtx func, out, operands[2], note; + + /* X_floating library comparison functions return + -1 unordered + 0 false + 1 true + Convert the compare against the raw return value. */ + + cmp_code = *pcode; + switch (cmp_code) + { + case UNORDERED: + cmp_code = EQ; + res_code = LT; + break; + case ORDERED: + cmp_code = EQ; + res_code = GE; + break; + case NE: + res_code = NE; + break; + case EQ: + case LT: + case GT: + case LE: + case GE: + res_code = GT; + break; + default: + gcc_unreachable (); + } + *pcode = res_code; + + func = alpha_lookup_xfloating_lib_func (cmp_code); + + operands[0] = op0; + operands[1] = op1; + out = gen_reg_rtx (DImode); + + /* What's actually returned is -1,0,1, not a proper boolean value. */ + note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1); + note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE); + alpha_emit_xfloating_libcall (func, out, operands, 2, note); + + return out; +} + +/* Emit an X_floating library function call for a conversion. 
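+
+    (Note that the FIX case below hard-wires ALPHA_FPRM_CHOP: C's
+    float-to-integer conversion truncates toward zero regardless of the
+    prevailing rounding mode, whereas FLOAT_TRUNCATE honors the
+    user-selected alpha_fprm.)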
*/ + +void +alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[]) +{ + int noperands = 1, mode; + rtx out_operands[2]; + rtx func; + enum rtx_code code = orig_code; + + if (code == UNSIGNED_FIX) + code = FIX; + + func = alpha_lookup_xfloating_lib_func (code); + + out_operands[0] = operands[1]; + + switch (code) + { + case FIX: + mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP); + out_operands[1] = GEN_INT (mode); + noperands = 2; + break; + case FLOAT_TRUNCATE: + mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm); + out_operands[1] = GEN_INT (mode); + noperands = 2; + break; + default: + break; + } + + alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands, + gen_rtx_fmt_e (orig_code, + GET_MODE (operands[0]), + operands[1])); +} + +/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of + DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true, + guarantee that the sequence + set (OP[0] OP[2]) + set (OP[1] OP[3]) + is valid. Naturally, output operand ordering is little-endian. + This is used by *movtf_internal and *movti_internal. */ + +void +alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode, + bool fixup_overlap) +{ + switch (GET_CODE (operands[1])) + { + case REG: + operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1); + operands[2] = gen_rtx_REG (DImode, REGNO (operands[1])); + break; + + case MEM: + operands[3] = adjust_address (operands[1], DImode, 8); + operands[2] = adjust_address (operands[1], DImode, 0); + break; + + case CONST_INT: + case CONST_DOUBLE: + gcc_assert (operands[1] == CONST0_RTX (mode)); + operands[2] = operands[3] = const0_rtx; + break; + + default: + gcc_unreachable (); + } + + switch (GET_CODE (operands[0])) + { + case REG: + operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1); + operands[0] = gen_rtx_REG (DImode, REGNO (operands[0])); + break; + + case MEM: + operands[1] = adjust_address (operands[0], DImode, 8); + operands[0] = adjust_address (operands[0], DImode, 0); + break; + + default: + gcc_unreachable (); + } + + if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3])) + { + rtx tmp; + tmp = operands[0], operands[0] = operands[1], operands[1] = tmp; + tmp = operands[2], operands[2] = operands[3], operands[3] = tmp; + } +} + +/* Implement negtf2 or abstf2. Op0 is destination, op1 is source, + op2 is a register containing the sign bit, operation is the + logical operation to be performed. */ + +void +alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx)) +{ + rtx high_bit = operands[2]; + rtx scratch; + int move; + + alpha_split_tmode_pair (operands, TFmode, false); + + /* Detect three flavors of operand overlap. */ + move = 1; + if (rtx_equal_p (operands[0], operands[2])) + move = 0; + else if (rtx_equal_p (operands[1], operands[2])) + { + if (rtx_equal_p (operands[0], high_bit)) + move = 2; + else + move = -1; + } + + if (move < 0) + emit_move_insn (operands[0], operands[2]); + + /* ??? If the destination overlaps both source tf and high_bit, then + assume source tf is dead in its entirety and use the other half + for a scratch register. Otherwise "scratch" is just the proper + destination register. */ + scratch = operands[move < 2 ? 
1 : 3]; + + emit_insn ((*operation) (scratch, high_bit, operands[3])); + + if (move > 0) + { + emit_move_insn (operands[0], operands[2]); + if (move > 1) + emit_move_insn (operands[1], scratch); + } +} + +/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting + unaligned data: + + unsigned: signed: + word: ldq_u r1,X(r11) ldq_u r1,X(r11) + ldq_u r2,X+1(r11) ldq_u r2,X+1(r11) + lda r3,X(r11) lda r3,X+2(r11) + extwl r1,r3,r1 extql r1,r3,r1 + extwh r2,r3,r2 extqh r2,r3,r2 + or r1,r2,r1 or r1,r2,r1 + sra r1,48,r1 + + long: ldq_u r1,X(r11) ldq_u r1,X(r11) + ldq_u r2,X+3(r11) ldq_u r2,X+3(r11) + lda r3,X(r11) lda r3,X(r11) + extll r1,r3,r1 extll r1,r3,r1 + extlh r2,r3,r2 extlh r2,r3,r2 + or r1,r2,r1 addl r1,r2,r1 + + quad: ldq_u r1,X(r11) + ldq_u r2,X+7(r11) + lda r3,X(r11) + extql r1,r3,r1 + extqh r2,r3,r2 + or r1,r2,r1 +*/ + +void +alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size, + HOST_WIDE_INT ofs, int sign) +{ + rtx meml, memh, addr, extl, exth, tmp, mema; + enum machine_mode mode; + + if (TARGET_BWX && size == 2) + { + meml = adjust_address (mem, QImode, ofs); + memh = adjust_address (mem, QImode, ofs+1); + extl = gen_reg_rtx (DImode); + exth = gen_reg_rtx (DImode); + emit_insn (gen_zero_extendqidi2 (extl, meml)); + emit_insn (gen_zero_extendqidi2 (exth, memh)); + exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8), + NULL, 1, OPTAB_LIB_WIDEN); + addr = expand_simple_binop (DImode, IOR, extl, exth, + NULL, 1, OPTAB_LIB_WIDEN); + + if (sign && GET_MODE (tgt) != HImode) + { + addr = gen_lowpart (HImode, addr); + emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0)); + } + else + { + if (GET_MODE (tgt) != DImode) + addr = gen_lowpart (GET_MODE (tgt), addr); + emit_move_insn (tgt, addr); + } + return; + } + + meml = gen_reg_rtx (DImode); + memh = gen_reg_rtx (DImode); + addr = gen_reg_rtx (DImode); + extl = gen_reg_rtx (DImode); + exth = gen_reg_rtx (DImode); + + mema = XEXP (mem, 0); + if (GET_CODE (mema) == LO_SUM) + mema = force_reg (Pmode, mema); + + /* AND addresses cannot be in any alias set, since they may implicitly + alias surrounding code. Ideally we'd have some alias set that + covered all types except those with alignment 8 or higher. */ + + tmp = change_address (mem, DImode, + gen_rtx_AND (DImode, + plus_constant (DImode, mema, ofs), + GEN_INT (-8))); + set_mem_alias_set (tmp, 0); + emit_move_insn (meml, tmp); + + tmp = change_address (mem, DImode, + gen_rtx_AND (DImode, + plus_constant (DImode, mema, + ofs + size - 1), + GEN_INT (-8))); + set_mem_alias_set (tmp, 0); + emit_move_insn (memh, tmp); + + if (sign && size == 2) + { + emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2)); + + emit_insn (gen_extql (extl, meml, addr)); + emit_insn (gen_extqh (exth, memh, addr)); + + /* We must use tgt here for the target. The alpha-vms port fails if we use + addr for the target, because addr is marked as a pointer and combine + knows that pointers are always sign-extended 32-bit values.
*/ + addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN); + addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48), + addr, 1, OPTAB_WIDEN); + } + else + { + emit_move_insn (addr, plus_constant (Pmode, mema, ofs)); + emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr)); + switch ((int) size) + { + case 2: + emit_insn (gen_extwh (exth, memh, addr)); + mode = HImode; + break; + case 4: + emit_insn (gen_extlh (exth, memh, addr)); + mode = SImode; + break; + case 8: + emit_insn (gen_extqh (exth, memh, addr)); + mode = DImode; + break; + default: + gcc_unreachable (); + } + + addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl), + gen_lowpart (mode, exth), gen_lowpart (mode, tgt), + sign, OPTAB_WIDEN); + } + + if (addr != tgt) + emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr)); +} + +/* Similarly, use ins and msk instructions to perform unaligned stores. */ + +void +alpha_expand_unaligned_store (rtx dst, rtx src, + HOST_WIDE_INT size, HOST_WIDE_INT ofs) +{ + rtx dstl, dsth, addr, insl, insh, meml, memh, dsta; + + if (TARGET_BWX && size == 2) + { + if (src != const0_rtx) + { + dstl = gen_lowpart (QImode, src); + dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8), + NULL, 1, OPTAB_LIB_WIDEN); + dsth = gen_lowpart (QImode, dsth); + } + else + dstl = dsth = const0_rtx; + + meml = adjust_address (dst, QImode, ofs); + memh = adjust_address (dst, QImode, ofs+1); + + emit_move_insn (meml, dstl); + emit_move_insn (memh, dsth); + return; + } + + dstl = gen_reg_rtx (DImode); + dsth = gen_reg_rtx (DImode); + insl = gen_reg_rtx (DImode); + insh = gen_reg_rtx (DImode); + + dsta = XEXP (dst, 0); + if (GET_CODE (dsta) == LO_SUM) + dsta = force_reg (Pmode, dsta); + + /* AND addresses cannot be in any alias set, since they may implicitly + alias surrounding code. Ideally we'd have some alias set that + covered all types except those with alignment 8 or higher. */ + + meml = change_address (dst, DImode, + gen_rtx_AND (DImode, + plus_constant (DImode, dsta, ofs), + GEN_INT (-8))); + set_mem_alias_set (meml, 0); + + memh = change_address (dst, DImode, + gen_rtx_AND (DImode, + plus_constant (DImode, dsta, + ofs + size - 1), + GEN_INT (-8))); + set_mem_alias_set (memh, 0); + + emit_move_insn (dsth, memh); + emit_move_insn (dstl, meml); + + addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs)); + + if (src != CONST0_RTX (GET_MODE (src))) + { + emit_insn (gen_insxh (insh, gen_lowpart (DImode, src), + GEN_INT (size*8), addr)); + + switch ((int) size) + { + case 2: + emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr)); + break; + case 4: + emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr)); + break; + case 8: + emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr)); + break; + default: + gcc_unreachable (); + } + } + + emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr)); + + switch ((int) size) + { + case 2: + emit_insn (gen_mskwl (dstl, dstl, addr)); + break; + case 4: + emit_insn (gen_mskll (dstl, dstl, addr)); + break; + case 8: + emit_insn (gen_mskql (dstl, dstl, addr)); + break; + default: + gcc_unreachable (); + } + + if (src != CONST0_RTX (GET_MODE (src))) + { + dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN); + dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN); + } + + /* Must store high before low for degenerate case of aligned. 
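+
+    When the address turns out to be 8-byte aligned, MEML and MEMH name
+    the same quadword and the "high" half degenerates to the unmodified
+    original word, so it must be written first and then overwritten by
+    the fully merged low word.  In C terms the merge above is, for
+    SHIFT = (addr & 7) * 8 and MASK covering SIZE bytes (a sketch that
+    ignores the SHIFT == 0 degenerate forms of the hardware insns):
+
+      lo = (lo & ~(MASK << SHIFT)) | (val << SHIFT);
+      hi = (hi & ~(MASK >> (64 - SHIFT))) | (val >> (64 - SHIFT));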
*/ + emit_move_insn (memh, dsth); + emit_move_insn (meml, dstl); +} + +/* The block move code tries to maximize speed by separating loads and + stores at the expense of register pressure: we load all of the data + before we store it back out. There are two secondary effects worth + mentioning, that this speeds copying to/from aligned and unaligned + buffers, and that it makes the code significantly easier to write. */ + +#define MAX_MOVE_WORDS 8 + +/* Load an integral number of consecutive unaligned quadwords. */ + +static void +alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem, + HOST_WIDE_INT words, HOST_WIDE_INT ofs) +{ + rtx const im8 = GEN_INT (-8); + rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1]; + rtx sreg, areg, tmp, smema; + HOST_WIDE_INT i; + + smema = XEXP (smem, 0); + if (GET_CODE (smema) == LO_SUM) + smema = force_reg (Pmode, smema); + + /* Generate all the tmp registers we need. */ + for (i = 0; i < words; ++i) + { + data_regs[i] = out_regs[i]; + ext_tmps[i] = gen_reg_rtx (DImode); + } + data_regs[words] = gen_reg_rtx (DImode); + + if (ofs != 0) + smem = adjust_address (smem, GET_MODE (smem), ofs); + + /* Load up all of the source data. */ + for (i = 0; i < words; ++i) + { + tmp = change_address (smem, DImode, + gen_rtx_AND (DImode, + plus_constant (DImode, smema, 8*i), + im8)); + set_mem_alias_set (tmp, 0); + emit_move_insn (data_regs[i], tmp); + } + + tmp = change_address (smem, DImode, + gen_rtx_AND (DImode, + plus_constant (DImode, smema, + 8*words - 1), + im8)); + set_mem_alias_set (tmp, 0); + emit_move_insn (data_regs[words], tmp); + + /* Extract the half-word fragments. Unfortunately DEC decided to make + extxh with offset zero a noop instead of zeroing the register, so + we must take care of that edge condition ourselves with cmov. */ + + sreg = copy_addr_to_reg (smema); + areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL, + 1, OPTAB_WIDEN); + for (i = 0; i < words; ++i) + { + emit_insn (gen_extql (data_regs[i], data_regs[i], sreg)); + emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg)); + emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i], + gen_rtx_IF_THEN_ELSE (DImode, + gen_rtx_EQ (DImode, areg, + const0_rtx), + const0_rtx, ext_tmps[i]))); + } + + /* Merge the half-words into whole words. */ + for (i = 0; i < words; ++i) + { + out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i], + ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN); + } +} + +/* Store an integral number of consecutive unaligned quadwords. DATA_REGS + may be NULL to store zeros. */ + +static void +alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem, + HOST_WIDE_INT words, HOST_WIDE_INT ofs) +{ + rtx const im8 = GEN_INT (-8); + rtx ins_tmps[MAX_MOVE_WORDS]; + rtx st_tmp_1, st_tmp_2, dreg; + rtx st_addr_1, st_addr_2, dmema; + HOST_WIDE_INT i; + + dmema = XEXP (dmem, 0); + if (GET_CODE (dmema) == LO_SUM) + dmema = force_reg (Pmode, dmema); + + /* Generate all the tmp registers we need. */ + if (data_regs != NULL) + for (i = 0; i < words; ++i) + ins_tmps[i] = gen_reg_rtx(DImode); + st_tmp_1 = gen_reg_rtx(DImode); + st_tmp_2 = gen_reg_rtx(DImode); + + if (ofs != 0) + dmem = adjust_address (dmem, GET_MODE (dmem), ofs); + + st_addr_2 = change_address (dmem, DImode, + gen_rtx_AND (DImode, + plus_constant (DImode, dmema, + words*8 - 1), + im8)); + set_mem_alias_set (st_addr_2, 0); + + st_addr_1 = change_address (dmem, DImode, + gen_rtx_AND (DImode, dmema, im8)); + set_mem_alias_set (st_addr_1, 0); + + /* Load up the destination end bits. 
*/ + emit_move_insn (st_tmp_2, st_addr_2); + emit_move_insn (st_tmp_1, st_addr_1); + + /* Shift the input data into place. */ + dreg = copy_addr_to_reg (dmema); + if (data_regs != NULL) + { + for (i = words-1; i >= 0; --i) + { + emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg)); + emit_insn (gen_insql (data_regs[i], data_regs[i], dreg)); + } + for (i = words-1; i > 0; --i) + { + ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i], + ins_tmps[i-1], ins_tmps[i-1], 1, + OPTAB_WIDEN); + } + } + + /* Split and merge the ends with the destination data. */ + emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg)); + emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg)); + + if (data_regs != NULL) + { + st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1], + st_tmp_2, 1, OPTAB_WIDEN); + st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0], + st_tmp_1, 1, OPTAB_WIDEN); + } + + /* Store it all. */ + emit_move_insn (st_addr_2, st_tmp_2); + for (i = words-1; i > 0; --i) + { + rtx tmp = change_address (dmem, DImode, + gen_rtx_AND (DImode, + plus_constant (DImode, + dmema, i*8), + im8)); + set_mem_alias_set (tmp, 0); + emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx); + } + emit_move_insn (st_addr_1, st_tmp_1); +} + + +/* Expand string/block move operations. + + operands[0] is the pointer to the destination. + operands[1] is the pointer to the source. + operands[2] is the number of bytes to move. + operands[3] is the alignment. */ + +int +alpha_expand_block_move (rtx operands[]) +{ + rtx bytes_rtx = operands[2]; + rtx align_rtx = operands[3]; + HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx); + HOST_WIDE_INT bytes = orig_bytes; + HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT; + HOST_WIDE_INT dst_align = src_align; + rtx orig_src = operands[1]; + rtx orig_dst = operands[0]; + rtx data_regs[2 * MAX_MOVE_WORDS + 16]; + rtx tmp; + unsigned int i, words, ofs, nregs = 0; + + if (orig_bytes <= 0) + return 1; + else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD) + return 0; + + /* Look for additional alignment information from recorded register info. 
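+
+    For example, if the source address is (plus (reg R) (const_int 4))
+    and R is recorded as 64-bit aligned, the PLUS case below still proves
+    32-bit alignment (a >= 64 but c % 8 != 0, so src_align becomes 32)
+    and the SImode copy loops can be used.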
*/ + + tmp = XEXP (orig_src, 0); + if (REG_P (tmp)) + src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp))); + else if (GET_CODE (tmp) == PLUS + && REG_P (XEXP (tmp, 0)) + && CONST_INT_P (XEXP (tmp, 1))) + { + unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1)); + unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0))); + + if (a > src_align) + { + if (a >= 64 && c % 8 == 0) + src_align = 64; + else if (a >= 32 && c % 4 == 0) + src_align = 32; + else if (a >= 16 && c % 2 == 0) + src_align = 16; + } + } + + tmp = XEXP (orig_dst, 0); + if (REG_P (tmp)) + dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp))); + else if (GET_CODE (tmp) == PLUS + && REG_P (XEXP (tmp, 0)) + && CONST_INT_P (XEXP (tmp, 1))) + { + unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1)); + unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0))); + + if (a > dst_align) + { + if (a >= 64 && c % 8 == 0) + dst_align = 64; + else if (a >= 32 && c % 4 == 0) + dst_align = 32; + else if (a >= 16 && c % 2 == 0) + dst_align = 16; + } + } + + ofs = 0; + if (src_align >= 64 && bytes >= 8) + { + words = bytes / 8; + + for (i = 0; i < words; ++i) + data_regs[nregs + i] = gen_reg_rtx (DImode); + + for (i = 0; i < words; ++i) + emit_move_insn (data_regs[nregs + i], + adjust_address (orig_src, DImode, ofs + i * 8)); + + nregs += words; + bytes -= words * 8; + ofs += words * 8; + } + + if (src_align >= 32 && bytes >= 4) + { + words = bytes / 4; + + for (i = 0; i < words; ++i) + data_regs[nregs + i] = gen_reg_rtx (SImode); + + for (i = 0; i < words; ++i) + emit_move_insn (data_regs[nregs + i], + adjust_address (orig_src, SImode, ofs + i * 4)); + + nregs += words; + bytes -= words * 4; + ofs += words * 4; + } + + if (bytes >= 8) + { + words = bytes / 8; + + for (i = 0; i < words+1; ++i) + data_regs[nregs + i] = gen_reg_rtx (DImode); + + alpha_expand_unaligned_load_words (data_regs + nregs, orig_src, + words, ofs); + + nregs += words; + bytes -= words * 8; + ofs += words * 8; + } + + if (! TARGET_BWX && bytes >= 4) + { + data_regs[nregs++] = tmp = gen_reg_rtx (SImode); + alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0); + bytes -= 4; + ofs += 4; + } + + if (bytes >= 2) + { + if (src_align >= 16) + { + do { + data_regs[nregs++] = tmp = gen_reg_rtx (HImode); + emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs)); + bytes -= 2; + ofs += 2; + } while (bytes >= 2); + } + else if (! TARGET_BWX) + { + data_regs[nregs++] = tmp = gen_reg_rtx (HImode); + alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0); + bytes -= 2; + ofs += 2; + } + } + + while (bytes > 0) + { + data_regs[nregs++] = tmp = gen_reg_rtx (QImode); + emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs)); + bytes -= 1; + ofs += 1; + } + + gcc_assert (nregs <= ARRAY_SIZE (data_regs)); + + /* Now save it back out again. */ + + i = 0, ofs = 0; + + /* Write out the data in whatever chunks reading the source allowed. */ + if (dst_align >= 64) + { + while (i < nregs && GET_MODE (data_regs[i]) == DImode) + { + emit_move_insn (adjust_address (orig_dst, DImode, ofs), + data_regs[i]); + ofs += 8; + i++; + } + } + + if (dst_align >= 32) + { + /* If the source has remaining DImode regs, write them out in + two pieces. 
*/ + while (i < nregs && GET_MODE (data_regs[i]) == DImode) + { + tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32), + NULL_RTX, 1, OPTAB_WIDEN); + + emit_move_insn (adjust_address (orig_dst, SImode, ofs), + gen_lowpart (SImode, data_regs[i])); + emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4), + gen_lowpart (SImode, tmp)); + ofs += 8; + i++; + } + + while (i < nregs && GET_MODE (data_regs[i]) == SImode) + { + emit_move_insn (adjust_address (orig_dst, SImode, ofs), + data_regs[i]); + ofs += 4; + i++; + } + } + + if (i < nregs && GET_MODE (data_regs[i]) == DImode) + { + /* Write out a remaining block of words using unaligned methods. */ + + for (words = 1; i + words < nregs; words++) + if (GET_MODE (data_regs[i + words]) != DImode) + break; + + if (words == 1) + alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs); + else + alpha_expand_unaligned_store_words (data_regs + i, orig_dst, + words, ofs); + + i += words; + ofs += words * 8; + } + + /* Due to the above, this won't be aligned. */ + /* ??? If we have more than one of these, consider constructing full + words in registers and using alpha_expand_unaligned_store_words. */ + while (i < nregs && GET_MODE (data_regs[i]) == SImode) + { + alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs); + ofs += 4; + i++; + } + + if (dst_align >= 16) + while (i < nregs && GET_MODE (data_regs[i]) == HImode) + { + emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]); + i++; + ofs += 2; + } + else + while (i < nregs && GET_MODE (data_regs[i]) == HImode) + { + alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs); + i++; + ofs += 2; + } + + /* The remainder must be byte copies. */ + while (i < nregs) + { + gcc_assert (GET_MODE (data_regs[i]) == QImode); + emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]); + i++; + ofs += 1; + } + + return 1; +} + +int +alpha_expand_block_clear (rtx operands[]) +{ + rtx bytes_rtx = operands[1]; + rtx align_rtx = operands[3]; + HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx); + HOST_WIDE_INT bytes = orig_bytes; + HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT; + HOST_WIDE_INT alignofs = 0; + rtx orig_dst = operands[0]; + rtx tmp; + int i, words, ofs = 0; + + if (orig_bytes <= 0) + return 1; + if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD) + return 0; + + /* Look for stricter alignment. */ + tmp = XEXP (orig_dst, 0); + if (REG_P (tmp)) + align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp))); + else if (GET_CODE (tmp) == PLUS + && REG_P (XEXP (tmp, 0)) + && CONST_INT_P (XEXP (tmp, 1))) + { + HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1)); + int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0))); + + if (a > align) + { + if (a >= 64) + align = a, alignofs = 8 - c % 8; + else if (a >= 32) + align = a, alignofs = 4 - c % 4; + else if (a >= 16) + align = a, alignofs = 2 - c % 2; + } + } + + /* Handle an unaligned prefix first. */ + + if (alignofs > 0) + { +#if HOST_BITS_PER_WIDE_INT >= 64 + /* Given that alignofs is bounded by align, the only time BWX could + generate three stores is for a 7 byte fill. Prefer two individual + stores over a load/mask/store sequence. */ + if ((!TARGET_BWX || alignofs == 7) + && align >= 32 + && !(alignofs == 4 && bytes >= 4)) + { + enum machine_mode mode = (align >= 64 ? DImode : SImode); + int inv_alignofs = (align >= 64 ? 
8 : 4) - alignofs; + rtx mem, tmp; + HOST_WIDE_INT mask; + + mem = adjust_address (orig_dst, mode, ofs - inv_alignofs); + set_mem_alias_set (mem, 0); + + mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8)); + if (bytes < alignofs) + { + mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8); + ofs += bytes; + bytes = 0; + } + else + { + bytes -= alignofs; + ofs += alignofs; + } + alignofs = 0; + + tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask), + NULL_RTX, 1, OPTAB_WIDEN); + + emit_move_insn (mem, tmp); + } +#endif + + if (TARGET_BWX && (alignofs & 1) && bytes >= 1) + { + emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx); + bytes -= 1; + ofs += 1; + alignofs -= 1; + } + if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2) + { + emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx); + bytes -= 2; + ofs += 2; + alignofs -= 2; + } + if (alignofs == 4 && bytes >= 4) + { + emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx); + bytes -= 4; + ofs += 4; + alignofs = 0; + } + + /* If we've not used the extra lead alignment information by now, + we won't be able to. Downgrade align to match what's left over. */ + if (alignofs > 0) + { + alignofs = alignofs & -alignofs; + align = MIN (align, alignofs * BITS_PER_UNIT); + } + } + + /* Handle a block of contiguous long-words. */ + + if (align >= 64 && bytes >= 8) + { + words = bytes / 8; + + for (i = 0; i < words; ++i) + emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8), + const0_rtx); + + bytes -= words * 8; + ofs += words * 8; + } + + /* If the block is large and appropriately aligned, emit a single + store followed by a sequence of stq_u insns. */ + + if (align >= 32 && bytes > 16) + { + rtx orig_dsta; + + emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx); + bytes -= 4; + ofs += 4; + + orig_dsta = XEXP (orig_dst, 0); + if (GET_CODE (orig_dsta) == LO_SUM) + orig_dsta = force_reg (Pmode, orig_dsta); + + words = bytes / 8; + for (i = 0; i < words; ++i) + { + rtx mem + = change_address (orig_dst, DImode, + gen_rtx_AND (DImode, + plus_constant (DImode, orig_dsta, + ofs + i*8), + GEN_INT (-8))); + set_mem_alias_set (mem, 0); + emit_move_insn (mem, const0_rtx); + } + + /* Depending on the alignment, the first stq_u may have overlapped + with the initial stl, which means that the last stq_u didn't + write as much as it would appear. Leave those questionable bytes + unaccounted for. */ + bytes -= words * 8 - 4; + ofs += words * 8 - 4; + } + + /* Handle a smaller block of aligned words. */ + + if ((align >= 64 && bytes == 4) + || (align == 32 && bytes >= 4)) + { + words = bytes / 4; + + for (i = 0; i < words; ++i) + emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4), + const0_rtx); + + bytes -= words * 4; + ofs += words * 4; + } + + /* An unaligned block uses stq_u stores for as many as possible. */ + + if (bytes >= 8) + { + words = bytes / 8; + + alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs); + + bytes -= words * 8; + ofs += words * 8; + } + + /* Next clean up any trailing pieces. */ + +#if HOST_BITS_PER_WIDE_INT >= 64 + /* Count the number of bits in BYTES for which aligned stores could + be emitted. */ + words = 0; + for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1) + if (bytes & i) + words += 1; + + /* If we have appropriate alignment (and it wouldn't take too many + instructions otherwise), mask out the bytes we need. */ + if (TARGET_BWX ? 
words > 2 : bytes > 0) + { + if (align >= 64) + { + rtx mem, tmp; + HOST_WIDE_INT mask; + + mem = adjust_address (orig_dst, DImode, ofs); + set_mem_alias_set (mem, 0); + + mask = ~(HOST_WIDE_INT)0 << (bytes * 8); + + tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask), + NULL_RTX, 1, OPTAB_WIDEN); + + emit_move_insn (mem, tmp); + return 1; + } + else if (align >= 32 && bytes < 4) + { + rtx mem, tmp; + HOST_WIDE_INT mask; + + mem = adjust_address (orig_dst, SImode, ofs); + set_mem_alias_set (mem, 0); + + mask = ~(HOST_WIDE_INT)0 << (bytes * 8); + + tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask), + NULL_RTX, 1, OPTAB_WIDEN); + + emit_move_insn (mem, tmp); + return 1; + } + } +#endif + + if (!TARGET_BWX && bytes >= 4) + { + alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs); + bytes -= 4; + ofs += 4; + } + + if (bytes >= 2) + { + if (align >= 16) + { + do { + emit_move_insn (adjust_address (orig_dst, HImode, ofs), + const0_rtx); + bytes -= 2; + ofs += 2; + } while (bytes >= 2); + } + else if (! TARGET_BWX) + { + alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs); + bytes -= 2; + ofs += 2; + } + } + + while (bytes > 0) + { + emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx); + bytes -= 1; + ofs += 1; + } + + return 1; +} + +/* Returns a mask so that zap(x, value) == x & mask. */ + +rtx +alpha_expand_zap_mask (HOST_WIDE_INT value) +{ + rtx result; + int i; + + if (HOST_BITS_PER_WIDE_INT >= 64) + { + HOST_WIDE_INT mask = 0; + + for (i = 7; i >= 0; --i) + { + mask <<= 8; + if (!((value >> i) & 1)) + mask |= 0xff; + } + + result = gen_int_mode (mask, DImode); + } + else + { + HOST_WIDE_INT mask_lo = 0, mask_hi = 0; + + gcc_assert (HOST_BITS_PER_WIDE_INT == 32); + + for (i = 7; i >= 4; --i) + { + mask_hi <<= 8; + if (!((value >> i) & 1)) + mask_hi |= 0xff; + } + + for (i = 3; i >= 0; --i) + { + mask_lo <<= 8; + if (!((value >> i) & 1)) + mask_lo |= 0xff; + } + + result = immed_double_const (mask_lo, mask_hi, DImode); + } + + return result; +} + +void +alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx), + enum machine_mode mode, + rtx op0, rtx op1, rtx op2) +{ + op0 = gen_lowpart (mode, op0); + + if (op1 == const0_rtx) + op1 = CONST0_RTX (mode); + else + op1 = gen_lowpart (mode, op1); + + if (op2 == const0_rtx) + op2 = CONST0_RTX (mode); + else + op2 = gen_lowpart (mode, op2); + + emit_insn ((*gen) (op0, op1, op2)); +} + +/* A subroutine of the atomic operation splitters. Jump to LABEL if + COND is true. Mark the jump as unlikely to be taken. */ + +static void +emit_unlikely_jump (rtx cond, rtx label) +{ + int very_unlikely = REG_BR_PROB_BASE / 100 - 1; + rtx x; + + x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx); + x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x)); + add_int_reg_note (x, REG_BR_PROB, very_unlikely); +} + +/* A subroutine of the atomic operation splitters. Emit a load-locked + instruction in MODE. */ + +static void +emit_load_locked (enum machine_mode mode, rtx reg, rtx mem) +{ + rtx (*fn) (rtx, rtx) = NULL; + if (mode == SImode) + fn = gen_load_locked_si; + else if (mode == DImode) + fn = gen_load_locked_di; + emit_insn (fn (reg, mem)); +} + +/* A subroutine of the atomic operation splitters. Emit a store-conditional + instruction in MODE. 
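+
+   Together with emit_load_locked above, this is the building block of the
+   canonical Alpha LL/SC retry loop.  For instance, the fetch-and-operate
+   splitter below ends up emitting assembly of roughly this shape for an
+   atomic add (illustrative only; actual register choices vary):
+
+	1:	ldq_l	$1,0($16)	# load the old value, locked
+		addq	$1,$17,$28	# apply the operation
+		stq_c	$28,0($16)	# try to store the result
+		beq	$28,1b		# reservation lost: retry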
*/
+
+static void
+emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
+{
+  rtx (*fn) (rtx, rtx, rtx) = NULL;
+  if (mode == SImode)
+    fn = gen_store_conditional_si;
+  else if (mode == DImode)
+    fn = gen_store_conditional_di;
+  emit_insn (fn (res, mem, val));
+}
+
+/* Subroutines of the atomic operation splitters.  Emit barriers
+   as needed for the memory MODEL.  */
+
+static void
+alpha_pre_atomic_barrier (enum memmodel model)
+{
+  if (need_atomic_barrier_p (model, true))
+    emit_insn (gen_memory_barrier ());
+}
+
+static void
+alpha_post_atomic_barrier (enum memmodel model)
+{
+  if (need_atomic_barrier_p (model, false))
+    emit_insn (gen_memory_barrier ());
+}
+
+/* A subroutine of the atomic operation splitters.  Emit an insxl
+   instruction in MODE.  */
+
+static rtx
+emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
+{
+  rtx ret = gen_reg_rtx (DImode);
+  rtx (*fn) (rtx, rtx, rtx);
+
+  switch (mode)
+    {
+    case QImode:
+      fn = gen_insbl;
+      break;
+    case HImode:
+      fn = gen_inswl;
+      break;
+    case SImode:
+      fn = gen_insll;
+      break;
+    case DImode:
+      fn = gen_insql;
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  op1 = force_reg (mode, op1);
+  emit_insn (fn (ret, op1, op2));
+
+  return ret;
+}
+
+/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
+   to perform.  MEM is the memory on which to operate.  VAL is the second
+   operand of the binary operator.  BEFORE and AFTER are optional locations
+   to return the value of MEM either before or after the operation.  SCRATCH
+   is a scratch register.  */
+
+void
+alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
+		       rtx after, rtx scratch, enum memmodel model)
+{
+  enum machine_mode mode = GET_MODE (mem);
+  rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
+
+  alpha_pre_atomic_barrier (model);
+
+  label = gen_label_rtx ();
+  emit_label (label);
+  label = gen_rtx_LABEL_REF (DImode, label);
+
+  if (before == NULL)
+    before = scratch;
+  emit_load_locked (mode, before, mem);
+
+  if (code == NOT)
+    {
+      x = gen_rtx_AND (mode, before, val);
+      emit_insn (gen_rtx_SET (VOIDmode, val, x));
+
+      x = gen_rtx_NOT (mode, val);
+    }
+  else
+    x = gen_rtx_fmt_ee (code, mode, before, val);
+  if (after)
+    emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
+  emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
+
+  emit_store_conditional (mode, cond, mem, scratch);
+
+  x = gen_rtx_EQ (DImode, cond, const0_rtx);
+  emit_unlikely_jump (x, label);
+
+  alpha_post_atomic_barrier (model);
+}
+
+/* Expand a compare and swap operation.
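+
+   The splitter below open-codes the operation as an LL/SC sequence;
+   schematically, for the strong DImode variant with seq-cst barriers
+   (illustrative only; register choices vary):
+
+	mb
+	1:	ldq_l	$1,0($16)	# RETVAL = *MEM, locked
+		cmpeq	$1,$17,$2	# COND = (RETVAL == OLDVAL)
+		beq	$2,2f		# mismatch: fail
+		bis	$31,$18,$2	# COND = NEWVAL
+		stq_c	$2,0($16)	# store NEWVAL if still locked
+		beq	$2,1b		# reservation lost: retry
+	2:	mb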
*/ + +void +alpha_split_compare_and_swap (rtx operands[]) +{ + rtx cond, retval, mem, oldval, newval; + bool is_weak; + enum memmodel mod_s, mod_f; + enum machine_mode mode; + rtx label1, label2, x; + + cond = operands[0]; + retval = operands[1]; + mem = operands[2]; + oldval = operands[3]; + newval = operands[4]; + is_weak = (operands[5] != const0_rtx); + mod_s = (enum memmodel) INTVAL (operands[6]); + mod_f = (enum memmodel) INTVAL (operands[7]); + mode = GET_MODE (mem); + + alpha_pre_atomic_barrier (mod_s); + + label1 = NULL_RTX; + if (!is_weak) + { + label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); + emit_label (XEXP (label1, 0)); + } + label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); + + emit_load_locked (mode, retval, mem); + + x = gen_lowpart (DImode, retval); + if (oldval == const0_rtx) + { + emit_move_insn (cond, const0_rtx); + x = gen_rtx_NE (DImode, x, const0_rtx); + } + else + { + x = gen_rtx_EQ (DImode, x, oldval); + emit_insn (gen_rtx_SET (VOIDmode, cond, x)); + x = gen_rtx_EQ (DImode, cond, const0_rtx); + } + emit_unlikely_jump (x, label2); + + emit_move_insn (cond, newval); + emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond)); + + if (!is_weak) + { + x = gen_rtx_EQ (DImode, cond, const0_rtx); + emit_unlikely_jump (x, label1); + } + + if (mod_f != MEMMODEL_RELAXED) + emit_label (XEXP (label2, 0)); + + alpha_post_atomic_barrier (mod_s); + + if (mod_f == MEMMODEL_RELAXED) + emit_label (XEXP (label2, 0)); +} + +void +alpha_expand_compare_and_swap_12 (rtx operands[]) +{ + rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f; + enum machine_mode mode; + rtx addr, align, wdst; + rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx); + + cond = operands[0]; + dst = operands[1]; + mem = operands[2]; + oldval = operands[3]; + newval = operands[4]; + is_weak = operands[5]; + mod_s = operands[6]; + mod_f = operands[7]; + mode = GET_MODE (mem); + + /* We forced the address into a register via mem_noofs_operand. 
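+
+   QImode and HImode have no load-locked/store-conditional forms, so the
+   compare-and-swap is performed on the aligned DImode word that contains
+   the datum: ALIGN below is ADDR & ~7, OLDVAL is zero-extended, and
+   NEWVAL is pre-positioned in its byte lane with insbl/inswl.  The data
+   path of the resulting word-level operation corresponds to this C model
+   (a hypothetical helper for exposition, not part of GCC; assumes
+   <stdint.h>):
+
+	static uint64_t
+	cas_byte_in_word (uint64_t word, uint64_t addr,
+			  uint8_t oldb, uint8_t newb)
+	{
+	  unsigned int shift = (addr & 7) * 8;	      // byte lane
+	  if (((word >> shift) & 0xff) != oldb)	      // extbl + cmpeq
+	    return word;			      // mismatch: no store
+	  word &= ~((uint64_t) 0xff << shift);	      // mskbl
+	  return word | ((uint64_t) newb << shift);   // merge the new byte
+	}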
*/ + addr = XEXP (mem, 0); + gcc_assert (register_operand (addr, DImode)); + + align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8), + NULL_RTX, 1, OPTAB_DIRECT); + + oldval = convert_modes (DImode, mode, oldval, 1); + + if (newval != const0_rtx) + newval = emit_insxl (mode, newval, addr); + + wdst = gen_reg_rtx (DImode); + if (mode == QImode) + gen = gen_atomic_compare_and_swapqi_1; + else + gen = gen_atomic_compare_and_swaphi_1; + emit_insn (gen (cond, wdst, mem, oldval, newval, align, + is_weak, mod_s, mod_f)); + + emit_move_insn (dst, gen_lowpart (mode, wdst)); +} + +void +alpha_split_compare_and_swap_12 (rtx operands[]) +{ + rtx cond, dest, orig_mem, oldval, newval, align, scratch; + enum machine_mode mode; + bool is_weak; + enum memmodel mod_s, mod_f; + rtx label1, label2, mem, addr, width, mask, x; + + cond = operands[0]; + dest = operands[1]; + orig_mem = operands[2]; + oldval = operands[3]; + newval = operands[4]; + align = operands[5]; + is_weak = (operands[6] != const0_rtx); + mod_s = (enum memmodel) INTVAL (operands[7]); + mod_f = (enum memmodel) INTVAL (operands[8]); + scratch = operands[9]; + mode = GET_MODE (orig_mem); + addr = XEXP (orig_mem, 0); + + mem = gen_rtx_MEM (DImode, align); + MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem); + if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER) + set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER); + + alpha_pre_atomic_barrier (mod_s); + + label1 = NULL_RTX; + if (!is_weak) + { + label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); + emit_label (XEXP (label1, 0)); + } + label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); + + emit_load_locked (DImode, scratch, mem); + + width = GEN_INT (GET_MODE_BITSIZE (mode)); + mask = GEN_INT (mode == QImode ? 0xff : 0xffff); + emit_insn (gen_extxl (dest, scratch, width, addr)); + + if (oldval == const0_rtx) + { + emit_move_insn (cond, const0_rtx); + x = gen_rtx_NE (DImode, dest, const0_rtx); + } + else + { + x = gen_rtx_EQ (DImode, dest, oldval); + emit_insn (gen_rtx_SET (VOIDmode, cond, x)); + x = gen_rtx_EQ (DImode, cond, const0_rtx); + } + emit_unlikely_jump (x, label2); + + emit_insn (gen_mskxl (cond, scratch, mask, addr)); + + if (newval != const0_rtx) + emit_insn (gen_iordi3 (cond, cond, newval)); + + emit_store_conditional (DImode, cond, mem, cond); + + if (!is_weak) + { + x = gen_rtx_EQ (DImode, cond, const0_rtx); + emit_unlikely_jump (x, label1); + } + + if (mod_f != MEMMODEL_RELAXED) + emit_label (XEXP (label2, 0)); + + alpha_post_atomic_barrier (mod_s); + + if (mod_f == MEMMODEL_RELAXED) + emit_label (XEXP (label2, 0)); +} + +/* Expand an atomic exchange operation. 
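+
+   For SImode and DImode this implements __atomic_exchange_n directly:
+   load-locked the old value into RETVAL, move VAL into the scratch, and
+   loop on the store-conditional until it succeeds.  A user-level trigger
+   would be, e.g. (illustrative):
+
+	long old = __atomic_exchange_n (&lock_word, 1, __ATOMIC_ACQ_REL);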
*/ + +void +alpha_split_atomic_exchange (rtx operands[]) +{ + rtx retval, mem, val, scratch; + enum memmodel model; + enum machine_mode mode; + rtx label, x, cond; + + retval = operands[0]; + mem = operands[1]; + val = operands[2]; + model = (enum memmodel) INTVAL (operands[3]); + scratch = operands[4]; + mode = GET_MODE (mem); + cond = gen_lowpart (DImode, scratch); + + alpha_pre_atomic_barrier (model); + + label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); + emit_label (XEXP (label, 0)); + + emit_load_locked (mode, retval, mem); + emit_move_insn (scratch, val); + emit_store_conditional (mode, cond, mem, scratch); + + x = gen_rtx_EQ (DImode, cond, const0_rtx); + emit_unlikely_jump (x, label); + + alpha_post_atomic_barrier (model); +} + +void +alpha_expand_atomic_exchange_12 (rtx operands[]) +{ + rtx dst, mem, val, model; + enum machine_mode mode; + rtx addr, align, wdst; + rtx (*gen) (rtx, rtx, rtx, rtx, rtx); + + dst = operands[0]; + mem = operands[1]; + val = operands[2]; + model = operands[3]; + mode = GET_MODE (mem); + + /* We forced the address into a register via mem_noofs_operand. */ + addr = XEXP (mem, 0); + gcc_assert (register_operand (addr, DImode)); + + align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8), + NULL_RTX, 1, OPTAB_DIRECT); + + /* Insert val into the correct byte location within the word. */ + if (val != const0_rtx) + val = emit_insxl (mode, val, addr); + + wdst = gen_reg_rtx (DImode); + if (mode == QImode) + gen = gen_atomic_exchangeqi_1; + else + gen = gen_atomic_exchangehi_1; + emit_insn (gen (wdst, mem, val, align, model)); + + emit_move_insn (dst, gen_lowpart (mode, wdst)); +} + +void +alpha_split_atomic_exchange_12 (rtx operands[]) +{ + rtx dest, orig_mem, addr, val, align, scratch; + rtx label, mem, width, mask, x; + enum machine_mode mode; + enum memmodel model; + + dest = operands[0]; + orig_mem = operands[1]; + val = operands[2]; + align = operands[3]; + model = (enum memmodel) INTVAL (operands[4]); + scratch = operands[5]; + mode = GET_MODE (orig_mem); + addr = XEXP (orig_mem, 0); + + mem = gen_rtx_MEM (DImode, align); + MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem); + if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER) + set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER); + + alpha_pre_atomic_barrier (model); + + label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); + emit_label (XEXP (label, 0)); + + emit_load_locked (DImode, scratch, mem); + + width = GEN_INT (GET_MODE_BITSIZE (mode)); + mask = GEN_INT (mode == QImode ? 0xff : 0xffff); + emit_insn (gen_extxl (dest, scratch, width, addr)); + emit_insn (gen_mskxl (scratch, scratch, mask, addr)); + if (val != const0_rtx) + emit_insn (gen_iordi3 (scratch, scratch, val)); + + emit_store_conditional (DImode, scratch, mem, scratch); + + x = gen_rtx_EQ (DImode, scratch, const0_rtx); + emit_unlikely_jump (x, label); + + alpha_post_atomic_barrier (model); +} + +/* Adjust the cost of a scheduling dependency. Return the new cost of + a dependency LINK or INSN on DEP_INSN. COST is the current cost. */ + +static int +alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost) +{ + enum attr_type dep_insn_type; + + /* If the dependence is an anti-dependence, there is no cost. For an + output dependence, there is sometimes a cost, but it doesn't seem + worth handling those few cases. */ + if (REG_NOTE_KIND (link) != 0) + return cost; + + /* If we can't recognize the insns, we can't really do anything. 
*/
+  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
+    return cost;
+
+  dep_insn_type = get_attr_type (dep_insn);
+
+  /* Bring in the user-defined memory latency.  */
+  if (dep_insn_type == TYPE_ILD
+      || dep_insn_type == TYPE_FLD
+      || dep_insn_type == TYPE_LDSYM)
+    cost += alpha_memory_latency - 1;
+
+  /* Everything else handled in DFA bypasses now.  */
+
+  return cost;
+}
+
+/* The number of instructions that can be issued per cycle.  */
+
+static int
+alpha_issue_rate (void)
+{
+  return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
+}
+
+/* How many alternative schedules to try.  This should be as wide as the
+   scheduling freedom in the DFA, but no wider.  Making this value too
+   large results in extra work for the scheduler.
+
+   For EV4, loads can be issued to either IB0 or IB1, thus we have 2
+   alternative schedules.  For EV5, we can choose between E0/E1 and
+   FA/FM.  For EV6, an arithmetic insn can be issued to U0/U1/L0/L1.  */
+
+static int
+alpha_multipass_dfa_lookahead (void)
+{
+  return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
+}
+
+/* Machine-specific function data.  */
+
+struct GTY(()) alpha_links;
+
+struct GTY(()) machine_function
+{
+  /* For OSF.  */
+  const char *some_ld_name;
+
+  /* For flag_reorder_blocks_and_partition.  */
+  rtx gp_save_rtx;
+
+  /* For VMS condition handlers.  */
+  bool uses_condition_handler;
+
+  /* Linkage entries.  */
+  splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
+    links;
+};
+
+/* How to allocate a 'struct machine_function'.  */
+
+static struct machine_function *
+alpha_init_machine_status (void)
+{
+  return ggc_alloc_cleared_machine_function ();
+}
+
+/* Support for frame-based VMS condition handlers.  */
+
+/* A VMS condition handler may be established for a function with a call to
+   __builtin_establish_vms_condition_handler, and cancelled with a call to
+   __builtin_revert_vms_condition_handler.
+
+   The VMS Condition Handling Facility knows about the existence of a
+   handler from the procedure descriptor .handler field.  Like the VMS
+   native compilers, we store the user-specified handler's address at a
+   fixed location in the stack frame and point the procedure descriptor at
+   a common wrapper which fetches the real handler's address and issues an
+   indirect call.
+
+   The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
+
+   We force the procedure kind to PT_STACK, and the fixed frame location is
+   fp+8, just before the register save area.  We use the handler_data field
+   in the procedure descriptor to state the fp offset at which the installed
+   handler address can be found.  */
+
+#define VMS_COND_HANDLER_FP_OFFSET 8
+
+/* Expand code to store the currently installed user VMS condition handler
+   into TARGET and install HANDLER as the new condition handler.  */
+
+void
+alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
+{
+  rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
+					    VMS_COND_HANDLER_FP_OFFSET);
+
+  rtx handler_slot
+    = gen_rtx_MEM (DImode, handler_slot_address);
+
+  emit_move_insn (target, handler_slot);
+  emit_move_insn (handler_slot, handler);
+
+  /* Notify the start/prologue/epilogue emitters that the condition handler
+     slot is needed.  In addition to reserving the slot space, this will
+     force the procedure kind to PT_STACK, to ensure that the
+     hard_frame_pointer_rtx use above is correct.  */
+  cfun->machine->uses_condition_handler = true;
+}
+
+/* Expand code to store the current VMS condition handler into TARGET and
+   nullify it.
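+
+   Taken together, the two builtins are used from VMS application code
+   roughly as follows (illustrative; my_handler is a user-supplied
+   condition handler):
+
+	void *prev = __builtin_establish_vms_condition_handler (my_handler);
+	... code running under my_handler ...
+	void *old = __builtin_revert_vms_condition_handler ();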
*/ + +void +alpha_expand_builtin_revert_vms_condition_handler (rtx target) +{ + /* We implement this by establishing a null condition handler, with the tiny + side effect of setting uses_condition_handler. This is a little bit + pessimistic if no actual builtin_establish call is ever issued, which is + not a real problem and expected never to happen anyway. */ + + alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx); +} + +/* Functions to save and restore alpha_return_addr_rtx. */ + +/* Start the ball rolling with RETURN_ADDR_RTX. */ + +rtx +alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED) +{ + if (count != 0) + return const0_rtx; + + return get_hard_reg_initial_val (Pmode, REG_RA); +} + +/* Return or create a memory slot containing the gp value for the current + function. Needed only if TARGET_LD_BUGGY_LDGP. */ + +rtx +alpha_gp_save_rtx (void) +{ + rtx seq, m = cfun->machine->gp_save_rtx; + + if (m == NULL) + { + start_sequence (); + + m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD); + m = validize_mem (m); + emit_move_insn (m, pic_offset_table_rtx); + + seq = get_insns (); + end_sequence (); + + /* We used to simply emit the sequence after entry_of_function. + However this breaks the CFG if the first instruction in the + first block is not the NOTE_INSN_BASIC_BLOCK, for example a + label. Emit the sequence properly on the edge. We are only + invoked from dw2_build_landing_pads and finish_eh_generation + will call commit_edge_insertions thanks to a kludge. */ + insert_insn_on_edge (seq, + single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))); + + cfun->machine->gp_save_rtx = m; + } + + return m; +} + +static void +alpha_instantiate_decls (void) +{ + if (cfun->machine->gp_save_rtx != NULL_RTX) + instantiate_decl_rtl (cfun->machine->gp_save_rtx); +} + +static int +alpha_ra_ever_killed (void) +{ + rtx top; + + if (!has_hard_reg_initial_val (Pmode, REG_RA)) + return (int)df_regs_ever_live_p (REG_RA); + + push_topmost_sequence (); + top = get_insns (); + pop_topmost_sequence (); + + return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX); +} + + +/* Return the trap mode suffix applicable to the current + instruction, or NULL. */ + +static const char * +get_trap_mode_suffix (void) +{ + enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn); + + switch (s) + { + case TRAP_SUFFIX_NONE: + return NULL; + + case TRAP_SUFFIX_SU: + if (alpha_fptm >= ALPHA_FPTM_SU) + return "su"; + return NULL; + + case TRAP_SUFFIX_SUI: + if (alpha_fptm >= ALPHA_FPTM_SUI) + return "sui"; + return NULL; + + case TRAP_SUFFIX_V_SV: + switch (alpha_fptm) + { + case ALPHA_FPTM_N: + return NULL; + case ALPHA_FPTM_U: + return "v"; + case ALPHA_FPTM_SU: + case ALPHA_FPTM_SUI: + return "sv"; + default: + gcc_unreachable (); + } + + case TRAP_SUFFIX_V_SV_SVI: + switch (alpha_fptm) + { + case ALPHA_FPTM_N: + return NULL; + case ALPHA_FPTM_U: + return "v"; + case ALPHA_FPTM_SU: + return "sv"; + case ALPHA_FPTM_SUI: + return "svi"; + default: + gcc_unreachable (); + } + break; + + case TRAP_SUFFIX_U_SU_SUI: + switch (alpha_fptm) + { + case ALPHA_FPTM_N: + return NULL; + case ALPHA_FPTM_U: + return "u"; + case ALPHA_FPTM_SU: + return "su"; + case ALPHA_FPTM_SUI: + return "sui"; + default: + gcc_unreachable (); + } + break; + + default: + gcc_unreachable (); + } + gcc_unreachable (); +} + +/* Return the rounding mode suffix applicable to the current + instruction, or NULL. 
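+
+   For example, compiling with -mfp-rounding-mode=d and -mfp-trap-mode=sui
+   makes the '/' operand code in print_operand below emit "/suid", so a
+   plain addt instruction is printed as addt/suid.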
*/ + +static const char * +get_round_mode_suffix (void) +{ + enum attr_round_suffix s = get_attr_round_suffix (current_output_insn); + + switch (s) + { + case ROUND_SUFFIX_NONE: + return NULL; + case ROUND_SUFFIX_NORMAL: + switch (alpha_fprm) + { + case ALPHA_FPRM_NORM: + return NULL; + case ALPHA_FPRM_MINF: + return "m"; + case ALPHA_FPRM_CHOP: + return "c"; + case ALPHA_FPRM_DYN: + return "d"; + default: + gcc_unreachable (); + } + break; + + case ROUND_SUFFIX_C: + return "c"; + + default: + gcc_unreachable (); + } + gcc_unreachable (); +} + +/* Locate some local-dynamic symbol still in use by this function + so that we can print its name in some movdi_er_tlsldm pattern. */ + +static int +get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED) +{ + rtx x = *px; + + if (GET_CODE (x) == SYMBOL_REF + && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC) + { + cfun->machine->some_ld_name = XSTR (x, 0); + return 1; + } + + return 0; +} + +static const char * +get_some_local_dynamic_name (void) +{ + rtx insn; + + if (cfun->machine->some_ld_name) + return cfun->machine->some_ld_name; + + for (insn = get_insns (); insn ; insn = NEXT_INSN (insn)) + if (INSN_P (insn) + && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0)) + return cfun->machine->some_ld_name; + + gcc_unreachable (); +} + +/* Print an operand. Recognize special options, documented below. */ + +void +print_operand (FILE *file, rtx x, int code) +{ + int i; + + switch (code) + { + case '~': + /* Print the assembler name of the current function. */ + assemble_name (file, alpha_fnname); + break; + + case '&': + assemble_name (file, get_some_local_dynamic_name ()); + break; + + case '/': + { + const char *trap = get_trap_mode_suffix (); + const char *round = get_round_mode_suffix (); + + if (trap || round) + fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : "")); + break; + } + + case ',': + /* Generates single precision instruction suffix. */ + fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file); + break; + + case '-': + /* Generates double precision instruction suffix. */ + fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file); + break; + + case '#': + if (alpha_this_literal_sequence_number == 0) + alpha_this_literal_sequence_number = alpha_next_sequence_number++; + fprintf (file, "%d", alpha_this_literal_sequence_number); + break; + + case '*': + if (alpha_this_gpdisp_sequence_number == 0) + alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++; + fprintf (file, "%d", alpha_this_gpdisp_sequence_number); + break; + + case 'H': + if (GET_CODE (x) == HIGH) + output_addr_const (file, XEXP (x, 0)); + else + output_operand_lossage ("invalid %%H value"); + break; + + case 'J': + { + const char *lituse; + + if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL) + { + x = XVECEXP (x, 0, 0); + lituse = "lituse_tlsgd"; + } + else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL) + { + x = XVECEXP (x, 0, 0); + lituse = "lituse_tlsldm"; + } + else if (CONST_INT_P (x)) + lituse = "lituse_jsr"; + else + { + output_operand_lossage ("invalid %%J value"); + break; + } + + if (x != const0_rtx) + fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x)); + } + break; + + case 'j': + { + const char *lituse; + +#ifdef HAVE_AS_JSRDIRECT_RELOCS + lituse = "lituse_jsrdirect"; +#else + lituse = "lituse_jsr"; +#endif + + gcc_assert (INTVAL (x) != 0); + fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x)); + } + break; + case 'r': + /* If this operand is the constant zero, write it as "$31". 
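+	 Register $31 always reads as zero, so e.g. a "%r2" whose operand
+	 is const0_rtx is printed as $31 and the instruction simply uses
+	 the zero register.  */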
*/ + if (REG_P (x)) + fprintf (file, "%s", reg_names[REGNO (x)]); + else if (x == CONST0_RTX (GET_MODE (x))) + fprintf (file, "$31"); + else + output_operand_lossage ("invalid %%r value"); + break; + + case 'R': + /* Similar, but for floating-point. */ + if (REG_P (x)) + fprintf (file, "%s", reg_names[REGNO (x)]); + else if (x == CONST0_RTX (GET_MODE (x))) + fprintf (file, "$f31"); + else + output_operand_lossage ("invalid %%R value"); + break; + + case 'N': + /* Write the 1's complement of a constant. */ + if (!CONST_INT_P (x)) + output_operand_lossage ("invalid %%N value"); + + fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x)); + break; + + case 'P': + /* Write 1 << C, for a constant C. */ + if (!CONST_INT_P (x)) + output_operand_lossage ("invalid %%P value"); + + fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x)); + break; + + case 'h': + /* Write the high-order 16 bits of a constant, sign-extended. */ + if (!CONST_INT_P (x)) + output_operand_lossage ("invalid %%h value"); + + fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16); + break; + + case 'L': + /* Write the low-order 16 bits of a constant, sign-extended. */ + if (!CONST_INT_P (x)) + output_operand_lossage ("invalid %%L value"); + + fprintf (file, HOST_WIDE_INT_PRINT_DEC, + (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000)); + break; + + case 'm': + /* Write mask for ZAP insn. */ + if (GET_CODE (x) == CONST_DOUBLE) + { + HOST_WIDE_INT mask = 0; + HOST_WIDE_INT value; + + value = CONST_DOUBLE_LOW (x); + for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR; + i++, value >>= 8) + if (value & 0xff) + mask |= (1 << i); + + value = CONST_DOUBLE_HIGH (x); + for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR; + i++, value >>= 8) + if (value & 0xff) + mask |= (1 << (i + sizeof (int))); + + fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff); + } + + else if (CONST_INT_P (x)) + { + HOST_WIDE_INT mask = 0, value = INTVAL (x); + + for (i = 0; i < 8; i++, value >>= 8) + if (value & 0xff) + mask |= (1 << i); + + fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask); + } + else + output_operand_lossage ("invalid %%m value"); + break; + + case 'M': + /* 'b', 'w', 'l', or 'q' as the value of the constant. */ + if (!CONST_INT_P (x) + || (INTVAL (x) != 8 && INTVAL (x) != 16 + && INTVAL (x) != 32 && INTVAL (x) != 64)) + output_operand_lossage ("invalid %%M value"); + + fprintf (file, "%s", + (INTVAL (x) == 8 ? "b" + : INTVAL (x) == 16 ? "w" + : INTVAL (x) == 32 ? "l" + : "q")); + break; + + case 'U': + /* Similar, except do it from the mask. */ + if (CONST_INT_P (x)) + { + HOST_WIDE_INT value = INTVAL (x); + + if (value == 0xff) + { + fputc ('b', file); + break; + } + if (value == 0xffff) + { + fputc ('w', file); + break; + } + if (value == 0xffffffff) + { + fputc ('l', file); + break; + } + if (value == -1) + { + fputc ('q', file); + break; + } + } + else if (HOST_BITS_PER_WIDE_INT == 32 + && GET_CODE (x) == CONST_DOUBLE + && CONST_DOUBLE_LOW (x) == 0xffffffff + && CONST_DOUBLE_HIGH (x) == 0) + { + fputc ('l', file); + break; + } + output_operand_lossage ("invalid %%U value"); + break; + + case 's': + /* Write the constant value divided by 8. 
*/
+      if (!CONST_INT_P (x)
+	  || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
+	  || (INTVAL (x) & 7) != 0)
+	output_operand_lossage ("invalid %%s value");
+
+      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
+      break;
+
+    case 'S':
+      /* Same, except compute (64 - c) / 8.  */
+
+      if (!CONST_INT_P (x)
+	  || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
+	  || (INTVAL (x) & 7) != 0)
+	output_operand_lossage ("invalid %%S value");
+
+      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
+      break;
+
+    case 'C': case 'D': case 'c': case 'd':
+      /* Write out comparison name.  */
+      {
+	enum rtx_code c = GET_CODE (x);
+
+	if (!COMPARISON_P (x))
+	  output_operand_lossage ("invalid %%C value");
+
+	else if (code == 'D')
+	  c = reverse_condition (c);
+	else if (code == 'c')
+	  c = swap_condition (c);
+	else if (code == 'd')
+	  c = swap_condition (reverse_condition (c));
+
+	if (c == LEU)
+	  fprintf (file, "ule");
+	else if (c == LTU)
+	  fprintf (file, "ult");
+	else if (c == UNORDERED)
+	  fprintf (file, "un");
+	else
+	  fprintf (file, "%s", GET_RTX_NAME (c));
+      }
+      break;
+
+    case 'E':
+      /* Write the divide or modulus operator.  */
+      switch (GET_CODE (x))
+	{
+	case DIV:
+	  fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
+	  break;
+	case UDIV:
+	  fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
+	  break;
+	case MOD:
+	  fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
+	  break;
+	case UMOD:
+	  fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
+	  break;
+	default:
+	  output_operand_lossage ("invalid %%E value");
+	  break;
+	}
+      break;
+
+    case 'A':
+      /* Write "_u" for unaligned access.  */
+      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
+	fprintf (file, "_u");
+      break;
+
+    case 0:
+      if (REG_P (x))
+	fprintf (file, "%s", reg_names[REGNO (x)]);
+      else if (MEM_P (x))
+	output_address (XEXP (x, 0));
+      else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
+	{
+	  switch (XINT (XEXP (x, 0), 1))
+	    {
+	    case UNSPEC_DTPREL:
+	    case UNSPEC_TPREL:
+	      output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
+	      break;
+	    default:
+	      output_operand_lossage ("unknown relocation unspec");
+	      break;
+	    }
+	}
+      else
+	output_addr_const (file, x);
+      break;
+
+    default:
+      output_operand_lossage ("invalid %%xn code");
+    }
+}
+
+void
+print_operand_address (FILE *file, rtx addr)
+{
+  int basereg = 31;
+  HOST_WIDE_INT offset = 0;
+
+  if (GET_CODE (addr) == AND)
+    addr = XEXP (addr, 0);
+
+  if (GET_CODE (addr) == PLUS
+      && CONST_INT_P (XEXP (addr, 1)))
+    {
+      offset = INTVAL (XEXP (addr, 1));
+      addr = XEXP (addr, 0);
+    }
+
+  if (GET_CODE (addr) == LO_SUM)
+    {
+      const char *reloc16, *reloclo;
+      rtx op1 = XEXP (addr, 1);
+
+      if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
+	{
+	  op1 = XEXP (op1, 0);
+	  switch (XINT (op1, 1))
+	    {
+	    case UNSPEC_DTPREL:
+	      reloc16 = NULL;
+	      reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
+	      break;
+	    case UNSPEC_TPREL:
+	      reloc16 = NULL;
+	      reloclo = (alpha_tls_size == 16 ?
"tprel" : "tprello"); + break; + default: + output_operand_lossage ("unknown relocation unspec"); + return; + } + + output_addr_const (file, XVECEXP (op1, 0, 0)); + } + else + { + reloc16 = "gprel"; + reloclo = "gprellow"; + output_addr_const (file, op1); + } + + if (offset) + fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset); + + addr = XEXP (addr, 0); + switch (GET_CODE (addr)) + { + case REG: + basereg = REGNO (addr); + break; + + case SUBREG: + basereg = subreg_regno (addr); + break; + + default: + gcc_unreachable (); + } + + fprintf (file, "($%d)\t\t!%s", basereg, + (basereg == 29 ? reloc16 : reloclo)); + return; + } + + switch (GET_CODE (addr)) + { + case REG: + basereg = REGNO (addr); + break; + + case SUBREG: + basereg = subreg_regno (addr); + break; + + case CONST_INT: + offset = INTVAL (addr); + break; + +#if TARGET_ABI_OPEN_VMS + case SYMBOL_REF: + fprintf (file, "%s", XSTR (addr, 0)); + return; + + case CONST: + gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS + && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF); + fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC, + XSTR (XEXP (XEXP (addr, 0), 0), 0), + INTVAL (XEXP (XEXP (addr, 0), 1))); + return; + +#endif + default: + gcc_unreachable (); + } + + fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg); +} + +/* Emit RTL insns to initialize the variable parts of a trampoline at + M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx + for the static chain value for the function. */ + +static void +alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) +{ + rtx fnaddr, mem, word1, word2; + + fnaddr = XEXP (DECL_RTL (fndecl), 0); + +#ifdef POINTERS_EXTEND_UNSIGNED + fnaddr = convert_memory_address (Pmode, fnaddr); + chain_value = convert_memory_address (Pmode, chain_value); +#endif + + if (TARGET_ABI_OPEN_VMS) + { + const char *fnname; + char *trname; + + /* Construct the name of the trampoline entry point. */ + fnname = XSTR (fnaddr, 0); + trname = (char *) alloca (strlen (fnname) + 5); + strcpy (trname, fnname); + strcat (trname, "..tr"); + fnname = ggc_alloc_string (trname, strlen (trname) + 1); + word2 = gen_rtx_SYMBOL_REF (Pmode, fnname); + + /* Trampoline (or "bounded") procedure descriptor is constructed from + the function's procedure descriptor with certain fields zeroed IAW + the VMS calling standard. This is stored in the first quadword. */ + word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr)); + word1 = expand_and (DImode, word1, + GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)), + NULL); + } + else + { + /* These 4 instructions are: + ldq $1,24($27) + ldq $27,16($27) + jmp $31,($27),0 + nop + We don't bother setting the HINT field of the jump; the nop + is merely there for padding. */ + word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018)); + word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000)); + } + + /* Store the first two words, as computed above. */ + mem = adjust_address (m_tramp, DImode, 0); + emit_move_insn (mem, word1); + mem = adjust_address (m_tramp, DImode, 8); + emit_move_insn (mem, word2); + + /* Store function address and static chain value. 
*/ + mem = adjust_address (m_tramp, Pmode, 16); + emit_move_insn (mem, fnaddr); + mem = adjust_address (m_tramp, Pmode, 24); + emit_move_insn (mem, chain_value); + + if (TARGET_ABI_OSF) + { + emit_insn (gen_imb ()); +#ifdef HAVE_ENABLE_EXECUTE_STACK + emit_library_call (init_one_libfunc ("__enable_execute_stack"), + LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode); +#endif + } +} + +/* Determine where to put an argument to a function. + Value is zero to push the argument on the stack, + or a hard register in which to store the argument. + + MODE is the argument's machine mode. + TYPE is the data type of the argument (as a tree). + This is null for libcalls where that information may + not be available. + CUM is a variable of type CUMULATIVE_ARGS which gives info about + the preceding args and about the function being called. + NAMED is nonzero if this argument is a named parameter + (otherwise it is an extra parameter matching an ellipsis). + + On Alpha the first 6 words of args are normally in registers + and the rest are pushed. */ + +static rtx +alpha_function_arg (cumulative_args_t cum_v, enum machine_mode mode, + const_tree type, bool named ATTRIBUTE_UNUSED) +{ + CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); + int basereg; + int num_args; + + /* Don't get confused and pass small structures in FP registers. */ + if (type && AGGREGATE_TYPE_P (type)) + basereg = 16; + else + { +#ifdef ENABLE_CHECKING + /* With alpha_split_complex_arg, we shouldn't see any raw complex + values here. */ + gcc_assert (!COMPLEX_MODE_P (mode)); +#endif + + /* Set up defaults for FP operands passed in FP registers, and + integral operands passed in integer registers. */ + if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT) + basereg = 32 + 16; + else + basereg = 16; + } + + /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for + the two platforms, so we can't avoid conditional compilation. */ +#if TARGET_ABI_OPEN_VMS + { + if (mode == VOIDmode) + return alpha_arg_info_reg_val (*cum); + + num_args = cum->num_args; + if (num_args >= 6 + || targetm.calls.must_pass_in_stack (mode, type)) + return NULL_RTX; + } +#elif TARGET_ABI_OSF + { + if (*cum >= 6) + return NULL_RTX; + num_args = *cum; + + /* VOID is passed as a special flag for "last argument". */ + if (type == void_type_node) + basereg = 16; + else if (targetm.calls.must_pass_in_stack (mode, type)) + return NULL_RTX; + } +#else +#error Unhandled ABI +#endif + + return gen_rtx_REG (mode, num_args + basereg); +} + +/* Update the data in CUM to advance over an argument + of mode MODE and data type TYPE. + (TYPE is null for libcalls where that information may not be available.) */ + +static void +alpha_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode, + const_tree type, bool named ATTRIBUTE_UNUSED) +{ + CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); + bool onstack = targetm.calls.must_pass_in_stack (mode, type); + int increment = onstack ? 
6 : ALPHA_ARG_SIZE (mode, type, named); + +#if TARGET_ABI_OSF + *cum += increment; +#else + if (!onstack && cum->num_args < 6) + cum->atypes[cum->num_args] = alpha_arg_type (mode); + cum->num_args += increment; +#endif +} + +static int +alpha_arg_partial_bytes (cumulative_args_t cum_v, + enum machine_mode mode ATTRIBUTE_UNUSED, + tree type ATTRIBUTE_UNUSED, + bool named ATTRIBUTE_UNUSED) +{ + int words = 0; + CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v); + +#if TARGET_ABI_OPEN_VMS + if (cum->num_args < 6 + && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named)) + words = 6 - cum->num_args; +#elif TARGET_ABI_OSF + if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named)) + words = 6 - *cum; +#else +#error Unhandled ABI +#endif + + return words * UNITS_PER_WORD; +} + + +/* Return true if TYPE must be returned in memory, instead of in registers. */ + +static bool +alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED) +{ + enum machine_mode mode = VOIDmode; + int size; + + if (type) + { + mode = TYPE_MODE (type); + + /* All aggregates are returned in memory, except on OpenVMS where + records that fit 64 bits should be returned by immediate value + as required by section 3.8.7.1 of the OpenVMS Calling Standard. */ + if (TARGET_ABI_OPEN_VMS + && TREE_CODE (type) != ARRAY_TYPE + && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8) + return false; + + if (AGGREGATE_TYPE_P (type)) + return true; + } + + size = GET_MODE_SIZE (mode); + switch (GET_MODE_CLASS (mode)) + { + case MODE_VECTOR_FLOAT: + /* Pass all float vectors in memory, like an aggregate. */ + return true; + + case MODE_COMPLEX_FLOAT: + /* We judge complex floats on the size of their element, + not the size of the whole type. */ + size = GET_MODE_UNIT_SIZE (mode); + break; + + case MODE_INT: + case MODE_FLOAT: + case MODE_COMPLEX_INT: + case MODE_VECTOR_INT: + break; + + default: + /* ??? We get called on all sorts of random stuff from + aggregate_value_p. We must return something, but it's not + clear what's safe to return. Pretend it's a struct I + guess. */ + return true; + } + + /* Otherwise types must fit in one register. */ + return size > UNITS_PER_WORD; +} + +/* Return true if TYPE should be passed by invisible reference. */ + +static bool +alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED, + enum machine_mode mode, + const_tree type ATTRIBUTE_UNUSED, + bool named ATTRIBUTE_UNUSED) +{ + return mode == TFmode || mode == TCmode; +} + +/* Define how to find the value returned by a function. VALTYPE is the + data type of the value (as a tree). If the precise function being + called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0. + MODE is set instead of VALTYPE for libcalls. + + On Alpha the value is found in $0 for integer functions and + $f0 for floating-point functions. */ + +rtx +function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED, + enum machine_mode mode) +{ + unsigned int regnum, dummy ATTRIBUTE_UNUSED; + enum mode_class mclass; + + gcc_assert (!valtype || !alpha_return_in_memory (valtype, func)); + + if (valtype) + mode = TYPE_MODE (valtype); + + mclass = GET_MODE_CLASS (mode); + switch (mclass) + { + case MODE_INT: + /* Do the same thing as PROMOTE_MODE except for libcalls on VMS, + where we have them returning both SImode and DImode. 
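+     (PROMOTE_MODE widens sub-word integers, so e.g. a plain int return
+     value comes back in $0 sign-extended to 64 bits.)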
*/ + if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype))) + PROMOTE_MODE (mode, dummy, valtype); + /* FALLTHRU */ + + case MODE_COMPLEX_INT: + case MODE_VECTOR_INT: + regnum = 0; + break; + + case MODE_FLOAT: + regnum = 32; + break; + + case MODE_COMPLEX_FLOAT: + { + enum machine_mode cmode = GET_MODE_INNER (mode); + + return gen_rtx_PARALLEL + (VOIDmode, + gen_rtvec (2, + gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32), + const0_rtx), + gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33), + GEN_INT (GET_MODE_SIZE (cmode))))); + } + + case MODE_RANDOM: + /* We should only reach here for BLKmode on VMS. */ + gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode); + regnum = 0; + break; + + default: + gcc_unreachable (); + } + + return gen_rtx_REG (mode, regnum); +} + +/* TCmode complex values are passed by invisible reference. We + should not split these values. */ + +static bool +alpha_split_complex_arg (const_tree type) +{ + return TYPE_MODE (type) != TCmode; +} + +static tree +alpha_build_builtin_va_list (void) +{ + tree base, ofs, space, record, type_decl; + + if (TARGET_ABI_OPEN_VMS) + return ptr_type_node; + + record = (*lang_hooks.types.make_type) (RECORD_TYPE); + type_decl = build_decl (BUILTINS_LOCATION, + TYPE_DECL, get_identifier ("__va_list_tag"), record); + TYPE_STUB_DECL (record) = type_decl; + TYPE_NAME (record) = type_decl; + + /* C++? SET_IS_AGGR_TYPE (record, 1); */ + + /* Dummy field to prevent alignment warnings. */ + space = build_decl (BUILTINS_LOCATION, + FIELD_DECL, NULL_TREE, integer_type_node); + DECL_FIELD_CONTEXT (space) = record; + DECL_ARTIFICIAL (space) = 1; + DECL_IGNORED_P (space) = 1; + + ofs = build_decl (BUILTINS_LOCATION, + FIELD_DECL, get_identifier ("__offset"), + integer_type_node); + DECL_FIELD_CONTEXT (ofs) = record; + DECL_CHAIN (ofs) = space; + /* ??? This is a hack, __offset is marked volatile to prevent + DCE that confuses stdarg optimization and results in + gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */ + TREE_THIS_VOLATILE (ofs) = 1; + + base = build_decl (BUILTINS_LOCATION, + FIELD_DECL, get_identifier ("__base"), + ptr_type_node); + DECL_FIELD_CONTEXT (base) = record; + DECL_CHAIN (base) = ofs; + + TYPE_FIELDS (record) = base; + layout_type (record); + + va_list_gpr_counter_field = ofs; + return record; +} + +#if TARGET_ABI_OSF +/* Helper function for alpha_stdarg_optimize_hook. Skip over casts + and constant additions. */ + +static gimple +va_list_skip_additions (tree lhs) +{ + gimple stmt; + + for (;;) + { + enum tree_code code; + + stmt = SSA_NAME_DEF_STMT (lhs); + + if (gimple_code (stmt) == GIMPLE_PHI) + return stmt; + + if (!is_gimple_assign (stmt) + || gimple_assign_lhs (stmt) != lhs) + return NULL; + + if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME) + return stmt; + code = gimple_assign_rhs_code (stmt); + if (!CONVERT_EXPR_CODE_P (code) + && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR) + || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST + || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt)))) + return stmt; + + lhs = gimple_assign_rhs1 (stmt); + } +} + +/* Check if LHS = RHS statement is + LHS = *(ap.__base + ap.__offset + cst) + or + LHS = *(ap.__base + + ((ap.__offset + cst <= 47) + ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2). + If the former, indicate that GPR registers are needed, + if the latter, indicate that FPR registers are needed. + + Also look for LHS = (*ptr).field, where ptr is one of the forms + listed above. 
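+
+   As a concrete illustration, "x = va_arg (ap, long)" matches the first
+   (GPR) form, while "d = va_arg (ap, double)" produces the second: the
+   FP save area sits 48 bytes below the integer one, so offsets under 48
+   are redirected downward by the COND_EXPR that alpha_gimplify_va_arg_1
+   emits for REAL_TYPE arguments.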
+ + On alpha, cfun->va_list_gpr_size is used as size of the needed + regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR + registers are needed and bit 1 set if FPR registers are needed. + Return true if va_list references should not be scanned for the + current statement. */ + +static bool +alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt) +{ + tree base, offset, rhs; + int offset_arg = 1; + gimple base_stmt; + + if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) + != GIMPLE_SINGLE_RHS) + return false; + + rhs = gimple_assign_rhs1 (stmt); + while (handled_component_p (rhs)) + rhs = TREE_OPERAND (rhs, 0); + if (TREE_CODE (rhs) != MEM_REF + || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME) + return false; + + stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0)); + if (stmt == NULL + || !is_gimple_assign (stmt) + || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR) + return false; + + base = gimple_assign_rhs1 (stmt); + if (TREE_CODE (base) == SSA_NAME) + { + base_stmt = va_list_skip_additions (base); + if (base_stmt + && is_gimple_assign (base_stmt) + && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF) + base = gimple_assign_rhs1 (base_stmt); + } + + if (TREE_CODE (base) != COMPONENT_REF + || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node)) + { + base = gimple_assign_rhs2 (stmt); + if (TREE_CODE (base) == SSA_NAME) + { + base_stmt = va_list_skip_additions (base); + if (base_stmt + && is_gimple_assign (base_stmt) + && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF) + base = gimple_assign_rhs1 (base_stmt); + } + + if (TREE_CODE (base) != COMPONENT_REF + || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node)) + return false; + + offset_arg = 0; + } + + base = get_base_address (base); + if (TREE_CODE (base) != VAR_DECL + || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names)) + return false; + + offset = gimple_op (stmt, 1 + offset_arg); + if (TREE_CODE (offset) == SSA_NAME) + { + gimple offset_stmt = va_list_skip_additions (offset); + + if (offset_stmt + && gimple_code (offset_stmt) == GIMPLE_PHI) + { + HOST_WIDE_INT sub; + gimple arg1_stmt, arg2_stmt; + tree arg1, arg2; + enum tree_code code1, code2; + + if (gimple_phi_num_args (offset_stmt) != 2) + goto escapes; + + arg1_stmt + = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0)); + arg2_stmt + = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1)); + if (arg1_stmt == NULL + || !is_gimple_assign (arg1_stmt) + || arg2_stmt == NULL + || !is_gimple_assign (arg2_stmt)) + goto escapes; + + code1 = gimple_assign_rhs_code (arg1_stmt); + code2 = gimple_assign_rhs_code (arg2_stmt); + if (code1 == COMPONENT_REF + && (code2 == MINUS_EXPR || code2 == PLUS_EXPR)) + /* Do nothing. 
*/;
+      else if (code2 == COMPONENT_REF
+	       && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
+	{
+	  gimple tem = arg1_stmt;
+	  code2 = code1;
+	  arg1_stmt = arg2_stmt;
+	  arg2_stmt = tem;
+	}
+      else
+	goto escapes;
+
+      if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt)))
+	goto escapes;
+
+      sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt));
+      if (code2 == MINUS_EXPR)
+	sub = -sub;
+      if (sub < -48 || sub > -32)
+	goto escapes;
+
+      arg1 = gimple_assign_rhs1 (arg1_stmt);
+      arg2 = gimple_assign_rhs1 (arg2_stmt);
+      if (TREE_CODE (arg2) == SSA_NAME)
+	{
+	  arg2_stmt = va_list_skip_additions (arg2);
+	  if (arg2_stmt == NULL
+	      || !is_gimple_assign (arg2_stmt)
+	      || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
+	    goto escapes;
+	  arg2 = gimple_assign_rhs1 (arg2_stmt);
+	}
+      if (arg1 != arg2)
+	goto escapes;
+
+      if (TREE_CODE (arg1) != COMPONENT_REF
+	  || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
+	  || get_base_address (arg1) != base)
+	goto escapes;
+
+      /* Need floating point regs.  */
+      cfun->va_list_fpr_size |= 2;
+      return false;
+    }
+      if (offset_stmt
+	  && is_gimple_assign (offset_stmt)
+	  && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
+	offset = gimple_assign_rhs1 (offset_stmt);
+    }
+  if (TREE_CODE (offset) != COMPONENT_REF
+      || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
+      || get_base_address (offset) != base)
+    goto escapes;
+  else
+    /* Need general regs.  */
+    cfun->va_list_fpr_size |= 1;
+  return false;
+
+escapes:
+  si->va_list_escapes = true;
+  return false;
+}
+#endif
+
+/* Perform any actions needed for a function that is receiving a
+   variable number of arguments.  */
+
+static void
+alpha_setup_incoming_varargs (cumulative_args_t pcum, enum machine_mode mode,
+			      tree type, int *pretend_size, int no_rtl)
+{
+  CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
+
+  /* Skip the current argument.  */
+  targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
+				      true);
+
+#if TARGET_ABI_OPEN_VMS
+  /* For VMS, we allocate space for all 6 arg registers plus a count.
+
+     However, if NO registers need to be saved, don't allocate any space.
+     This is not only because we won't need the space, but because AP
+     includes the current_pretend_args_size and we don't want to mess up
+     any ap-relative addresses already made.  */
+  if (cum.num_args < 6)
+    {
+      if (!no_rtl)
+	{
+	  emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
+	  emit_insn (gen_arg_home ());
+	}
+      *pretend_size = 7 * UNITS_PER_WORD;
+    }
+#else
+  /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
+     only push those that are remaining.  However, if NO registers need to
+     be saved, don't allocate any space.  This is not only because we won't
+     need the space, but because AP includes the current_pretend_args_size
+     and we don't want to mess up any ap-relative addresses already made.
+
+     If we are not to use the floating-point registers, save the integer
+     registers where we would put the floating-point registers.  This is
+     not the most efficient way to implement varargs with just one register
+     class, but it isn't worth doing anything more efficient in this rare
+     case.  */
+  if (cum >= 6)
+    return;
+
+  if (!no_rtl)
+    {
+      int count;
+      alias_set_type set = get_varargs_alias_set ();
+      rtx tmp;
+
+      count = cfun->va_list_gpr_size / UNITS_PER_WORD;
+      if (count > 6 - cum)
+	count = 6 - cum;
+
+      /* Detect whether integer registers or floating-point registers
+	 are needed by the detected va_arg statements.  See above for
+	 how these values are computed.
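+
+	 As an example, for "int f (int a, ...)" CUM is 1 once the current
+	 argument has been skipped, so when both need bits are set below,
+	 $17-$21 are dumped at AP+56 and $f17-$f21 at AP+8 (the
+	 (cum + 6) * 8 and cum * 8 offsets from virtual_incoming_args_rtx;
+	 illustrative numbers, assuming the full save area is wanted).
+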
Note that the "escape" value + is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of + these bits set. */ + gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3); + + if (cfun->va_list_fpr_size & 1) + { + tmp = gen_rtx_MEM (BLKmode, + plus_constant (Pmode, virtual_incoming_args_rtx, + (cum + 6) * UNITS_PER_WORD)); + MEM_NOTRAP_P (tmp) = 1; + set_mem_alias_set (tmp, set); + move_block_from_reg (16 + cum, tmp, count); + } + + if (cfun->va_list_fpr_size & 2) + { + tmp = gen_rtx_MEM (BLKmode, + plus_constant (Pmode, virtual_incoming_args_rtx, + cum * UNITS_PER_WORD)); + MEM_NOTRAP_P (tmp) = 1; + set_mem_alias_set (tmp, set); + move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count); + } + } + *pretend_size = 12 * UNITS_PER_WORD; +#endif +} + +static void +alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED) +{ + HOST_WIDE_INT offset; + tree t, offset_field, base_field; + + if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK) + return; + + /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base + up by 48, storing fp arg registers in the first 48 bytes, and the + integer arg registers in the next 48 bytes. This is only done, + however, if any integer registers need to be stored. + + If no integer registers need be stored, then we must subtract 48 + in order to account for the integer arg registers which are counted + in argsize above, but which are not actually stored on the stack. + Must further be careful here about structures straddling the last + integer argument register; that futzes with pretend_args_size, + which changes the meaning of AP. */ + + if (NUM_ARGS < 6) + offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD; + else + offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size; + + if (TARGET_ABI_OPEN_VMS) + { + t = make_tree (ptr_type_node, virtual_incoming_args_rtx); + t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD); + t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + } + else + { + base_field = TYPE_FIELDS (TREE_TYPE (valist)); + offset_field = DECL_CHAIN (base_field); + + base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field), + valist, base_field, NULL_TREE); + offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field), + valist, offset_field, NULL_TREE); + + t = make_tree (ptr_type_node, virtual_incoming_args_rtx); + t = fold_build_pointer_plus_hwi (t, offset); + t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + + t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD); + t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + } +} + +static tree +alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, + gimple_seq *pre_p) +{ + tree type_size, ptr_type, addend, t, addr; + gimple_seq internal_post; + + /* If the type could not be passed in registers, skip the block + reserved for the registers. 
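+
+     That is, OFFSET is clamped via offset = MAX (offset, 6*8), so types
+     that must live on the stack are never fetched from the 48-byte
+     register save area.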
*/ + if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type)) + { + t = build_int_cst (TREE_TYPE (offset), 6*8); + gimplify_assign (offset, + build2 (MAX_EXPR, TREE_TYPE (offset), offset, t), + pre_p); + } + + addend = offset; + ptr_type = build_pointer_type_for_mode (type, ptr_mode, true); + + if (TREE_CODE (type) == COMPLEX_TYPE) + { + tree real_part, imag_part, real_temp; + + real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base, + offset, pre_p); + + /* Copy the value into a new temporary, lest the formal temporary + be reused out from under us. */ + real_temp = get_initialized_tmp_var (real_part, pre_p, NULL); + + imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base, + offset, pre_p); + + return build2 (COMPLEX_EXPR, type, real_temp, imag_part); + } + else if (TREE_CODE (type) == REAL_TYPE) + { + tree fpaddend, cond, fourtyeight; + + fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8); + fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend), + addend, fourtyeight); + cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight); + addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond, + fpaddend, addend); + } + + /* Build the final address and force that value into a temporary. */ + addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend); + internal_post = NULL; + gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue); + gimple_seq_add_seq (pre_p, internal_post); + + /* Update the offset field. */ + type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type)); + if (type_size == NULL || TREE_OVERFLOW (type_size)) + t = size_zero_node; + else + { + t = size_binop (PLUS_EXPR, type_size, size_int (7)); + t = size_binop (TRUNC_DIV_EXPR, t, size_int (8)); + t = size_binop (MULT_EXPR, t, size_int (8)); + } + t = fold_convert (TREE_TYPE (offset), t); + gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t), + pre_p); + + return build_va_arg_indirect_ref (addr); +} + +static tree +alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p, + gimple_seq *post_p) +{ + tree offset_field, base_field, offset, base, t, r; + bool indirect; + + if (TARGET_ABI_OPEN_VMS) + return std_gimplify_va_arg_expr (valist, type, pre_p, post_p); + + base_field = TYPE_FIELDS (va_list_type_node); + offset_field = DECL_CHAIN (base_field); + base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field), + valist, base_field, NULL_TREE); + offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field), + valist, offset_field, NULL_TREE); + + /* Pull the fields of the structure out into temporaries. Since we never + modify the base field, we can use a formal temporary. Sign-extend the + offset field so that it's the proper width for pointer arithmetic. */ + base = get_formal_tmp_var (base_field, pre_p); + + t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field); + offset = get_initialized_tmp_var (t, pre_p, NULL); + + indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false); + if (indirect) + type = build_pointer_type_for_mode (type, ptr_mode, true); + + /* Find the value. Note that this will be a stable indirection, or + a composite of stable indirections in the case of complex. */ + r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p); + + /* Stuff the offset temporary back into its field. */ + gimplify_assign (unshare_expr (offset_field), + fold_convert (TREE_TYPE (offset_field), offset), pre_p); + + if (indirect) + r = build_va_arg_indirect_ref (r); + + return r; +} + +/* Builtins. 
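+
+   These are exposed to C as __builtin_alpha_* intrinsics that map 1:1
+   onto machine instructions, e.g. (illustrative):
+
+	unsigned long m = __builtin_alpha_cmpbge (x, y);    // bytewise x >= y mask
+	unsigned long k = __builtin_alpha_zapnot (v, 0x0f); // keep low 4 bytes
+	unsigned long h = __builtin_alpha_umulh (a, b);	    // high 64 bits of a*b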
*/ + +enum alpha_builtin +{ + ALPHA_BUILTIN_CMPBGE, + ALPHA_BUILTIN_EXTBL, + ALPHA_BUILTIN_EXTWL, + ALPHA_BUILTIN_EXTLL, + ALPHA_BUILTIN_EXTQL, + ALPHA_BUILTIN_EXTWH, + ALPHA_BUILTIN_EXTLH, + ALPHA_BUILTIN_EXTQH, + ALPHA_BUILTIN_INSBL, + ALPHA_BUILTIN_INSWL, + ALPHA_BUILTIN_INSLL, + ALPHA_BUILTIN_INSQL, + ALPHA_BUILTIN_INSWH, + ALPHA_BUILTIN_INSLH, + ALPHA_BUILTIN_INSQH, + ALPHA_BUILTIN_MSKBL, + ALPHA_BUILTIN_MSKWL, + ALPHA_BUILTIN_MSKLL, + ALPHA_BUILTIN_MSKQL, + ALPHA_BUILTIN_MSKWH, + ALPHA_BUILTIN_MSKLH, + ALPHA_BUILTIN_MSKQH, + ALPHA_BUILTIN_UMULH, + ALPHA_BUILTIN_ZAP, + ALPHA_BUILTIN_ZAPNOT, + ALPHA_BUILTIN_AMASK, + ALPHA_BUILTIN_IMPLVER, + ALPHA_BUILTIN_RPCC, + ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER, + ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, + + /* TARGET_MAX */ + ALPHA_BUILTIN_MINUB8, + ALPHA_BUILTIN_MINSB8, + ALPHA_BUILTIN_MINUW4, + ALPHA_BUILTIN_MINSW4, + ALPHA_BUILTIN_MAXUB8, + ALPHA_BUILTIN_MAXSB8, + ALPHA_BUILTIN_MAXUW4, + ALPHA_BUILTIN_MAXSW4, + ALPHA_BUILTIN_PERR, + ALPHA_BUILTIN_PKLB, + ALPHA_BUILTIN_PKWB, + ALPHA_BUILTIN_UNPKBL, + ALPHA_BUILTIN_UNPKBW, + + /* TARGET_CIX */ + ALPHA_BUILTIN_CTTZ, + ALPHA_BUILTIN_CTLZ, + ALPHA_BUILTIN_CTPOP, + + ALPHA_BUILTIN_max +}; + +static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = { + CODE_FOR_builtin_cmpbge, + CODE_FOR_extbl, + CODE_FOR_extwl, + CODE_FOR_extll, + CODE_FOR_extql, + CODE_FOR_extwh, + CODE_FOR_extlh, + CODE_FOR_extqh, + CODE_FOR_builtin_insbl, + CODE_FOR_builtin_inswl, + CODE_FOR_builtin_insll, + CODE_FOR_insql, + CODE_FOR_inswh, + CODE_FOR_inslh, + CODE_FOR_insqh, + CODE_FOR_mskbl, + CODE_FOR_mskwl, + CODE_FOR_mskll, + CODE_FOR_mskql, + CODE_FOR_mskwh, + CODE_FOR_msklh, + CODE_FOR_mskqh, + CODE_FOR_umuldi3_highpart, + CODE_FOR_builtin_zap, + CODE_FOR_builtin_zapnot, + CODE_FOR_builtin_amask, + CODE_FOR_builtin_implver, + CODE_FOR_builtin_rpcc, + CODE_FOR_builtin_establish_vms_condition_handler, + CODE_FOR_builtin_revert_vms_condition_handler, + + /* TARGET_MAX */ + CODE_FOR_builtin_minub8, + CODE_FOR_builtin_minsb8, + CODE_FOR_builtin_minuw4, + CODE_FOR_builtin_minsw4, + CODE_FOR_builtin_maxub8, + CODE_FOR_builtin_maxsb8, + CODE_FOR_builtin_maxuw4, + CODE_FOR_builtin_maxsw4, + CODE_FOR_builtin_perr, + CODE_FOR_builtin_pklb, + CODE_FOR_builtin_pkwb, + CODE_FOR_builtin_unpkbl, + CODE_FOR_builtin_unpkbw, + + /* TARGET_CIX */ + CODE_FOR_ctzdi2, + CODE_FOR_clzdi2, + CODE_FOR_popcountdi2 +}; + +struct alpha_builtin_def +{ + const char *name; + enum alpha_builtin code; + unsigned int target_mask; + bool is_const; +}; + +static struct alpha_builtin_def const zero_arg_builtins[] = { + { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true }, + { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false } +}; + +static struct alpha_builtin_def const one_arg_builtins[] = { + { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true }, + { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true }, + { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true }, + { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true }, + { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true }, + { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true }, + { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true }, + { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true } +}; + +static struct alpha_builtin_def const two_arg_builtins[] = { + { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true }, + { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true }, + { 
"__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true }, + { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true }, + { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true }, + { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true }, + { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true }, + { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true }, + { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true }, + { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true }, + { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true }, + { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true }, + { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true }, + { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true }, + { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true }, + { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true }, + { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true }, + { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true }, + { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true }, + { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true }, + { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true }, + { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true }, + { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true }, + { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true }, + { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true }, + { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true }, + { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true }, + { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true }, + { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true }, + { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true }, + { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true }, + { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true }, + { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true }, + { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true } +}; + +static GTY(()) tree alpha_dimode_u; +static GTY(()) tree alpha_v8qi_u; +static GTY(()) tree alpha_v8qi_s; +static GTY(()) tree alpha_v4hi_u; +static GTY(()) tree alpha_v4hi_s; + +static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max]; + +/* Return the alpha builtin for CODE. */ + +static tree +alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED) +{ + if (code >= ALPHA_BUILTIN_max) + return error_mark_node; + return alpha_builtins[code]; +} + +/* Helper function of alpha_init_builtins. Add the built-in specified + by NAME, TYPE, CODE, and ECF. */ + +static void +alpha_builtin_function (const char *name, tree ftype, + enum alpha_builtin code, unsigned ecf) +{ + tree decl = add_builtin_function (name, ftype, (int) code, + BUILT_IN_MD, NULL, NULL_TREE); + + if (ecf & ECF_CONST) + TREE_READONLY (decl) = 1; + if (ecf & ECF_NOTHROW) + TREE_NOTHROW (decl) = 1; + + alpha_builtins [(int) code] = decl; +} + +/* Helper function of alpha_init_builtins. Add the COUNT built-in + functions pointed to by P, with function type FTYPE. */ + +static void +alpha_add_builtins (const struct alpha_builtin_def *p, size_t count, + tree ftype) +{ + size_t i; + + for (i = 0; i < count; ++i, ++p) + if ((target_flags & p->target_mask) == p->target_mask) + alpha_builtin_function (p->name, ftype, p->code, + (p->is_const ? 
ECF_CONST : 0) | ECF_NOTHROW); +} + +static void +alpha_init_builtins (void) +{ + tree ftype; + + alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1); + alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8); + alpha_v8qi_s = build_vector_type (intQI_type_node, 8); + alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4); + alpha_v4hi_s = build_vector_type (intHI_type_node, 4); + + ftype = build_function_type_list (alpha_dimode_u, NULL_TREE); + alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype); + + ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE); + alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype); + + ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, + alpha_dimode_u, NULL_TREE); + alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype); + + if (TARGET_ABI_OPEN_VMS) + { + ftype = build_function_type_list (ptr_type_node, ptr_type_node, + NULL_TREE); + alpha_builtin_function ("__builtin_establish_vms_condition_handler", + ftype, + ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER, + 0); + + ftype = build_function_type_list (ptr_type_node, void_type_node, + NULL_TREE); + alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype, + ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0); + + vms_patch_builtins (); + } +} + +/* Expand an expression EXP that calls a built-in function, + with result going to TARGET if that's convenient + (and in mode MODE if that's convenient). + SUBTARGET may be used as the target for computing one of EXP's operands. + IGNORE is nonzero if the value is to be ignored. */ + +static rtx +alpha_expand_builtin (tree exp, rtx target, + rtx subtarget ATTRIBUTE_UNUSED, + enum machine_mode mode ATTRIBUTE_UNUSED, + int ignore ATTRIBUTE_UNUSED) +{ +#define MAX_ARGS 2 + + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); + unsigned int fcode = DECL_FUNCTION_CODE (fndecl); + tree arg; + call_expr_arg_iterator iter; + enum insn_code icode; + rtx op[MAX_ARGS], pat; + int arity; + bool nonvoid; + + if (fcode >= ALPHA_BUILTIN_max) + internal_error ("bad builtin fcode"); + icode = code_for_builtin[fcode]; + if (icode == 0) + internal_error ("bad builtin fcode"); + + nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node; + + arity = 0; + FOR_EACH_CALL_EXPR_ARG (arg, iter, exp) + { + const struct insn_operand_data *insn_op; + + if (arg == error_mark_node) + return NULL_RTX; + if (arity > MAX_ARGS) + return NULL_RTX; + + insn_op = &insn_data[icode].operand[arity + nonvoid]; + + op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL); + + if (!(*insn_op->predicate) (op[arity], insn_op->mode)) + op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]); + arity++; + } + + if (nonvoid) + { + enum machine_mode tmode = insn_data[icode].operand[0].mode; + if (!target + || GET_MODE (target) != tmode + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + } + + switch (arity) + { + case 0: + pat = GEN_FCN (icode) (target); + break; + case 1: + if (nonvoid) + pat = GEN_FCN (icode) (target, op[0]); + else + pat = GEN_FCN (icode) (op[0]); + break; + case 2: + pat = GEN_FCN (icode) (target, op[0], op[1]); + break; + default: + gcc_unreachable (); + } + if (!pat) + return NULL_RTX; + emit_insn (pat); + + if (nonvoid) + return target; + else + return const0_rtx; +} + + +/* Several bits below assume HWI >= 64 bits. This should be enforced + by config.gcc. 
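+
+   [Editor's note: an illustrative usage sketch, added in editing; it is
+   not part of the original file.  Given the declarations registered by
+   alpha_init_builtins above, Alpha user code can call the builtins
+   directly, e.g.:]  */
+
+#if 0 /* example user code, shown for illustration only */
+unsigned long
+umulh_example (unsigned long a, unsigned long b)
+{
+  /* High 64 bits of the 128-bit product a*b.  */
+  return __builtin_alpha_umulh (a, b);
+}
+
+unsigned long
+low_word_example (unsigned long x)
+{
+  /* ZAPNOT with selector 0x0f keeps only bytes 0-3.  */
+  return __builtin_alpha_zapnot (x, 0x0f);
+}
+#endif
+
+/* (end of editorial sketch; the HOST_WIDE_INT width guard follows.)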
*/
+#if HOST_BITS_PER_WIDE_INT < 64
+# error "HOST_WIDE_INT too small"
+#endif
+
+/* Fold the builtin for the CMPBGE instruction.  This is a vector comparison
+   with an 8-bit output vector.  OPINT contains the integer operands; bit N
+   of OP_CONST is set if OPINT[N] is valid.  */
+
+static tree
+alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
+{
+  if (op_const == 3)
+    {
+      int i, val;
+      for (i = 0, val = 0; i < 8; ++i)
+        {
+          unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
+          unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
+          if (c0 >= c1)
+            val |= 1 << i;
+        }
+      return build_int_cst (alpha_dimode_u, val);
+    }
+  else if (op_const == 2 && opint[1] == 0)
+    return build_int_cst (alpha_dimode_u, 0xff);
+  return NULL;
+}
+
+/* Fold the builtin for the ZAPNOT instruction.  This is essentially a
+   specialized form of an AND operation.  Other byte manipulation instructions
+   are defined in terms of this instruction, so this is also used as a
+   subroutine for other builtins.
+
+   OP contains the tree operands; OPINT contains the extracted integer values.
+   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
+   OPINT may be considered.  */
+
+static tree
+alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
+                           long op_const)
+{
+  if (op_const & 2)
+    {
+      unsigned HOST_WIDE_INT mask = 0;
+      int i;
+
+      for (i = 0; i < 8; ++i)
+        if ((opint[1] >> i) & 1)
+          mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
+
+      if (op_const & 1)
+        return build_int_cst (alpha_dimode_u, opint[0] & mask);
+
+      if (op)
+        return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
+                            build_int_cst (alpha_dimode_u, mask));
+    }
+  else if ((op_const & 1) && opint[0] == 0)
+    return build_int_cst (alpha_dimode_u, 0);
+  return NULL;
+}
+
+/* Fold the builtins for the EXT family of instructions.  */
+
+static tree
+alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
+                          long op_const, unsigned HOST_WIDE_INT bytemask,
+                          bool is_high)
+{
+  long zap_const = 2;
+  tree *zap_op = NULL;
+
+  if (op_const & 2)
+    {
+      unsigned HOST_WIDE_INT loc;
+
+      loc = opint[1] & 7;
+      loc *= BITS_PER_UNIT;
+
+      if (loc != 0)
+        {
+          if (op_const & 1)
+            {
+              unsigned HOST_WIDE_INT temp = opint[0];
+              if (is_high)
+                temp <<= loc;
+              else
+                temp >>= loc;
+              opint[0] = temp;
+              zap_const = 3;
+            }
+        }
+      else
+        zap_op = op;
+    }
+
+  opint[1] = bytemask;
+  return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
+}
+
+/* Fold the builtins for the INS family of instructions. 
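+
+   [Editor's note: illustrative sketch, added in editing; not part of the
+   original file.  It shows the mask expansion alpha_fold_builtin_zapnot
+   performs above, which the INS and MSK folds below are also defined in
+   terms of: bit I of the 8-bit selector keeps byte I of the 64-bit
+   operand, so a selector of 0x0f expands to 0x00000000ffffffff.]  */
+
+static inline unsigned HOST_WIDE_INT
+zapnot_mask_sketch (unsigned HOST_WIDE_INT selector)
+{
+  unsigned HOST_WIDE_INT mask = 0;
+  int i;
+
+  for (i = 0; i < 8; ++i)
+    if ((selector >> i) & 1)
+      mask |= (unsigned HOST_WIDE_INT) 0xff << (i * 8);
+  return mask;
+}
+
+/* (end of editorial sketch; the INS folding follows.)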
*/ + +static tree +alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[], + long op_const, unsigned HOST_WIDE_INT bytemask, + bool is_high) +{ + if ((op_const & 1) && opint[0] == 0) + return build_int_cst (alpha_dimode_u, 0); + + if (op_const & 2) + { + unsigned HOST_WIDE_INT temp, loc, byteloc; + tree *zap_op = NULL; + + loc = opint[1] & 7; + bytemask <<= loc; + + temp = opint[0]; + if (is_high) + { + byteloc = (64 - (loc * 8)) & 0x3f; + if (byteloc == 0) + zap_op = op; + else + temp >>= byteloc; + bytemask >>= 8; + } + else + { + byteloc = loc * 8; + if (byteloc == 0) + zap_op = op; + else + temp <<= byteloc; + } + + opint[0] = temp; + opint[1] = bytemask; + return alpha_fold_builtin_zapnot (zap_op, opint, op_const); + } + + return NULL; +} + +static tree +alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[], + long op_const, unsigned HOST_WIDE_INT bytemask, + bool is_high) +{ + if (op_const & 2) + { + unsigned HOST_WIDE_INT loc; + + loc = opint[1] & 7; + bytemask <<= loc; + + if (is_high) + bytemask >>= 8; + + opint[1] = bytemask ^ 0xff; + } + + return alpha_fold_builtin_zapnot (op, opint, op_const); +} + +static tree +alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype) +{ + tree op0 = fold_convert (vtype, op[0]); + tree op1 = fold_convert (vtype, op[1]); + tree val = fold_build2 (code, vtype, op0, op1); + return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val); +} + +static tree +alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const) +{ + unsigned HOST_WIDE_INT temp = 0; + int i; + + if (op_const != 3) + return NULL; + + for (i = 0; i < 8; ++i) + { + unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff; + unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff; + if (a >= b) + temp += a - b; + else + temp += b - a; + } + + return build_int_cst (alpha_dimode_u, temp); +} + +static tree +alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const) +{ + unsigned HOST_WIDE_INT temp; + + if (op_const == 0) + return NULL; + + temp = opint[0] & 0xff; + temp |= (opint[0] >> 24) & 0xff00; + + return build_int_cst (alpha_dimode_u, temp); +} + +static tree +alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const) +{ + unsigned HOST_WIDE_INT temp; + + if (op_const == 0) + return NULL; + + temp = opint[0] & 0xff; + temp |= (opint[0] >> 8) & 0xff00; + temp |= (opint[0] >> 16) & 0xff0000; + temp |= (opint[0] >> 24) & 0xff000000; + + return build_int_cst (alpha_dimode_u, temp); +} + +static tree +alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const) +{ + unsigned HOST_WIDE_INT temp; + + if (op_const == 0) + return NULL; + + temp = opint[0] & 0xff; + temp |= (opint[0] & 0xff00) << 24; + + return build_int_cst (alpha_dimode_u, temp); +} + +static tree +alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const) +{ + unsigned HOST_WIDE_INT temp; + + if (op_const == 0) + return NULL; + + temp = opint[0] & 0xff; + temp |= (opint[0] & 0x0000ff00) << 8; + temp |= (opint[0] & 0x00ff0000) << 16; + temp |= (opint[0] & 0xff000000) << 24; + + return build_int_cst (alpha_dimode_u, temp); +} + +static tree +alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const) +{ + unsigned HOST_WIDE_INT temp; + + if (op_const == 0) + return NULL; + + if (opint[0] == 0) + temp = 64; + else + temp = exact_log2 (opint[0] & -opint[0]); + + return build_int_cst (alpha_dimode_u, temp); +} + +static tree +alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const) +{ + unsigned 
HOST_WIDE_INT temp; + + if (op_const == 0) + return NULL; + + if (opint[0] == 0) + temp = 64; + else + temp = 64 - floor_log2 (opint[0]) - 1; + + return build_int_cst (alpha_dimode_u, temp); +} + +static tree +alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const) +{ + unsigned HOST_WIDE_INT temp, op; + + if (op_const == 0) + return NULL; + + op = opint[0]; + temp = 0; + while (op) + temp++, op &= op - 1; + + return build_int_cst (alpha_dimode_u, temp); +} + +/* Fold one of our builtin functions. */ + +static tree +alpha_fold_builtin (tree fndecl, int n_args, tree *op, + bool ignore ATTRIBUTE_UNUSED) +{ + unsigned HOST_WIDE_INT opint[MAX_ARGS]; + long op_const = 0; + int i; + + if (n_args > MAX_ARGS) + return NULL; + + for (i = 0; i < n_args; i++) + { + tree arg = op[i]; + if (arg == error_mark_node) + return NULL; + + opint[i] = 0; + if (TREE_CODE (arg) == INTEGER_CST) + { + op_const |= 1L << i; + opint[i] = int_cst_value (arg); + } + } + + switch (DECL_FUNCTION_CODE (fndecl)) + { + case ALPHA_BUILTIN_CMPBGE: + return alpha_fold_builtin_cmpbge (opint, op_const); + + case ALPHA_BUILTIN_EXTBL: + return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false); + case ALPHA_BUILTIN_EXTWL: + return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false); + case ALPHA_BUILTIN_EXTLL: + return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false); + case ALPHA_BUILTIN_EXTQL: + return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false); + case ALPHA_BUILTIN_EXTWH: + return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true); + case ALPHA_BUILTIN_EXTLH: + return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true); + case ALPHA_BUILTIN_EXTQH: + return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true); + + case ALPHA_BUILTIN_INSBL: + return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false); + case ALPHA_BUILTIN_INSWL: + return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false); + case ALPHA_BUILTIN_INSLL: + return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false); + case ALPHA_BUILTIN_INSQL: + return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false); + case ALPHA_BUILTIN_INSWH: + return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true); + case ALPHA_BUILTIN_INSLH: + return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true); + case ALPHA_BUILTIN_INSQH: + return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true); + + case ALPHA_BUILTIN_MSKBL: + return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false); + case ALPHA_BUILTIN_MSKWL: + return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false); + case ALPHA_BUILTIN_MSKLL: + return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false); + case ALPHA_BUILTIN_MSKQL: + return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false); + case ALPHA_BUILTIN_MSKWH: + return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true); + case ALPHA_BUILTIN_MSKLH: + return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true); + case ALPHA_BUILTIN_MSKQH: + return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true); + + case ALPHA_BUILTIN_UMULH: + return fold_build2 (MULT_HIGHPART_EXPR, alpha_dimode_u, op[0], op[1]); + + case ALPHA_BUILTIN_ZAP: + opint[1] ^= 0xff; + /* FALLTHRU */ + case ALPHA_BUILTIN_ZAPNOT: + return alpha_fold_builtin_zapnot (op, opint, op_const); + + case ALPHA_BUILTIN_MINUB8: + return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u); + case ALPHA_BUILTIN_MINSB8: + return 
alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s); + case ALPHA_BUILTIN_MINUW4: + return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u); + case ALPHA_BUILTIN_MINSW4: + return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s); + case ALPHA_BUILTIN_MAXUB8: + return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u); + case ALPHA_BUILTIN_MAXSB8: + return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s); + case ALPHA_BUILTIN_MAXUW4: + return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u); + case ALPHA_BUILTIN_MAXSW4: + return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s); + + case ALPHA_BUILTIN_PERR: + return alpha_fold_builtin_perr (opint, op_const); + case ALPHA_BUILTIN_PKLB: + return alpha_fold_builtin_pklb (opint, op_const); + case ALPHA_BUILTIN_PKWB: + return alpha_fold_builtin_pkwb (opint, op_const); + case ALPHA_BUILTIN_UNPKBL: + return alpha_fold_builtin_unpkbl (opint, op_const); + case ALPHA_BUILTIN_UNPKBW: + return alpha_fold_builtin_unpkbw (opint, op_const); + + case ALPHA_BUILTIN_CTTZ: + return alpha_fold_builtin_cttz (opint, op_const); + case ALPHA_BUILTIN_CTLZ: + return alpha_fold_builtin_ctlz (opint, op_const); + case ALPHA_BUILTIN_CTPOP: + return alpha_fold_builtin_ctpop (opint, op_const); + + case ALPHA_BUILTIN_AMASK: + case ALPHA_BUILTIN_IMPLVER: + case ALPHA_BUILTIN_RPCC: + /* None of these are foldable at compile-time. */ + default: + return NULL; + } +} + +/* This page contains routines that are used to determine what the function + prologue and epilogue code will do and write them out. */ + +/* Compute the size of the save area in the stack. */ + +/* These variables are used for communication between the following functions. + They indicate various things about the current function being compiled + that are used to tell what kind of prologue, epilogue and procedure + descriptor to generate. */ + +/* Nonzero if we need a stack procedure. */ +enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2}; +static enum alpha_procedure_types alpha_procedure_type; + +/* Register number (either FP or SP) that is used to unwind the frame. */ +static int vms_unwind_regno; + +/* Register number used to save FP. We need not have one for RA since + we don't modify it for register procedures. This is only defined + for register frame procedures. */ +static int vms_save_fp_regno; + +/* Register number used to reference objects off our PV. */ +static int vms_base_regno; + +/* Compute register masks for saved registers. */ + +static void +alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP) +{ + unsigned long imask = 0; + unsigned long fmask = 0; + unsigned int i; + + /* When outputting a thunk, we don't have valid register life info, + but assemble_start_function wants to output .frame and .mask + directives. */ + if (cfun->is_thunk) + { + *imaskP = 0; + *fmaskP = 0; + return; + } + + if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK) + imask |= (1UL << HARD_FRAME_POINTER_REGNUM); + + /* One for every register we have to save. */ + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + if (! fixed_regs[i] && ! call_used_regs[i] + && df_regs_ever_live_p (i) && i != REG_RA) + { + if (i < 32) + imask |= (1UL << i); + else + fmask |= (1UL << (i - 32)); + } + + /* We need to restore these for the handler. 
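+
+   [Editor's worked example, added in editing: EH_RETURN_DATA_REGNO is
+   expected to yield $16-$19 on this port before returning INVALID_REGNUM,
+   so a function using eh_return collects bits 16-19 in the loop below,
+   i.e. imask |= 0xf0000.]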
*/ + if (crtl->calls_eh_return) + { + for (i = 0; ; ++i) + { + unsigned regno = EH_RETURN_DATA_REGNO (i); + if (regno == INVALID_REGNUM) + break; + imask |= 1UL << regno; + } + } + + /* If any register spilled, then spill the return address also. */ + /* ??? This is required by the Digital stack unwind specification + and isn't needed if we're doing Dwarf2 unwinding. */ + if (imask || fmask || alpha_ra_ever_killed ()) + imask |= (1UL << REG_RA); + + *imaskP = imask; + *fmaskP = fmask; +} + +int +alpha_sa_size (void) +{ + unsigned long mask[2]; + int sa_size = 0; + int i, j; + + alpha_sa_mask (&mask[0], &mask[1]); + + for (j = 0; j < 2; ++j) + for (i = 0; i < 32; ++i) + if ((mask[j] >> i) & 1) + sa_size++; + + if (TARGET_ABI_OPEN_VMS) + { + /* Start with a stack procedure if we make any calls (REG_RA used), or + need a frame pointer, with a register procedure if we otherwise need + at least a slot, and with a null procedure in other cases. */ + if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed) + alpha_procedure_type = PT_STACK; + else if (get_frame_size() != 0) + alpha_procedure_type = PT_REGISTER; + else + alpha_procedure_type = PT_NULL; + + /* Don't reserve space for saving FP & RA yet. Do that later after we've + made the final decision on stack procedure vs register procedure. */ + if (alpha_procedure_type == PT_STACK) + sa_size -= 2; + + /* Decide whether to refer to objects off our PV via FP or PV. + If we need FP for something else or if we receive a nonlocal + goto (which expects PV to contain the value), we must use PV. + Otherwise, start by assuming we can use FP. */ + + vms_base_regno + = (frame_pointer_needed + || cfun->has_nonlocal_label + || alpha_procedure_type == PT_STACK + || crtl->outgoing_args_size) + ? REG_PV : HARD_FRAME_POINTER_REGNUM; + + /* If we want to copy PV into FP, we need to find some register + in which to save FP. */ + + vms_save_fp_regno = -1; + if (vms_base_regno == HARD_FRAME_POINTER_REGNUM) + for (i = 0; i < 32; i++) + if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i)) + vms_save_fp_regno = i; + + /* A VMS condition handler requires a stack procedure in our + implementation. (not required by the calling standard). */ + if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER) + || cfun->machine->uses_condition_handler) + vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK; + else if (alpha_procedure_type == PT_NULL) + vms_base_regno = REG_PV; + + /* Stack unwinding should be done via FP unless we use it for PV. */ + vms_unwind_regno = (vms_base_regno == REG_PV + ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM); + + /* If this is a stack procedure, allow space for saving FP, RA and + a condition handler slot if needed. */ + if (alpha_procedure_type == PT_STACK) + sa_size += 2 + cfun->machine->uses_condition_handler; + } + else + { + /* Our size must be even (multiple of 16 bytes). */ + if (sa_size & 1) + sa_size++; + } + + return sa_size * 8; +} + +/* Define the offset between two registers, one to be eliminated, + and the other its replacement, at the start of a routine. 
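+
+   [Editor's note: illustrative restatement, added in editing; not part of
+   the original file.  ALPHA_ROUND, used throughout the frame-layout code
+   here, is assumed to round up to the 16-byte stack alignment:]  */
+
+#if 0 /* illustration only; the real macro lives in alpha.h */
+#define ALPHA_ROUND_SKETCH(X) (((X) + 15) & ~15)
+/* e.g. ALPHA_ROUND_SKETCH (20) == 32 and ALPHA_ROUND_SKETCH (48) == 48.  */
+#endif
+
+/* (end of editorial sketch; the offset function described above follows.)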
*/ + +HOST_WIDE_INT +alpha_initial_elimination_offset (unsigned int from, + unsigned int to ATTRIBUTE_UNUSED) +{ + HOST_WIDE_INT ret; + + ret = alpha_sa_size (); + ret += ALPHA_ROUND (crtl->outgoing_args_size); + + switch (from) + { + case FRAME_POINTER_REGNUM: + break; + + case ARG_POINTER_REGNUM: + ret += (ALPHA_ROUND (get_frame_size () + + crtl->args.pretend_args_size) + - crtl->args.pretend_args_size); + break; + + default: + gcc_unreachable (); + } + + return ret; +} + +#if TARGET_ABI_OPEN_VMS + +/* Worker function for TARGET_CAN_ELIMINATE. */ + +static bool +alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to) +{ + /* We need the alpha_procedure_type to decide. Evaluate it now. */ + alpha_sa_size (); + + switch (alpha_procedure_type) + { + case PT_NULL: + /* NULL procedures have no frame of their own and we only + know how to resolve from the current stack pointer. */ + return to == STACK_POINTER_REGNUM; + + case PT_REGISTER: + case PT_STACK: + /* We always eliminate except to the stack pointer if there is no + usable frame pointer at hand. */ + return (to != STACK_POINTER_REGNUM + || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM); + } + + gcc_unreachable (); +} + +/* FROM is to be eliminated for TO. Return the offset so that TO+offset + designates the same location as FROM. */ + +HOST_WIDE_INT +alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to) +{ + /* The only possible attempts we ever expect are ARG or FRAME_PTR to + HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide + on the proper computations and will need the register save area size + in most cases. */ + + HOST_WIDE_INT sa_size = alpha_sa_size (); + + /* PT_NULL procedures have no frame of their own and we only allow + elimination to the stack pointer. This is the argument pointer and we + resolve the soft frame pointer to that as well. */ + + if (alpha_procedure_type == PT_NULL) + return 0; + + /* For a PT_STACK procedure the frame layout looks as follows + + -----> decreasing addresses + + < size rounded up to 16 | likewise > + --------------#------------------------------+++--------------+++-------# + incoming args # pretended args | "frame" | regs sa | PV | outgoing args # + --------------#---------------------------------------------------------# + ^ ^ ^ ^ + ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR + + + PT_REGISTER procedures are similar in that they may have a frame of their + own. They have no regs-sa/pv/outgoing-args area. + + We first compute offset to HARD_FRAME_PTR, then add what we need to get + to STACK_PTR if need be. */ + + { + HOST_WIDE_INT offset; + HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 
8 : 0; + + switch (from) + { + case FRAME_POINTER_REGNUM: + offset = ALPHA_ROUND (sa_size + pv_save_size); + break; + case ARG_POINTER_REGNUM: + offset = (ALPHA_ROUND (sa_size + pv_save_size + + get_frame_size () + + crtl->args.pretend_args_size) + - crtl->args.pretend_args_size); + break; + default: + gcc_unreachable (); + } + + if (to == STACK_POINTER_REGNUM) + offset += ALPHA_ROUND (crtl->outgoing_args_size); + + return offset; + } +} + +#define COMMON_OBJECT "common_object" + +static tree +common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED, + tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, + bool *no_add_attrs ATTRIBUTE_UNUSED) +{ + tree decl = *node; + gcc_assert (DECL_P (decl)); + + DECL_COMMON (decl) = 1; + return NULL_TREE; +} + +static const struct attribute_spec vms_attribute_table[] = +{ + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler, + affects_type_identity } */ + { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false }, + { NULL, 0, 0, false, false, false, NULL, false } +}; + +void +vms_output_aligned_decl_common(FILE *file, tree decl, const char *name, + unsigned HOST_WIDE_INT size, + unsigned int align) +{ + tree attr = DECL_ATTRIBUTES (decl); + fprintf (file, "%s", COMMON_ASM_OP); + assemble_name (file, name); + fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size); + /* ??? Unlike on OSF/1, the alignment factor is not in log units. */ + fprintf (file, ",%u", align / BITS_PER_UNIT); + if (attr) + { + attr = lookup_attribute (COMMON_OBJECT, attr); + if (attr) + fprintf (file, ",%s", + IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr)))); + } + fputc ('\n', file); +} + +#undef COMMON_OBJECT + +#endif + +static int +find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED) +{ + return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx; +} + +int +alpha_find_lo_sum_using_gp (rtx insn) +{ + return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0; +} + +static int +alpha_does_function_need_gp (void) +{ + rtx insn; + + /* The GP being variable is an OSF abi thing. */ + if (! TARGET_ABI_OSF) + return 0; + + /* We need the gp to load the address of __mcount. */ + if (TARGET_PROFILING_NEEDS_GP && crtl->profile) + return 1; + + /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */ + if (cfun->is_thunk) + return 1; + + /* The nonlocal receiver pattern assumes that the gp is valid for + the nested function. Reasonable because it's almost always set + correctly already. For the cases where that's wrong, make sure + the nested function loads its gp on entry. */ + if (crtl->has_nonlocal_goto) + return 1; + + /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first. + Even if we are a static function, we still need to do this in case + our address is taken and passed to something like qsort. */ + + push_topmost_sequence (); + insn = get_insns (); + pop_topmost_sequence (); + + for (; insn; insn = NEXT_INSN (insn)) + if (NONDEBUG_INSN_P (insn) + && GET_CODE (PATTERN (insn)) != USE + && GET_CODE (PATTERN (insn)) != CLOBBER + && get_attr_usegp (insn)) + return 1; + + return 0; +} + + +/* Helper function to set RTX_FRAME_RELATED_P on instructions, including + sequences. 
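+
+   [Editor's note: illustrative usage, added in editing; not part of the
+   original file.  The FRP macro defined just below runs an emission inside
+   start_sequence/set_frame_related_p so that every insn it produces is
+   marked RTX_FRAME_RELATED_P, as in:]  */
+
+#if 0 /* usage illustration only */
+  FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
+#endif
+
+/* (end of editorial sketch; the helper and macro described above follow.)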
*/ + +static rtx +set_frame_related_p (void) +{ + rtx seq = get_insns (); + rtx insn; + + end_sequence (); + + if (!seq) + return NULL_RTX; + + if (INSN_P (seq)) + { + insn = seq; + while (insn != NULL_RTX) + { + RTX_FRAME_RELATED_P (insn) = 1; + insn = NEXT_INSN (insn); + } + seq = emit_insn (seq); + } + else + { + seq = emit_insn (seq); + RTX_FRAME_RELATED_P (seq) = 1; + } + return seq; +} + +#define FRP(exp) (start_sequence (), exp, set_frame_related_p ()) + +/* Generates a store with the proper unwind info attached. VALUE is + stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG + contains SP+FRAME_BIAS, and that is the unwind info that should be + generated. If FRAME_REG != VALUE, then VALUE is being stored on + behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */ + +static void +emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias, + HOST_WIDE_INT base_ofs, rtx frame_reg) +{ + rtx addr, mem, insn; + + addr = plus_constant (Pmode, base_reg, base_ofs); + mem = gen_frame_mem (DImode, addr); + + insn = emit_move_insn (mem, value); + RTX_FRAME_RELATED_P (insn) = 1; + + if (frame_bias || value != frame_reg) + { + if (frame_bias) + { + addr = plus_constant (Pmode, stack_pointer_rtx, + frame_bias + base_ofs); + mem = gen_rtx_MEM (DImode, addr); + } + + add_reg_note (insn, REG_FRAME_RELATED_EXPR, + gen_rtx_SET (VOIDmode, mem, frame_reg)); + } +} + +static void +emit_frame_store (unsigned int regno, rtx base_reg, + HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs) +{ + rtx reg = gen_rtx_REG (DImode, regno); + emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg); +} + +/* Compute the frame size. SIZE is the size of the "naked" frame + and SA_SIZE is the size of the register save area. */ + +static HOST_WIDE_INT +compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size) +{ + if (TARGET_ABI_OPEN_VMS) + return ALPHA_ROUND (sa_size + + (alpha_procedure_type == PT_STACK ? 8 : 0) + + size + + crtl->args.pretend_args_size); + else + return ALPHA_ROUND (crtl->outgoing_args_size) + + sa_size + + ALPHA_ROUND (size + + crtl->args.pretend_args_size); +} + +/* Write function prologue. */ + +/* On vms we have two kinds of functions: + + - stack frame (PROC_STACK) + these are 'normal' functions with local vars and which are + calling other functions + - register frame (PROC_REGISTER) + keeps all data in registers, needs no stack + + We must pass this to the assembler so it can generate the + proper pdsc (procedure descriptor) + This is done with the '.pdesc' command. + + On not-vms, we don't really differentiate between the two, as we can + simply allocate stack without saving registers. */ + +void +alpha_expand_prologue (void) +{ + /* Registers to save. */ + unsigned long imask = 0; + unsigned long fmask = 0; + /* Stack space needed for pushing registers clobbered by us. */ + HOST_WIDE_INT sa_size, sa_bias; + /* Complete stack size needed. */ + HOST_WIDE_INT frame_size; + /* Probed stack size; it additionally includes the size of + the "reserve region" if any. */ + HOST_WIDE_INT probed_size; + /* Offset from base reg to register save area. 
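+
+     (Editor's example, added in editing: on OSF this offset is
+     ALPHA_ROUND (crtl->outgoing_args_size), so 20 bytes of outgoing
+     arguments put the save area at offset 32; on VMS it is 8, plus 8 more
+     when a condition handler slot is present.)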
*/ + HOST_WIDE_INT reg_offset; + rtx sa_reg; + int i; + + sa_size = alpha_sa_size (); + frame_size = compute_frame_size (get_frame_size (), sa_size); + + if (flag_stack_usage_info) + current_function_static_stack_size = frame_size; + + if (TARGET_ABI_OPEN_VMS) + reg_offset = 8 + 8 * cfun->machine->uses_condition_handler; + else + reg_offset = ALPHA_ROUND (crtl->outgoing_args_size); + + alpha_sa_mask (&imask, &fmask); + + /* Emit an insn to reload GP, if needed. */ + if (TARGET_ABI_OSF) + { + alpha_function_needs_gp = alpha_does_function_need_gp (); + if (alpha_function_needs_gp) + emit_insn (gen_prologue_ldgp ()); + } + + /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert + the call to mcount ourselves, rather than having the linker do it + magically in response to -pg. Since _mcount has special linkage, + don't represent the call as a call. */ + if (TARGET_PROFILING_NEEDS_GP && crtl->profile) + emit_insn (gen_prologue_mcount ()); + + /* Adjust the stack by the frame size. If the frame size is > 4096 + bytes, we need to be sure we probe somewhere in the first and last + 4096 bytes (we can probably get away without the latter test) and + every 8192 bytes in between. If the frame size is > 32768, we + do this in a loop. Otherwise, we generate the explicit probe + instructions. + + Note that we are only allowed to adjust sp once in the prologue. */ + + probed_size = frame_size; + if (flag_stack_check) + probed_size += STACK_CHECK_PROTECT; + + if (probed_size <= 32768) + { + if (probed_size > 4096) + { + int probed; + + for (probed = 4096; probed < probed_size; probed += 8192) + emit_insn (gen_probe_stack (GEN_INT (-probed))); + + /* We only have to do this probe if we aren't saving registers or + if we are probing beyond the frame because of -fstack-check. */ + if ((sa_size == 0 && probed_size > probed - 4096) + || flag_stack_check) + emit_insn (gen_probe_stack (GEN_INT (-probed_size))); + } + + if (frame_size != 0) + FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, + GEN_INT (-frame_size)))); + } + else + { + /* Here we generate code to set R22 to SP + 4096 and set R23 to the + number of 8192 byte blocks to probe. We then probe each block + in the loop and then set SP to the proper location. If the + amount remaining is > 4096, we have to do one more probe if we + are not saving any registers or if we are probing beyond the + frame because of -fstack-check. */ + + HOST_WIDE_INT blocks = (probed_size + 4096) / 8192; + HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192; + rtx ptr = gen_rtx_REG (DImode, 22); + rtx count = gen_rtx_REG (DImode, 23); + rtx seq; + + emit_move_insn (count, GEN_INT (blocks)); + emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096))); + + /* Because of the difficulty in emitting a new basic block this + late in the compilation, generate the loop as a single insn. */ + emit_insn (gen_prologue_stack_probe_loop (count, ptr)); + + if ((leftover > 4096 && sa_size == 0) || flag_stack_check) + { + rtx last = gen_rtx_MEM (DImode, + plus_constant (Pmode, ptr, -leftover)); + MEM_VOLATILE_P (last) = 1; + emit_move_insn (last, const0_rtx); + } + + if (flag_stack_check) + { + /* If -fstack-check is specified we have to load the entire + constant into a register and subtract from the sp in one go, + because the probed stack size is not equal to the frame size. 
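+
+     (Editor's worked example for the probe loop above, added in editing:
+     probed_size == 40000 gives blocks == (40000 + 4096) / 8192 == 5 and
+     leftover == 44096 - 5*8192 == 3136.)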
*/ + HOST_WIDE_INT lo, hi; + lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000; + hi = frame_size - lo; + + emit_move_insn (ptr, GEN_INT (hi)); + emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo))); + seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, + ptr)); + } + else + { + seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr, + GEN_INT (-leftover))); + } + + /* This alternative is special, because the DWARF code cannot + possibly intuit through the loop above. So we invent this + note it looks at instead. */ + RTX_FRAME_RELATED_P (seq) = 1; + add_reg_note (seq, REG_FRAME_RELATED_EXPR, + gen_rtx_SET (VOIDmode, stack_pointer_rtx, + plus_constant (Pmode, stack_pointer_rtx, + -frame_size))); + } + + /* Cope with very large offsets to the register save area. */ + sa_bias = 0; + sa_reg = stack_pointer_rtx; + if (reg_offset + sa_size > 0x8000) + { + int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000; + rtx sa_bias_rtx; + + if (low + sa_size <= 0x8000) + sa_bias = reg_offset - low, reg_offset = low; + else + sa_bias = reg_offset, reg_offset = 0; + + sa_reg = gen_rtx_REG (DImode, 24); + sa_bias_rtx = GEN_INT (sa_bias); + + if (add_operand (sa_bias_rtx, DImode)) + emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx)); + else + { + emit_move_insn (sa_reg, sa_bias_rtx); + emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg)); + } + } + + /* Save regs in stack order. Beginning with VMS PV. */ + if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK) + emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0); + + /* Save register RA next. */ + if (imask & (1UL << REG_RA)) + { + emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset); + imask &= ~(1UL << REG_RA); + reg_offset += 8; + } + + /* Now save any other registers required to be saved. */ + for (i = 0; i < 31; i++) + if (imask & (1UL << i)) + { + emit_frame_store (i, sa_reg, sa_bias, reg_offset); + reg_offset += 8; + } + + for (i = 0; i < 31; i++) + if (fmask & (1UL << i)) + { + emit_frame_store (i+32, sa_reg, sa_bias, reg_offset); + reg_offset += 8; + } + + if (TARGET_ABI_OPEN_VMS) + { + /* Register frame procedures save the fp. */ + if (alpha_procedure_type == PT_REGISTER) + { + rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno), + hard_frame_pointer_rtx); + add_reg_note (insn, REG_CFA_REGISTER, NULL); + RTX_FRAME_RELATED_P (insn) = 1; + } + + if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV) + emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno), + gen_rtx_REG (DImode, REG_PV))); + + if (alpha_procedure_type != PT_NULL + && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM) + FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx)); + + /* If we have to allocate space for outgoing args, do it now. */ + if (crtl->outgoing_args_size != 0) + { + rtx seq + = emit_move_insn (stack_pointer_rtx, + plus_constant + (Pmode, hard_frame_pointer_rtx, + - (ALPHA_ROUND + (crtl->outgoing_args_size)))); + + /* Only set FRAME_RELATED_P on the stack adjustment we just emitted + if ! frame_pointer_needed. Setting the bit will change the CFA + computation rule to use sp again, which would be wrong if we had + frame_pointer_needed, as this means sp might move unpredictably + later on. + + Also, note that + frame_pointer_needed + => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM + and + crtl->outgoing_args_size != 0 + => alpha_procedure_type != PT_NULL, + + so when we are not setting the bit here, we are guaranteed to + have emitted an FRP frame pointer update just before. 
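+
+     (Editor's note, added in editing: the recurring split
+     low = ((ofs & 0xffff) ^ 0x8000) - 0x8000 sign-extends the bottom 16
+     bits of an offset; e.g. ofs == 0x9100 yields low == -0x6f00 with a
+     bias of 0x10000, each representable by lda/ldah-style operands.)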
*/ + RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed; + } + } + else + { + /* If we need a frame pointer, set it from the stack pointer. */ + if (frame_pointer_needed) + { + if (TARGET_CAN_FAULT_IN_PROLOGUE) + FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx)); + else + /* This must always be the last instruction in the + prologue, thus we emit a special move + clobber. */ + FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx, + stack_pointer_rtx, sa_reg))); + } + } + + /* The ABIs for VMS and OSF/1 say that while we can schedule insns into + the prologue, for exception handling reasons, we cannot do this for + any insn that might fault. We could prevent this for mems with a + (clobber:BLK (scratch)), but this doesn't work for fp insns. So we + have to prevent all such scheduling with a blockage. + + Linux, on the other hand, never bothered to implement OSF/1's + exception handling, and so doesn't care about such things. Anyone + planning to use dwarf2 frame-unwind info can also omit the blockage. */ + + if (! TARGET_CAN_FAULT_IN_PROLOGUE) + emit_insn (gen_blockage ()); +} + +/* Count the number of .file directives, so that .loc is up to date. */ +int num_source_filenames = 0; + +/* Output the textual info surrounding the prologue. */ + +void +alpha_start_function (FILE *file, const char *fnname, + tree decl ATTRIBUTE_UNUSED) +{ + unsigned long imask = 0; + unsigned long fmask = 0; + /* Stack space needed for pushing registers clobbered by us. */ + HOST_WIDE_INT sa_size; + /* Complete stack size needed. */ + unsigned HOST_WIDE_INT frame_size; + /* The maximum debuggable frame size. */ + unsigned HOST_WIDE_INT max_frame_size = 1UL << 31; + /* Offset from base reg to register save area. */ + HOST_WIDE_INT reg_offset; + char *entry_label = (char *) alloca (strlen (fnname) + 6); + char *tramp_label = (char *) alloca (strlen (fnname) + 6); + int i; + +#if TARGET_ABI_OPEN_VMS + vms_start_function (fnname); +#endif + + alpha_fnname = fnname; + sa_size = alpha_sa_size (); + frame_size = compute_frame_size (get_frame_size (), sa_size); + + if (TARGET_ABI_OPEN_VMS) + reg_offset = 8 + 8 * cfun->machine->uses_condition_handler; + else + reg_offset = ALPHA_ROUND (crtl->outgoing_args_size); + + alpha_sa_mask (&imask, &fmask); + + /* Issue function start and label. */ + if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive) + { + fputs ("\t.ent ", file); + assemble_name (file, fnname); + putc ('\n', file); + + /* If the function needs GP, we'll write the "..ng" label there. + Otherwise, do it here. */ + if (TARGET_ABI_OSF + && ! alpha_function_needs_gp + && ! cfun->is_thunk) + { + putc ('$', file); + assemble_name (file, fnname); + fputs ("..ng:\n", file); + } + } + /* Nested functions on VMS that are potentially called via trampoline + get a special transfer entry point that loads the called functions + procedure descriptor and static chain. 
*/ + if (TARGET_ABI_OPEN_VMS + && !TREE_PUBLIC (decl) + && DECL_CONTEXT (decl) + && !TYPE_P (DECL_CONTEXT (decl)) + && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL) + { + strcpy (tramp_label, fnname); + strcat (tramp_label, "..tr"); + ASM_OUTPUT_LABEL (file, tramp_label); + fprintf (file, "\tldq $1,24($27)\n"); + fprintf (file, "\tldq $27,16($27)\n"); + } + + strcpy (entry_label, fnname); + if (TARGET_ABI_OPEN_VMS) + strcat (entry_label, "..en"); + + ASM_OUTPUT_LABEL (file, entry_label); + inside_function = TRUE; + + if (TARGET_ABI_OPEN_VMS) + fprintf (file, "\t.base $%d\n", vms_base_regno); + + if (TARGET_ABI_OSF + && TARGET_IEEE_CONFORMANT + && !flag_inhibit_size_directive) + { + /* Set flags in procedure descriptor to request IEEE-conformant + math-library routines. The value we set it to is PDSC_EXC_IEEE + (/usr/include/pdsc.h). */ + fputs ("\t.eflag 48\n", file); + } + + /* Set up offsets to alpha virtual arg/local debugging pointer. */ + alpha_auto_offset = -frame_size + crtl->args.pretend_args_size; + alpha_arg_offset = -frame_size + 48; + + /* Describe our frame. If the frame size is larger than an integer, + print it as zero to avoid an assembler error. We won't be + properly describing such a frame, but that's the best we can do. */ + if (TARGET_ABI_OPEN_VMS) + fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26," + HOST_WIDE_INT_PRINT_DEC "\n", + vms_unwind_regno, + frame_size >= (1UL << 31) ? 0 : frame_size, + reg_offset); + else if (!flag_inhibit_size_directive) + fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n", + (frame_pointer_needed + ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM), + frame_size >= max_frame_size ? 0 : frame_size, + crtl->args.pretend_args_size); + + /* Describe which registers were spilled. */ + if (TARGET_ABI_OPEN_VMS) + { + if (imask) + /* ??? Does VMS care if mask contains ra? The old code didn't + set it, so I don't here. */ + fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA)); + if (fmask) + fprintf (file, "\t.fmask 0x%lx,0\n", fmask); + if (alpha_procedure_type == PT_REGISTER) + fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno); + } + else if (!flag_inhibit_size_directive) + { + if (imask) + { + fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask, + frame_size >= max_frame_size ? 0 : reg_offset - frame_size); + + for (i = 0; i < 32; ++i) + if (imask & (1UL << i)) + reg_offset += 8; + } + + if (fmask) + fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask, + frame_size >= max_frame_size ? 0 : reg_offset - frame_size); + } + +#if TARGET_ABI_OPEN_VMS + /* If a user condition handler has been installed at some point, emit + the procedure descriptor bits to point the Condition Handling Facility + at the indirection wrapper, and state the fp offset at which the user + handler may be found. */ + if (cfun->machine->uses_condition_handler) + { + fprintf (file, "\t.handler __gcc_shell_handler\n"); + fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET); + } + +#ifdef TARGET_VMS_CRASH_DEBUG + /* Support of minimal traceback info. */ + switch_to_section (readonly_data_section); + fprintf (file, "\t.align 3\n"); + assemble_name (file, fnname); fputs ("..na:\n", file); + fputs ("\t.ascii \"", file); + assemble_name (file, fnname); + fputs ("\\0\"\n", file); + switch_to_section (text_section); +#endif +#endif /* TARGET_ABI_OPEN_VMS */ +} + +/* Emit the .prologue note at the scheduled end of the prologue. 
*/ + +static void +alpha_output_function_end_prologue (FILE *file) +{ + if (TARGET_ABI_OPEN_VMS) + fputs ("\t.prologue\n", file); + else if (!flag_inhibit_size_directive) + fprintf (file, "\t.prologue %d\n", + alpha_function_needs_gp || cfun->is_thunk); +} + +/* Write function epilogue. */ + +void +alpha_expand_epilogue (void) +{ + /* Registers to save. */ + unsigned long imask = 0; + unsigned long fmask = 0; + /* Stack space needed for pushing registers clobbered by us. */ + HOST_WIDE_INT sa_size; + /* Complete stack size needed. */ + HOST_WIDE_INT frame_size; + /* Offset from base reg to register save area. */ + HOST_WIDE_INT reg_offset; + int fp_is_frame_pointer, fp_offset; + rtx sa_reg, sa_reg_exp = NULL; + rtx sp_adj1, sp_adj2, mem, reg, insn; + rtx eh_ofs; + rtx cfa_restores = NULL_RTX; + int i; + + sa_size = alpha_sa_size (); + frame_size = compute_frame_size (get_frame_size (), sa_size); + + if (TARGET_ABI_OPEN_VMS) + { + if (alpha_procedure_type == PT_STACK) + reg_offset = 8 + 8 * cfun->machine->uses_condition_handler; + else + reg_offset = 0; + } + else + reg_offset = ALPHA_ROUND (crtl->outgoing_args_size); + + alpha_sa_mask (&imask, &fmask); + + fp_is_frame_pointer + = (TARGET_ABI_OPEN_VMS + ? alpha_procedure_type == PT_STACK + : frame_pointer_needed); + fp_offset = 0; + sa_reg = stack_pointer_rtx; + + if (crtl->calls_eh_return) + eh_ofs = EH_RETURN_STACKADJ_RTX; + else + eh_ofs = NULL_RTX; + + if (sa_size) + { + /* If we have a frame pointer, restore SP from it. */ + if (TARGET_ABI_OPEN_VMS + ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM + : frame_pointer_needed) + emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx); + + /* Cope with very large offsets to the register save area. */ + if (reg_offset + sa_size > 0x8000) + { + int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000; + HOST_WIDE_INT bias; + + if (low + sa_size <= 0x8000) + bias = reg_offset - low, reg_offset = low; + else + bias = reg_offset, reg_offset = 0; + + sa_reg = gen_rtx_REG (DImode, 22); + sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias); + + emit_move_insn (sa_reg, sa_reg_exp); + } + + /* Restore registers in order, excepting a true frame pointer. */ + + mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset)); + reg = gen_rtx_REG (DImode, REG_RA); + emit_move_insn (reg, mem); + cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores); + + reg_offset += 8; + imask &= ~(1UL << REG_RA); + + for (i = 0; i < 31; ++i) + if (imask & (1UL << i)) + { + if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer) + fp_offset = reg_offset; + else + { + mem = gen_frame_mem (DImode, + plus_constant (Pmode, sa_reg, + reg_offset)); + reg = gen_rtx_REG (DImode, i); + emit_move_insn (reg, mem); + cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, + cfa_restores); + } + reg_offset += 8; + } + + for (i = 0; i < 31; ++i) + if (fmask & (1UL << i)) + { + mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg, + reg_offset)); + reg = gen_rtx_REG (DFmode, i+32); + emit_move_insn (reg, mem); + cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores); + reg_offset += 8; + } + } + + if (frame_size || eh_ofs) + { + sp_adj1 = stack_pointer_rtx; + + if (eh_ofs) + { + sp_adj1 = gen_rtx_REG (DImode, 23); + emit_move_insn (sp_adj1, + gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs)); + } + + /* If the stack size is large, begin computation into a temporary + register so as not to interfere with a potential fp restore, + which must be consecutive with an SP restore. 
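+
+     (Editor's note, added in editing: three cases follow.  A frame below
+     32768 bytes fits an add immediate directly; below 0x40007fff the size
+     is split into a sign-extended low 16 bits plus a register-built
+     remainder; anything larger is materialized piecewise with
+     alpha_emit_set_long_const.)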
*/ + if (frame_size < 32768 && !cfun->calls_alloca) + sp_adj2 = GEN_INT (frame_size); + else if (frame_size < 0x40007fffL) + { + int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000; + + sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low); + if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2)) + sp_adj1 = sa_reg; + else + { + sp_adj1 = gen_rtx_REG (DImode, 23); + emit_move_insn (sp_adj1, sp_adj2); + } + sp_adj2 = GEN_INT (low); + } + else + { + rtx tmp = gen_rtx_REG (DImode, 23); + sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false); + if (!sp_adj2) + { + /* We can't drop new things to memory this late, afaik, + so build it up by pieces. */ + sp_adj2 = alpha_emit_set_long_const (tmp, frame_size, + -(frame_size < 0)); + gcc_assert (sp_adj2); + } + } + + /* From now on, things must be in order. So emit blockages. */ + + /* Restore the frame pointer. */ + if (fp_is_frame_pointer) + { + emit_insn (gen_blockage ()); + mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, + fp_offset)); + emit_move_insn (hard_frame_pointer_rtx, mem); + cfa_restores = alloc_reg_note (REG_CFA_RESTORE, + hard_frame_pointer_rtx, cfa_restores); + } + else if (TARGET_ABI_OPEN_VMS) + { + emit_insn (gen_blockage ()); + emit_move_insn (hard_frame_pointer_rtx, + gen_rtx_REG (DImode, vms_save_fp_regno)); + cfa_restores = alloc_reg_note (REG_CFA_RESTORE, + hard_frame_pointer_rtx, cfa_restores); + } + + /* Restore the stack pointer. */ + emit_insn (gen_blockage ()); + if (sp_adj2 == const0_rtx) + insn = emit_move_insn (stack_pointer_rtx, sp_adj1); + else + insn = emit_move_insn (stack_pointer_rtx, + gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)); + REG_NOTES (insn) = cfa_restores; + add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx); + RTX_FRAME_RELATED_P (insn) = 1; + } + else + { + gcc_assert (cfa_restores == NULL); + + if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER) + { + emit_insn (gen_blockage ()); + insn = emit_move_insn (hard_frame_pointer_rtx, + gen_rtx_REG (DImode, vms_save_fp_regno)); + add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx); + RTX_FRAME_RELATED_P (insn) = 1; + } + } +} + +/* Output the rest of the textual info surrounding the epilogue. */ + +void +alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED) +{ + rtx insn; + + /* We output a nop after noreturn calls at the very end of the function to + ensure that the return address always remains in the caller's code range, + as not doing so might confuse unwinding engines. */ + insn = get_last_insn (); + if (!INSN_P (insn)) + insn = prev_active_insn (insn); + if (insn && CALL_P (insn)) + output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL); + +#if TARGET_ABI_OPEN_VMS + /* Write the linkage entries. */ + alpha_write_linkage (file, fnname); +#endif + + /* End the function. */ + if (TARGET_ABI_OPEN_VMS + || !flag_inhibit_size_directive) + { + fputs ("\t.end ", file); + assemble_name (file, fnname); + putc ('\n', file); + } + inside_function = FALSE; +} + +#if TARGET_ABI_OSF +/* Emit a tail call to FUNCTION after adjusting THIS by DELTA. + + In order to avoid the hordes of differences between generated code + with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating + lots of code loading up large constants, generate rtl and emit it + instead of going straight to text. + + Not sure why this idea hasn't been explored before... 
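+
+   [Editor's note: illustrative sketch, added in editing; not part of the
+   original file.  The thunk code below splits DELTA into a sign-extended
+   low 16 bits and a sign-extended high 16 bits so the addition can be done
+   with ldah+lda; the split only covers 32-bit-representable deltas:]  */
+
+static inline int
+thunk_hi_lo_split_sketch (HOST_WIDE_INT delta,
+                          HOST_WIDE_INT *hi, HOST_WIDE_INT *lo)
+{
+  *lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
+  *hi = (((delta - *lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
+  /* e.g. delta == 0x12345: *lo == 0x2345, *hi == 0x10000.  */
+  return *hi + *lo == delta;
+}
+
+/* (end of editorial sketch; the thunk emitter follows.)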
*/ + +static void +alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, + HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, + tree function) +{ + HOST_WIDE_INT hi, lo; + rtx this_rtx, insn, funexp; + + /* We always require a valid GP. */ + emit_insn (gen_prologue_ldgp ()); + emit_note (NOTE_INSN_PROLOGUE_END); + + /* Find the "this" pointer. If the function returns a structure, + the structure return pointer is in $16. */ + if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)) + this_rtx = gen_rtx_REG (Pmode, 17); + else + this_rtx = gen_rtx_REG (Pmode, 16); + + /* Add DELTA. When possible we use ldah+lda. Otherwise load the + entire constant for the add. */ + lo = ((delta & 0xffff) ^ 0x8000) - 0x8000; + hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000; + if (hi + lo == delta) + { + if (hi) + emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi))); + if (lo) + emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo))); + } + else + { + rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0), + delta, -(delta < 0)); + emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp)); + } + + /* Add a delta stored in the vtable at VCALL_OFFSET. */ + if (vcall_offset) + { + rtx tmp, tmp2; + + tmp = gen_rtx_REG (Pmode, 0); + emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx)); + + lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000; + hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000; + if (hi + lo == vcall_offset) + { + if (hi) + emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi))); + } + else + { + tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1), + vcall_offset, -(vcall_offset < 0)); + emit_insn (gen_adddi3 (tmp, tmp, tmp2)); + lo = 0; + } + if (lo) + tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo)); + else + tmp2 = tmp; + emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2)); + + emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp)); + } + + /* Generate a tail call to the target function. */ + if (! TREE_USED (function)) + { + assemble_external (function); + TREE_USED (function) = 1; + } + funexp = XEXP (DECL_RTL (function), 0); + funexp = gen_rtx_MEM (FUNCTION_MODE, funexp); + insn = emit_call_insn (gen_sibcall (funexp, const0_rtx)); + SIBLING_CALL_P (insn) = 1; + + /* Run just enough of rest_of_compilation to get the insns emitted. + There's not really enough bulk here to make other passes such as + instruction scheduling worth while. Note that use_thunk calls + assemble_start_function and assemble_end_function. */ + insn = get_insns (); + shorten_branches (insn); + final_start_function (insn, file, 1); + final (insn, file, 1); + final_end_function (); +} +#endif /* TARGET_ABI_OSF */ + +/* Debugging support. */ + +#include "gstab.h" + +/* Name of the file containing the current function. */ + +static const char *current_function_file = ""; + +/* Offsets to alpha virtual arg/local debugging pointers. */ + +long alpha_arg_offset; +long alpha_auto_offset; + +/* Emit a new filename to a stream. 
*/ + +void +alpha_output_filename (FILE *stream, const char *name) +{ + static int first_time = TRUE; + + if (first_time) + { + first_time = FALSE; + ++num_source_filenames; + current_function_file = name; + fprintf (stream, "\t.file\t%d ", num_source_filenames); + output_quoted_string (stream, name); + fprintf (stream, "\n"); + } + + else if (name != current_function_file + && strcmp (name, current_function_file) != 0) + { + ++num_source_filenames; + current_function_file = name; + fprintf (stream, "\t.file\t%d ", num_source_filenames); + + output_quoted_string (stream, name); + fprintf (stream, "\n"); + } +} + +/* Structure to show the current status of registers and memory. */ + +struct shadow_summary +{ + struct { + unsigned int i : 31; /* Mask of int regs */ + unsigned int fp : 31; /* Mask of fp regs */ + unsigned int mem : 1; /* mem == imem | fpmem */ + } used, defd; +}; + +/* Summary the effects of expression X on the machine. Update SUM, a pointer + to the summary structure. SET is nonzero if the insn is setting the + object, otherwise zero. */ + +static void +summarize_insn (rtx x, struct shadow_summary *sum, int set) +{ + const char *format_ptr; + int i, j; + + if (x == 0) + return; + + switch (GET_CODE (x)) + { + /* ??? Note that this case would be incorrect if the Alpha had a + ZERO_EXTRACT in SET_DEST. */ + case SET: + summarize_insn (SET_SRC (x), sum, 0); + summarize_insn (SET_DEST (x), sum, 1); + break; + + case CLOBBER: + summarize_insn (XEXP (x, 0), sum, 1); + break; + + case USE: + summarize_insn (XEXP (x, 0), sum, 0); + break; + + case ASM_OPERANDS: + for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--) + summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0); + break; + + case PARALLEL: + for (i = XVECLEN (x, 0) - 1; i >= 0; i--) + summarize_insn (XVECEXP (x, 0, i), sum, 0); + break; + + case SUBREG: + summarize_insn (SUBREG_REG (x), sum, 0); + break; + + case REG: + { + int regno = REGNO (x); + unsigned long mask = ((unsigned long) 1) << (regno % 32); + + if (regno == 31 || regno == 63) + break; + + if (set) + { + if (regno < 32) + sum->defd.i |= mask; + else + sum->defd.fp |= mask; + } + else + { + if (regno < 32) + sum->used.i |= mask; + else + sum->used.fp |= mask; + } + } + break; + + case MEM: + if (set) + sum->defd.mem = 1; + else + sum->used.mem = 1; + + /* Find the regs used in memory address computation: */ + summarize_insn (XEXP (x, 0), sum, 0); + break; + + case CONST_INT: case CONST_DOUBLE: + case SYMBOL_REF: case LABEL_REF: case CONST: + case SCRATCH: case ASM_INPUT: + break; + + /* Handle common unary and binary ops for efficiency. 
*/ + case COMPARE: case PLUS: case MINUS: case MULT: case DIV: + case MOD: case UDIV: case UMOD: case AND: case IOR: + case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT: + case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX: + case NE: case EQ: case GE: case GT: case LE: + case LT: case GEU: case GTU: case LEU: case LTU: + summarize_insn (XEXP (x, 0), sum, 0); + summarize_insn (XEXP (x, 1), sum, 0); + break; + + case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND: + case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT: + case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS: + case SQRT: case FFS: + summarize_insn (XEXP (x, 0), sum, 0); + break; + + default: + format_ptr = GET_RTX_FORMAT (GET_CODE (x)); + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) + switch (format_ptr[i]) + { + case 'e': + summarize_insn (XEXP (x, i), sum, 0); + break; + + case 'E': + for (j = XVECLEN (x, i) - 1; j >= 0; j--) + summarize_insn (XVECEXP (x, i, j), sum, 0); + break; + + case 'i': + break; + + default: + gcc_unreachable (); + } + } +} + +/* Ensure a sufficient number of `trapb' insns are in the code when + the user requests code with a trap precision of functions or + instructions. + + In naive mode, when the user requests a trap-precision of + "instruction", a trapb is needed after every instruction that may + generate a trap. This ensures that the code is resumption safe but + it is also slow. + + When optimizations are turned on, we delay issuing a trapb as long + as possible. In this context, a trap shadow is the sequence of + instructions that starts with a (potentially) trap generating + instruction and extends to the next trapb or call_pal instruction + (but GCC never generates call_pal by itself). We can delay (and + therefore sometimes omit) a trapb subject to the following + conditions: + + (a) On entry to the trap shadow, if any Alpha register or memory + location contains a value that is used as an operand value by some + instruction in the trap shadow (live on entry), then no instruction + in the trap shadow may modify the register or memory location. + + (b) Within the trap shadow, the computation of the base register + for a memory load or store instruction may not involve using the + result of an instruction that might generate an UNPREDICTABLE + result. + + (c) Within the trap shadow, no register may be used more than once + as a destination register. (This is to make life easier for the + trap-handler.) + + (d) The trap shadow may not include any branch instructions. 
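+
+   As an illustrative sketch: if the shadow opened by a potentially
+   trapping divt $f1,$f2,$f3 also contained addt $f3,$f4,$f3, condition
+   (c) would be violated ($f3 written twice), and an instruction that
+   overwrote the input $f1 would violate (a); either way the trap handler
+   could no longer recover the operands needed to resume, so a trapb
+   would have to be issued first.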
*/ + +static void +alpha_handle_trap_shadows (void) +{ + struct shadow_summary shadow; + int trap_pending, exception_nesting; + rtx i, n; + + trap_pending = 0; + exception_nesting = 0; + shadow.used.i = 0; + shadow.used.fp = 0; + shadow.used.mem = 0; + shadow.defd = shadow.used; + + for (i = get_insns (); i ; i = NEXT_INSN (i)) + { + if (NOTE_P (i)) + { + switch (NOTE_KIND (i)) + { + case NOTE_INSN_EH_REGION_BEG: + exception_nesting++; + if (trap_pending) + goto close_shadow; + break; + + case NOTE_INSN_EH_REGION_END: + exception_nesting--; + if (trap_pending) + goto close_shadow; + break; + + case NOTE_INSN_EPILOGUE_BEG: + if (trap_pending && alpha_tp >= ALPHA_TP_FUNC) + goto close_shadow; + break; + } + } + else if (trap_pending) + { + if (alpha_tp == ALPHA_TP_FUNC) + { + if (JUMP_P (i) + && GET_CODE (PATTERN (i)) == RETURN) + goto close_shadow; + } + else if (alpha_tp == ALPHA_TP_INSN) + { + if (optimize > 0) + { + struct shadow_summary sum; + + sum.used.i = 0; + sum.used.fp = 0; + sum.used.mem = 0; + sum.defd = sum.used; + + switch (GET_CODE (i)) + { + case INSN: + /* Annoyingly, get_attr_trap will die on these. */ + if (GET_CODE (PATTERN (i)) == USE + || GET_CODE (PATTERN (i)) == CLOBBER) + break; + + summarize_insn (PATTERN (i), &sum, 0); + + if ((sum.defd.i & shadow.defd.i) + || (sum.defd.fp & shadow.defd.fp)) + { + /* (c) would be violated */ + goto close_shadow; + } + + /* Combine shadow with summary of current insn: */ + shadow.used.i |= sum.used.i; + shadow.used.fp |= sum.used.fp; + shadow.used.mem |= sum.used.mem; + shadow.defd.i |= sum.defd.i; + shadow.defd.fp |= sum.defd.fp; + shadow.defd.mem |= sum.defd.mem; + + if ((sum.defd.i & shadow.used.i) + || (sum.defd.fp & shadow.used.fp) + || (sum.defd.mem & shadow.used.mem)) + { + /* (a) would be violated (also takes care of (b)) */ + gcc_assert (get_attr_trap (i) != TRAP_YES + || (!(sum.defd.i & sum.used.i) + && !(sum.defd.fp & sum.used.fp))); + + goto close_shadow; + } + break; + + case JUMP_INSN: + case CALL_INSN: + case CODE_LABEL: + goto close_shadow; + + default: + gcc_unreachable (); + } + } + else + { + close_shadow: + n = emit_insn_before (gen_trapb (), i); + PUT_MODE (n, TImode); + PUT_MODE (i, TImode); + trap_pending = 0; + shadow.used.i = 0; + shadow.used.fp = 0; + shadow.used.mem = 0; + shadow.defd = shadow.used; + } + } + } + + if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC) + && NONJUMP_INSN_P (i) + && GET_CODE (PATTERN (i)) != USE + && GET_CODE (PATTERN (i)) != CLOBBER + && get_attr_trap (i) == TRAP_YES) + { + if (optimize && !trap_pending) + summarize_insn (PATTERN (i), &shadow, 0); + trap_pending = 1; + } + } +} + +/* Alpha can only issue instruction groups simultaneously if they are + suitably aligned. This is very processor-specific. */ +/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe + that are marked "fake". These instructions do not exist on that target, + but it is possible to see these insns with deranged combinations of + command-line options, such as "-mtune=ev4 -mmax". Instead of aborting, + choose a result at random. 
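+
+   For example, with "-mtune=ev4 -mmax" an MVI insn can turn up even
+   though EV4 implements no MAX extension; alphaev4_insn_pipe below
+   simply files TYPE_MVI under EV4_IB0 instead of aborting.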
*/ + +enum alphaev4_pipe { + EV4_STOP = 0, + EV4_IB0 = 1, + EV4_IB1 = 2, + EV4_IBX = 4 +}; + +enum alphaev5_pipe { + EV5_STOP = 0, + EV5_NONE = 1, + EV5_E01 = 2, + EV5_E0 = 4, + EV5_E1 = 8, + EV5_FAM = 16, + EV5_FA = 32, + EV5_FM = 64 +}; + +static enum alphaev4_pipe +alphaev4_insn_pipe (rtx insn) +{ + if (recog_memoized (insn) < 0) + return EV4_STOP; + if (get_attr_length (insn) != 4) + return EV4_STOP; + + switch (get_attr_type (insn)) + { + case TYPE_ILD: + case TYPE_LDSYM: + case TYPE_FLD: + case TYPE_LD_L: + return EV4_IBX; + + case TYPE_IADD: + case TYPE_ILOG: + case TYPE_ICMOV: + case TYPE_ICMP: + case TYPE_FST: + case TYPE_SHIFT: + case TYPE_IMUL: + case TYPE_FBR: + case TYPE_MVI: /* fake */ + return EV4_IB0; + + case TYPE_IST: + case TYPE_MISC: + case TYPE_IBR: + case TYPE_JSR: + case TYPE_CALLPAL: + case TYPE_FCPYS: + case TYPE_FCMOV: + case TYPE_FADD: + case TYPE_FDIV: + case TYPE_FMUL: + case TYPE_ST_C: + case TYPE_MB: + case TYPE_FSQRT: /* fake */ + case TYPE_FTOI: /* fake */ + case TYPE_ITOF: /* fake */ + return EV4_IB1; + + default: + gcc_unreachable (); + } +} + +static enum alphaev5_pipe +alphaev5_insn_pipe (rtx insn) +{ + if (recog_memoized (insn) < 0) + return EV5_STOP; + if (get_attr_length (insn) != 4) + return EV5_STOP; + + switch (get_attr_type (insn)) + { + case TYPE_ILD: + case TYPE_FLD: + case TYPE_LDSYM: + case TYPE_IADD: + case TYPE_ILOG: + case TYPE_ICMOV: + case TYPE_ICMP: + return EV5_E01; + + case TYPE_IST: + case TYPE_FST: + case TYPE_SHIFT: + case TYPE_IMUL: + case TYPE_MISC: + case TYPE_MVI: + case TYPE_LD_L: + case TYPE_ST_C: + case TYPE_MB: + case TYPE_FTOI: /* fake */ + case TYPE_ITOF: /* fake */ + return EV5_E0; + + case TYPE_IBR: + case TYPE_JSR: + case TYPE_CALLPAL: + return EV5_E1; + + case TYPE_FCPYS: + return EV5_FAM; + + case TYPE_FBR: + case TYPE_FCMOV: + case TYPE_FADD: + case TYPE_FDIV: + case TYPE_FSQRT: /* fake */ + return EV5_FA; + + case TYPE_FMUL: + return EV5_FM; + + default: + gcc_unreachable (); + } +} + +/* IN_USE is a mask of the slots currently filled within the insn group. + The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then + the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1. + + LEN is, of course, the length of the group in bytes. */ + +static rtx +alphaev4_next_group (rtx insn, int *pin_use, int *plen) +{ + int len, in_use; + + len = in_use = 0; + + if (! INSN_P (insn) + || GET_CODE (PATTERN (insn)) == CLOBBER + || GET_CODE (PATTERN (insn)) == USE) + goto next_and_done; + + while (1) + { + enum alphaev4_pipe pipe; + + pipe = alphaev4_insn_pipe (insn); + switch (pipe) + { + case EV4_STOP: + /* Force complex instructions to start new groups. */ + if (in_use) + goto done; + + /* If this is a completely unrecognized insn, it's an asm. + We don't know how long it is, so record length as -1 to + signal a needed realignment. */ + if (recog_memoized (insn) < 0) + len = -1; + else + len = get_attr_length (insn); + goto next_and_done; + + case EV4_IBX: + if (in_use & EV4_IB0) + { + if (in_use & EV4_IB1) + goto done; + in_use |= EV4_IB1; + } + else + in_use |= EV4_IB0 | EV4_IBX; + break; + + case EV4_IB0: + if (in_use & EV4_IB0) + { + if (!(in_use & EV4_IBX) || (in_use & EV4_IB1)) + goto done; + in_use |= EV4_IB1; + } + in_use |= EV4_IB0; + break; + + case EV4_IB1: + if (in_use & EV4_IB1) + goto done; + in_use |= EV4_IB1; + break; + + default: + gcc_unreachable (); + } + len += 4; + + /* Haifa doesn't do well scheduling branches. 
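+	 A branch therefore ends the group: once a jump is slotted we fall
+	 through to next_and_done below.  (Note also how the EV4_IBX case
+	 above models the hardware's ability to move a load from IB0 into
+	 IB1 when a second IB0-only insn turns up.)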
*/ + if (JUMP_P (insn)) + goto next_and_done; + + next: + insn = next_nonnote_insn (insn); + + if (!insn || ! INSN_P (insn)) + goto done; + + /* Let Haifa tell us where it thinks insn group boundaries are. */ + if (GET_MODE (insn) == TImode) + goto done; + + if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE) + goto next; + } + + next_and_done: + insn = next_nonnote_insn (insn); + + done: + *plen = len; + *pin_use = in_use; + return insn; +} + +/* IN_USE is a mask of the slots currently filled within the insn group. + The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then + the insn in EV5_E0 can be swapped by the hardware into EV5_E1. + + LEN is, of course, the length of the group in bytes. */ + +static rtx +alphaev5_next_group (rtx insn, int *pin_use, int *plen) +{ + int len, in_use; + + len = in_use = 0; + + if (! INSN_P (insn) + || GET_CODE (PATTERN (insn)) == CLOBBER + || GET_CODE (PATTERN (insn)) == USE) + goto next_and_done; + + while (1) + { + enum alphaev5_pipe pipe; + + pipe = alphaev5_insn_pipe (insn); + switch (pipe) + { + case EV5_STOP: + /* Force complex instructions to start new groups. */ + if (in_use) + goto done; + + /* If this is a completely unrecognized insn, it's an asm. + We don't know how long it is, so record length as -1 to + signal a needed realignment. */ + if (recog_memoized (insn) < 0) + len = -1; + else + len = get_attr_length (insn); + goto next_and_done; + + /* ??? Most of the places below, we would like to assert never + happen, as it would indicate an error either in Haifa, or + in the scheduling description. Unfortunately, Haifa never + schedules the last instruction of the BB, so we don't have + an accurate TI bit to go off. */ + case EV5_E01: + if (in_use & EV5_E0) + { + if (in_use & EV5_E1) + goto done; + in_use |= EV5_E1; + } + else + in_use |= EV5_E0 | EV5_E01; + break; + + case EV5_E0: + if (in_use & EV5_E0) + { + if (!(in_use & EV5_E01) || (in_use & EV5_E1)) + goto done; + in_use |= EV5_E1; + } + in_use |= EV5_E0; + break; + + case EV5_E1: + if (in_use & EV5_E1) + goto done; + in_use |= EV5_E1; + break; + + case EV5_FAM: + if (in_use & EV5_FA) + { + if (in_use & EV5_FM) + goto done; + in_use |= EV5_FM; + } + else + in_use |= EV5_FA | EV5_FAM; + break; + + case EV5_FA: + if (in_use & EV5_FA) + goto done; + in_use |= EV5_FA; + break; + + case EV5_FM: + if (in_use & EV5_FM) + goto done; + in_use |= EV5_FM; + break; + + case EV5_NONE: + break; + + default: + gcc_unreachable (); + } + len += 4; + + /* Haifa doesn't do well scheduling branches. */ + /* ??? If this is predicted not-taken, slotting continues, except + that no more IBR, FBR, or JSR insns may be slotted. */ + if (JUMP_P (insn)) + goto next_and_done; + + next: + insn = next_nonnote_insn (insn); + + if (!insn || ! INSN_P (insn)) + goto done; + + /* Let Haifa tell us where it thinks insn group boundaries are. 
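+	 Our reading: the second scheduling pass marks the first insn of
+	 each issue group with TImode, so finding a TImode insn here means
+	 a new group starts -- hence the GET_MODE check below.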
*/ + if (GET_MODE (insn) == TImode) + goto done; + + if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE) + goto next; + } + + next_and_done: + insn = next_nonnote_insn (insn); + + done: + *plen = len; + *pin_use = in_use; + return insn; +} + +static rtx +alphaev4_next_nop (int *pin_use) +{ + int in_use = *pin_use; + rtx nop; + + if (!(in_use & EV4_IB0)) + { + in_use |= EV4_IB0; + nop = gen_nop (); + } + else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX) + { + in_use |= EV4_IB1; + nop = gen_nop (); + } + else if (TARGET_FP && !(in_use & EV4_IB1)) + { + in_use |= EV4_IB1; + nop = gen_fnop (); + } + else + nop = gen_unop (); + + *pin_use = in_use; + return nop; +} + +static rtx +alphaev5_next_nop (int *pin_use) +{ + int in_use = *pin_use; + rtx nop; + + if (!(in_use & EV5_E1)) + { + in_use |= EV5_E1; + nop = gen_nop (); + } + else if (TARGET_FP && !(in_use & EV5_FA)) + { + in_use |= EV5_FA; + nop = gen_fnop (); + } + else if (TARGET_FP && !(in_use & EV5_FM)) + { + in_use |= EV5_FM; + nop = gen_fnop (); + } + else + nop = gen_unop (); + + *pin_use = in_use; + return nop; +} + +/* The instruction group alignment main loop. */ + +static void +alpha_align_insns (unsigned int max_align, + rtx (*next_group) (rtx, int *, int *), + rtx (*next_nop) (int *)) +{ + /* ALIGN is the known alignment for the insn group. */ + unsigned int align; + /* OFS is the offset of the current insn in the insn group. */ + int ofs; + int prev_in_use, in_use, len, ldgp; + rtx i, next; + + /* Let shorten branches care for assigning alignments to code labels. */ + shorten_branches (get_insns ()); + + if (align_functions < 4) + align = 4; + else if ((unsigned int) align_functions < max_align) + align = align_functions; + else + align = max_align; + + ofs = prev_in_use = 0; + i = get_insns (); + if (NOTE_P (i)) + i = next_nonnote_insn (i); + + ldgp = alpha_function_needs_gp ? 8 : 0; + + while (i) + { + next = (*next_group) (i, &in_use, &len); + + /* When we see a label, resync alignment etc. */ + if (LABEL_P (i)) + { + unsigned int new_align = 1 << label_to_alignment (i); + + if (new_align >= align) + { + align = new_align < max_align ? new_align : max_align; + ofs = 0; + } + + else if (ofs & (new_align-1)) + ofs = (ofs | (new_align-1)) + 1; + gcc_assert (!len); + } + + /* Handle complex instructions special. */ + else if (in_use == 0) + { + /* Asms will have length < 0. This is a signal that we have + lost alignment knowledge. Assume, however, that the asm + will not mis-align instructions. */ + if (len < 0) + { + ofs = 0; + align = 4; + len = 0; + } + } + + /* If the known alignment is smaller than the recognized insn group, + realign the output. */ + else if ((int) align < len) + { + unsigned int new_log_align = len > 8 ? 4 : 3; + rtx prev, where; + + where = prev = prev_nonnote_insn (i); + if (!where || !LABEL_P (where)) + where = i; + + /* Can't realign between a call and its gp reload. */ + if (! (TARGET_EXPLICIT_RELOCS + && prev && CALL_P (prev))) + { + emit_insn_before (gen_realign (GEN_INT (new_log_align)), where); + align = 1 << new_log_align; + ofs = 0; + } + } + + /* We may not insert padding inside the initial ldgp sequence. */ + else if (ldgp > 0) + ldgp -= len; + + /* If the group won't fit in the same INT16 as the previous, + we need to add padding to keep the group together. Rather + than simply leaving the insn filling to the assembler, we + can make use of the knowledge of what sorts of instructions + were issued in the previous group to make sure that all of + the added nops are really free. 
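+
+     As a worked example with assumed numbers: at align == 16 and
+     ofs == 12, an 8-byte group would straddle the boundary, so
+     (align - ofs) / 4 == 1 nop is emitted below, and the next_nop
+     callback picks a nop, fnop or unop that lands in an issue slot the
+     previous group left empty.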
*/
+	  else if (ofs + len > (int) align)
+	    {
+	      int nop_count = (align - ofs) / 4;
+	      rtx where;
+
+	      /* Insert nops before labels, branches, and calls to truly merge
+		 the execution of the nops with the previous instruction group.  */
+	      where = prev_nonnote_insn (i);
+	      if (where)
+		{
+		  if (LABEL_P (where))
+		    {
+		      rtx where2 = prev_nonnote_insn (where);
+		      if (where2 && JUMP_P (where2))
+			where = where2;
+		    }
+		  else if (NONJUMP_INSN_P (where))
+		    where = i;
+		}
+	      else
+		where = i;
+
+	      do
+		emit_insn_before ((*next_nop)(&prev_in_use), where);
+	      while (--nop_count);
+	      ofs = 0;
+	    }
+
+	  ofs = (ofs + len) & (align - 1);
+	  prev_in_use = in_use;
+	  i = next;
+    }
+}
+
+/* Insert an unop between sibcall or noreturn function call and GP load.  */
+
+static void
+alpha_pad_function_end (void)
+{
+  rtx insn, next;
+
+  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+    {
+      if (!CALL_P (insn)
+	  || !(SIBLING_CALL_P (insn)
+	       || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
+	continue;
+
+      /* Make sure we do not split a call and its corresponding
+	 CALL_ARG_LOCATION note.  */
+      next = NEXT_INSN (insn);
+      if (next == NULL)
+	continue;
+      if (BARRIER_P (next))
+	{
+	  next = NEXT_INSN (next);
+	  if (next == NULL)
+	    continue;
+	}
+      if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
+	insn = next;
+
+      next = next_active_insn (insn);
+      if (next)
+	{
+	  rtx pat = PATTERN (next);
+
+	  if (GET_CODE (pat) == SET
+	      && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
+	      && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
+	    emit_insn_after (gen_unop (), insn);
+	}
+    }
+}
+
+/* Machine dependent reorg pass.  */
+
+static void
+alpha_reorg (void)
+{
+  /* Workaround for a linker error that triggers when an exception
+     handler immediately follows a sibcall or a noreturn function.
+
+     In the sibcall case:
+
+       The instruction stream from an object file:
+
+	1d8:   00 00 fb 6b     jmp     (t12)
+	1dc:   00 00 ba 27     ldah    gp,0(ra)
+	1e0:   00 00 bd 23     lda     gp,0(gp)
+	1e4:   00 00 7d a7     ldq     t12,0(gp)
+	1e8:   00 40 5b 6b     jsr     ra,(t12),1ec <__funcZ+0x1ec>
+
+       was converted in the final link pass to:
+
+	12003aa88:   67 fa ff c3     br      120039428 <...>
+	12003aa8c:   00 00 fe 2f     unop
+	12003aa90:   00 00 fe 2f     unop
+	12003aa94:   48 83 7d a7     ldq     t12,-31928(gp)
+	12003aa98:   00 40 5b 6b     jsr     ra,(t12),12003aa9c <__func+0x1ec>
+
+     And in the noreturn case:
+
+       The instruction stream from an object file:
+
+	54:   00 40 5b 6b     jsr     ra,(t12),58 <__func+0x58>
+	58:   00 00 ba 27     ldah    gp,0(ra)
+	5c:   00 00 bd 23     lda     gp,0(gp)
+	60:   00 00 7d a7     ldq     t12,0(gp)
+	64:   00 40 5b 6b     jsr     ra,(t12),68 <__func+0x68>
+
+       was converted in the final link pass to:
+
+	fdb24:   a0 03 40 d3     bsr     ra,fe9a8 <_called_func+0x8>
+	fdb28:   00 00 fe 2f     unop
+	fdb2c:   00 00 fe 2f     unop
+	fdb30:   30 82 7d a7     ldq     t12,-32208(gp)
+	fdb34:   00 40 5b 6b     jsr     ra,(t12),fdb38 <__func+0x68>
+
+     GP load instructions were wrongly cleared by the linker relaxation
+     pass.  This workaround prevents removal of GP loads by inserting
+     an unop instruction between a sibcall or noreturn function call and
+     exception handler prologue.  */
+
+  if (current_function_has_exception_handlers ())
+    alpha_pad_function_end ();
+
+  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
+    alpha_handle_trap_shadows ();
+
+  /* Due to the number of extra trapb insns, don't bother fixing up
+     alignment when trap precision is instruction.  Moreover, we can
+     only do our job when sched2 is run.
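+
+     The max_align arguments passed below mirror the issue width: EV4
+     issues at most two instructions per aligned 8-byte pair, EV5 four
+     per aligned 16-byte block.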
*/ + if (optimize && !optimize_size + && alpha_tp != ALPHA_TP_INSN + && flag_schedule_insns_after_reload) + { + if (alpha_tune == PROCESSOR_EV4) + alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop); + else if (alpha_tune == PROCESSOR_EV5) + alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop); + } +} + +static void +alpha_file_start (void) +{ + default_file_start (); + + fputs ("\t.set noreorder\n", asm_out_file); + fputs ("\t.set volatile\n", asm_out_file); + if (TARGET_ABI_OSF) + fputs ("\t.set noat\n", asm_out_file); + if (TARGET_EXPLICIT_RELOCS) + fputs ("\t.set nomacro\n", asm_out_file); + if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX) + { + const char *arch; + + if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX) + arch = "ev6"; + else if (TARGET_MAX) + arch = "pca56"; + else if (TARGET_BWX) + arch = "ev56"; + else if (alpha_cpu == PROCESSOR_EV5) + arch = "ev5"; + else + arch = "ev4"; + + fprintf (asm_out_file, "\t.arch %s\n", arch); + } +} + +/* Since we don't have a .dynbss section, we should not allow global + relocations in the .rodata section. */ + +static int +alpha_elf_reloc_rw_mask (void) +{ + return flag_pic ? 3 : 2; +} + +/* Return a section for X. The only special thing we do here is to + honor small data. */ + +static section * +alpha_elf_select_rtx_section (enum machine_mode mode, rtx x, + unsigned HOST_WIDE_INT align) +{ + if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value) + /* ??? Consider using mergeable sdata sections. */ + return sdata_section; + else + return default_elf_select_rtx_section (mode, x, align); +} + +static unsigned int +alpha_elf_section_type_flags (tree decl, const char *name, int reloc) +{ + unsigned int flags = 0; + + if (strcmp (name, ".sdata") == 0 + || strncmp (name, ".sdata.", 7) == 0 + || strncmp (name, ".gnu.linkonce.s.", 16) == 0 + || strcmp (name, ".sbss") == 0 + || strncmp (name, ".sbss.", 6) == 0 + || strncmp (name, ".gnu.linkonce.sb.", 17) == 0) + flags = SECTION_SMALL; + + flags |= default_section_type_flags (decl, name, reloc); + return flags; +} + +/* Structure to collect function names for final output in link section. */ +/* Note that items marked with GTY can't be ifdef'ed out. */ + +enum reloc_kind +{ + KIND_LINKAGE, + KIND_CODEADDR +}; + +struct GTY(()) alpha_links +{ + rtx func; + rtx linkage; + enum reloc_kind rkind; +}; + +#if TARGET_ABI_OPEN_VMS + +/* Return the VMS argument type corresponding to MODE. */ + +enum avms_arg_type +alpha_arg_type (enum machine_mode mode) +{ + switch (mode) + { + case SFmode: + return TARGET_FLOAT_VAX ? FF : FS; + case DFmode: + return TARGET_FLOAT_VAX ? FD : FT; + default: + return I64; + } +} + +/* Return an rtx for an integer representing the VMS Argument Information + register value. */ + +rtx +alpha_arg_info_reg_val (CUMULATIVE_ARGS cum) +{ + unsigned HOST_WIDE_INT regval = cum.num_args; + int i; + + for (i = 0; i < 6; i++) + regval |= ((int) cum.atypes[i]) << (i * 3 + 8); + + return GEN_INT (regval); +} + + +/* Return a SYMBOL_REF representing the reference to the .linkage entry + of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if + this is the reference to the linkage pointer value, 0 if this is the + reference to the function entry value. RFLAG is 1 if this a reduced + reference (code address only), 0 if this is a full reference. 
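+
+   For example (hypothetical function name): a call to foo from the
+   function numbered funcdef_no 1 produces the linkage symbol
+   $1..foo..lk via the snprintf below; with LFLAG set, the reference is
+   to the second quadword of the linkage pair, hence the
+   plus_constant (..., 8).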
*/ + +rtx +alpha_use_linkage (rtx func, bool lflag, bool rflag) +{ + struct alpha_links *al = NULL; + const char *name = XSTR (func, 0); + + if (cfun->machine->links) + { + splay_tree_node lnode; + + /* Is this name already defined? */ + lnode = splay_tree_lookup (cfun->machine->links, (splay_tree_key) name); + if (lnode) + al = (struct alpha_links *) lnode->value; + } + else + cfun->machine->links = splay_tree_new_ggc + ((splay_tree_compare_fn) strcmp, + ggc_alloc_splay_tree_str_alpha_links_splay_tree_s, + ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s); + + if (al == NULL) + { + size_t buf_len; + char *linksym; + tree id; + + if (name[0] == '*') + name++; + + /* Follow transparent alias, as this is used for CRTL translations. */ + id = maybe_get_identifier (name); + if (id) + { + while (IDENTIFIER_TRANSPARENT_ALIAS (id)) + id = TREE_CHAIN (id); + name = IDENTIFIER_POINTER (id); + } + + buf_len = strlen (name) + 8 + 9; + linksym = (char *) alloca (buf_len); + snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name); + + al = ggc_alloc_alpha_links (); + al->func = func; + al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym)); + + splay_tree_insert (cfun->machine->links, + (splay_tree_key) ggc_strdup (name), + (splay_tree_value) al); + } + + al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE; + + if (lflag) + return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8)); + else + return al->linkage; +} + +static int +alpha_write_one_linkage (splay_tree_node node, void *data) +{ + const char *const name = (const char *) node->key; + struct alpha_links *link = (struct alpha_links *) node->value; + FILE *stream = (FILE *) data; + + ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0)); + if (link->rkind == KIND_CODEADDR) + { + /* External and used, request code address. */ + fprintf (stream, "\t.code_address "); + } + else + { + if (!SYMBOL_REF_EXTERNAL_P (link->func) + && SYMBOL_REF_LOCAL_P (link->func)) + { + /* Locally defined, build linkage pair. */ + fprintf (stream, "\t.quad %s..en\n", name); + fprintf (stream, "\t.quad "); + } + else + { + /* External, request linkage pair. */ + fprintf (stream, "\t.linkage "); + } + } + assemble_name (stream, name); + fputs ("\n", stream); + + return 0; +} + +static void +alpha_write_linkage (FILE *stream, const char *funname) +{ + fprintf (stream, "\t.link\n"); + fprintf (stream, "\t.align 3\n"); + in_section = NULL; + +#ifdef TARGET_VMS_CRASH_DEBUG + fputs ("\t.name ", stream); + assemble_name (stream, funname); + fputs ("..na\n", stream); +#endif + + ASM_OUTPUT_LABEL (stream, funname); + fprintf (stream, "\t.pdesc "); + assemble_name (stream, funname); + fprintf (stream, "..en,%s\n", + alpha_procedure_type == PT_STACK ? "stack" + : alpha_procedure_type == PT_REGISTER ? "reg" : "null"); + + if (cfun->machine->links) + { + splay_tree_foreach (cfun->machine->links, alpha_write_one_linkage, stream); + /* splay_tree_delete (func->links); */ + } +} + +/* Switch to an arbitrary section NAME with attributes as specified + by FLAGS. ALIGN specifies any known alignment requirements for + the section; 0 if the default should be used. */ + +static void +vms_asm_named_section (const char *name, unsigned int flags, + tree decl ATTRIBUTE_UNUSED) +{ + fputc ('\n', asm_out_file); + fprintf (asm_out_file, ".section\t%s", name); + + if (flags & SECTION_DEBUG) + fprintf (asm_out_file, ",NOWRT"); + + fputc ('\n', asm_out_file); +} + +/* Record an element in the table of global constructors. 
SYMBOL is + a SYMBOL_REF of the function to be called; PRIORITY is a number + between 0 and MAX_INIT_PRIORITY. + + Differs from default_ctors_section_asm_out_constructor in that the + width of the .ctors entry is always 64 bits, rather than the 32 bits + used by a normal pointer. */ + +static void +vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED) +{ + switch_to_section (ctors_section); + assemble_align (BITS_PER_WORD); + assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1); +} + +static void +vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED) +{ + switch_to_section (dtors_section); + assemble_align (BITS_PER_WORD); + assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1); +} +#else +rtx +alpha_use_linkage (rtx func ATTRIBUTE_UNUSED, + bool lflag ATTRIBUTE_UNUSED, + bool rflag ATTRIBUTE_UNUSED) +{ + return NULL_RTX; +} + +#endif /* TARGET_ABI_OPEN_VMS */ + +static void +alpha_init_libfuncs (void) +{ + if (TARGET_ABI_OPEN_VMS) + { + /* Use the VMS runtime library functions for division and + remainder. */ + set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I"); + set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L"); + set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI"); + set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL"); + set_optab_libfunc (smod_optab, SImode, "OTS$REM_I"); + set_optab_libfunc (smod_optab, DImode, "OTS$REM_L"); + set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI"); + set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL"); + abort_libfunc = init_one_libfunc ("decc$abort"); + memcmp_libfunc = init_one_libfunc ("decc$memcmp"); +#ifdef MEM_LIBFUNCS_INIT + MEM_LIBFUNCS_INIT; +#endif + } +} + +/* On the Alpha, we use this to disable the floating-point registers + when they don't exist. */ + +static void +alpha_conditional_register_usage (void) +{ + int i; + if (! TARGET_FPREGS) + for (i = 32; i < 63; i++) + fixed_regs[i] = call_used_regs[i] = 1; +} + +/* Canonicalize a comparison from one we don't have to one we do have. */ + +static void +alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1, + bool op0_preserve_value) +{ + if (!op0_preserve_value + && (*code == GE || *code == GT || *code == GEU || *code == GTU) + && (REG_P (*op1) || *op1 == const0_rtx)) + { + rtx tem = *op0; + *op0 = *op1; + *op1 = tem; + *code = (int)swap_condition ((enum rtx_code)*code); + } + + if ((*code == LT || *code == LTU) + && CONST_INT_P (*op1) && INTVAL (*op1) == 256) + { + *code = *code == LT ? LE : LEU; + *op1 = GEN_INT (255); + } +} + +/* Initialize the GCC target structure. */ +#if TARGET_ABI_OPEN_VMS +# undef TARGET_ATTRIBUTE_TABLE +# define TARGET_ATTRIBUTE_TABLE vms_attribute_table +# undef TARGET_CAN_ELIMINATE +# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate +#endif + +#undef TARGET_IN_SMALL_DATA_P +#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p + +#undef TARGET_ASM_ALIGNED_HI_OP +#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t" +#undef TARGET_ASM_ALIGNED_DI_OP +#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t" + +/* Default unaligned ops are provided for ELF systems. To get unaligned + data for non-ELF systems, we have to turn off auto alignment. 
*/
+#if TARGET_ABI_OPEN_VMS
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
+#endif
+
+#undef TARGET_ASM_RELOC_RW_MASK
+#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
+#undef TARGET_SECTION_TYPE_FLAGS
+#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
+
+#undef TARGET_ASM_FUNCTION_END_PROLOGUE
+#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
+
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
+#undef TARGET_MODE_DEPENDENT_ADDRESS_P
+#define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START alpha_file_start
+
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
+  alpha_multipass_dfa_lookahead
+
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS HAVE_AS_TLS
+
+#undef TARGET_BUILTIN_DECL
+#define TARGET_BUILTIN_DECL alpha_builtin_decl
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS alpha_init_builtins
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
+#undef TARGET_FOLD_BUILTIN
+#define TARGET_FOLD_BUILTIN alpha_fold_builtin
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
+#undef TARGET_CANNOT_COPY_INSN_P
+#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
+
+#if TARGET_ABI_OSF
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
+#undef TARGET_STDARG_OPTIMIZE_HOOK
+#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
+#endif
+
+/* Use 16-bit anchors.
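+   The [-0x8000, 0x7fff] window below matches the signed 16-bit
+   displacement field of lda and the Alpha load/store formats, so any
+   anchored reference should be reachable in a single instruction.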
*/ +#undef TARGET_MIN_ANCHOR_OFFSET +#define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1 +#undef TARGET_MAX_ANCHOR_OFFSET +#define TARGET_MAX_ANCHOR_OFFSET 0x7fff +#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P +#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true + +#undef TARGET_RTX_COSTS +#define TARGET_RTX_COSTS alpha_rtx_costs +#undef TARGET_ADDRESS_COST +#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0 + +#undef TARGET_MACHINE_DEPENDENT_REORG +#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg + +#undef TARGET_PROMOTE_FUNCTION_MODE +#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote +#undef TARGET_PROMOTE_PROTOTYPES +#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false +#undef TARGET_RETURN_IN_MEMORY +#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory +#undef TARGET_PASS_BY_REFERENCE +#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference +#undef TARGET_SETUP_INCOMING_VARARGS +#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs +#undef TARGET_STRICT_ARGUMENT_NAMING +#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true +#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED +#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true +#undef TARGET_SPLIT_COMPLEX_ARG +#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg +#undef TARGET_GIMPLIFY_VA_ARG_EXPR +#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg +#undef TARGET_ARG_PARTIAL_BYTES +#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes +#undef TARGET_FUNCTION_ARG +#define TARGET_FUNCTION_ARG alpha_function_arg +#undef TARGET_FUNCTION_ARG_ADVANCE +#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance +#undef TARGET_TRAMPOLINE_INIT +#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init + +#undef TARGET_INSTANTIATE_DECLS +#define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls + +#undef TARGET_SECONDARY_RELOAD +#define TARGET_SECONDARY_RELOAD alpha_secondary_reload + +#undef TARGET_SCALAR_MODE_SUPPORTED_P +#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p +#undef TARGET_VECTOR_MODE_SUPPORTED_P +#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p + +#undef TARGET_BUILD_BUILTIN_VA_LIST +#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list + +#undef TARGET_EXPAND_BUILTIN_VA_START +#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start + +/* The Alpha architecture does not require sequential consistency. See + http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html + for an example of how it can be violated in practice. 
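+
+   In the cited example, a reader that loads a just-published pointer and
+   then dereferences it can still observe stale data, because Alpha does
+   not order even data-dependent loads without an intervening mb.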
*/ +#undef TARGET_RELAXED_ORDERING +#define TARGET_RELAXED_ORDERING true + +#undef TARGET_OPTION_OVERRIDE +#define TARGET_OPTION_OVERRIDE alpha_option_override + +#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING +#undef TARGET_MANGLE_TYPE +#define TARGET_MANGLE_TYPE alpha_mangle_type +#endif + +#undef TARGET_LEGITIMATE_ADDRESS_P +#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p + +#undef TARGET_CONDITIONAL_REGISTER_USAGE +#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage + +#undef TARGET_CANONICALIZE_COMPARISON +#define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison + +struct gcc_target targetm = TARGET_INITIALIZER; + + +#include "gt-alpha.h" diff --git a/gcc-4.9/gcc/config/alpha/alpha.h b/gcc-4.9/gcc/config/alpha/alpha.h new file mode 100644 index 000000000..0ff793f14 --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/alpha.h @@ -0,0 +1,1074 @@ +/* Definitions of target machine for GNU compiler, for DEC Alpha. + Copyright (C) 1992-2014 Free Software Foundation, Inc. + Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu) + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +/* Target CPU builtins. */ +#define TARGET_CPU_CPP_BUILTINS() \ + do \ + { \ + builtin_define ("__alpha"); \ + builtin_define ("__alpha__"); \ + builtin_assert ("cpu=alpha"); \ + builtin_assert ("machine=alpha"); \ + if (TARGET_CIX) \ + { \ + builtin_define ("__alpha_cix__"); \ + builtin_assert ("cpu=cix"); \ + } \ + if (TARGET_FIX) \ + { \ + builtin_define ("__alpha_fix__"); \ + builtin_assert ("cpu=fix"); \ + } \ + if (TARGET_BWX) \ + { \ + builtin_define ("__alpha_bwx__"); \ + builtin_assert ("cpu=bwx"); \ + } \ + if (TARGET_MAX) \ + { \ + builtin_define ("__alpha_max__"); \ + builtin_assert ("cpu=max"); \ + } \ + if (alpha_cpu == PROCESSOR_EV6) \ + { \ + builtin_define ("__alpha_ev6__"); \ + builtin_assert ("cpu=ev6"); \ + } \ + else if (alpha_cpu == PROCESSOR_EV5) \ + { \ + builtin_define ("__alpha_ev5__"); \ + builtin_assert ("cpu=ev5"); \ + } \ + else /* Presumably ev4. */ \ + { \ + builtin_define ("__alpha_ev4__"); \ + builtin_assert ("cpu=ev4"); \ + } \ + if (TARGET_IEEE || TARGET_IEEE_WITH_INEXACT) \ + builtin_define ("_IEEE_FP"); \ + if (TARGET_IEEE_WITH_INEXACT) \ + builtin_define ("_IEEE_FP_INEXACT"); \ + if (TARGET_LONG_DOUBLE_128) \ + builtin_define ("__LONG_DOUBLE_128__"); \ + \ + /* Macros dependent on the C dialect. 
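+	 For example, compiling with -mcpu=ev6 -mieee should predefine \
+	 __alpha_ev6__ and _IEEE_FP in addition to the always-present	\
+	 __alpha and __alpha__.						\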
*/			\
+      SUBTARGET_LANGUAGE_CPP_BUILTINS();	\
+} while (0)
+
+#ifndef SUBTARGET_LANGUAGE_CPP_BUILTINS
+#define SUBTARGET_LANGUAGE_CPP_BUILTINS()	\
+  do						\
+    {						\
+      if (preprocessing_asm_p ())		\
+	builtin_define_std ("LANGUAGE_ASSEMBLY"); \
+      else if (c_dialect_cxx ())		\
+	{					\
+	  builtin_define ("__LANGUAGE_C_PLUS_PLUS"); \
+	  builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); \
+	}					\
+      else					\
+	builtin_define_std ("LANGUAGE_C");	\
+      if (c_dialect_objc ())			\
+	{					\
+	  builtin_define ("__LANGUAGE_OBJECTIVE_C"); \
+	  builtin_define ("__LANGUAGE_OBJECTIVE_C__"); \
+	}					\
+    }						\
+  while (0)
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets.  */
+
+/* Which processor to schedule for.  The cpu attribute defines a list that
+   mirrors this list, so changes to alpha.md must be made at the same time.  */
+
+enum processor_type
+{
+  PROCESSOR_EV4,	/* 2106[46]{a,} */
+  PROCESSOR_EV5,	/* 21164{a,pc,} */
+  PROCESSOR_EV6,	/* 21264 */
+  PROCESSOR_MAX
+};
+
+extern enum processor_type alpha_cpu;
+extern enum processor_type alpha_tune;
+
+enum alpha_trap_precision
+{
+  ALPHA_TP_PROG,	/* No precision (default).  */
+  ALPHA_TP_FUNC,	/* Trap contained within originating function.  */
+  ALPHA_TP_INSN		/* Instruction accuracy and code is resumption safe.  */
+};
+
+enum alpha_fp_rounding_mode
+{
+  ALPHA_FPRM_NORM,	/* Normal rounding mode.  */
+  ALPHA_FPRM_MINF,	/* Round towards minus-infinity.  */
+  ALPHA_FPRM_CHOP,	/* Chopped rounding mode (towards 0).  */
+  ALPHA_FPRM_DYN	/* Dynamic rounding mode.  */
+};
+
+enum alpha_fp_trap_mode
+{
+  ALPHA_FPTM_N,		/* Normal trap mode.  */
+  ALPHA_FPTM_U,		/* Underflow traps enabled.  */
+  ALPHA_FPTM_SU,	/* Software completion, w/underflow traps */
+  ALPHA_FPTM_SUI	/* Software completion, w/underflow & inexact traps */
+};
+
+extern enum alpha_trap_precision alpha_tp;
+extern enum alpha_fp_rounding_mode alpha_fprm;
+extern enum alpha_fp_trap_mode alpha_fptm;
+
+/* Invert the easy way to make options work.  */
+#define TARGET_FP	(!TARGET_SOFT_FP)
+
+/* These are for target os support and cannot be changed at runtime.  */
+#define TARGET_ABI_OPEN_VMS 0
+#define TARGET_ABI_OSF (!TARGET_ABI_OPEN_VMS)
+
+#ifndef TARGET_CAN_FAULT_IN_PROLOGUE
+#define TARGET_CAN_FAULT_IN_PROLOGUE 0
+#endif
+#ifndef TARGET_HAS_XFLOATING_LIBS
+#define TARGET_HAS_XFLOATING_LIBS TARGET_LONG_DOUBLE_128
+#endif
+#ifndef TARGET_PROFILING_NEEDS_GP
+#define TARGET_PROFILING_NEEDS_GP 0
+#endif
+#ifndef TARGET_FIXUP_EV5_PREFETCH
+#define TARGET_FIXUP_EV5_PREFETCH 0
+#endif
+#ifndef HAVE_AS_TLS
+#define HAVE_AS_TLS 0
+#endif
+
+#define TARGET_DEFAULT MASK_FPREGS
+
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT 0
+#endif
+
+#ifndef TARGET_DEFAULT_EXPLICIT_RELOCS
+#ifdef HAVE_AS_EXPLICIT_RELOCS
+#define TARGET_DEFAULT_EXPLICIT_RELOCS MASK_EXPLICIT_RELOCS
+#define TARGET_SUPPORT_ARCH 1
+#else
+#define TARGET_DEFAULT_EXPLICIT_RELOCS 0
+#endif
+#endif
+
+#ifndef TARGET_SUPPORT_ARCH
+#define TARGET_SUPPORT_ARCH 0
+#endif
+
+/* Support for a compile-time default CPU, et cetera.  The rules are:
+   --with-cpu is ignored if -mcpu is specified.
+   --with-tune is ignored if -mtune is specified.  */
+#define OPTION_DEFAULT_SPECS \
+  {"cpu", "%{!mcpu=*:-mcpu=%(VALUE)}" }, \
+  {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }
+
+
+/* target machine storage layout */
+
+/* Define the size of `int'.  The default is the same as the word size.  */
+#define INT_TYPE_SIZE 32
+
+/* Define the size of `long long'.  The default is twice the word size.
*/
+#define LONG_LONG_TYPE_SIZE 64
+
+/* The two floating-point formats we support are S-floating, which is
+   4 bytes, and T-floating, which is 8 bytes.  `float' is S and `double'
+   and `long double' are T.  */
+
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE (TARGET_LONG_DOUBLE_128 ? 128 : 64)
+
+/* Define this to set the long double type size to use in libgcc2.c, which
+   cannot depend on target_flags.  */
+#ifdef __LONG_DOUBLE_128__
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 128
+#else
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+#endif
+
+/* Work around target_flags dependency in ada/targtyps.c.  */
+#define WIDEST_HARDWARE_FP_SIZE 64
+
+#define WCHAR_TYPE "unsigned int"
+#define WCHAR_TYPE_SIZE 32
+
+/* Define this macro if it is advisable to hold scalars in registers
+   in a wider mode than that declared by the program.  In such cases,
+   the value is constrained to be within the bounds of the declared
+   type, but kept valid in the wider mode.  The signedness of the
+   extension may differ from that of the type.
+
+   For Alpha, we always store objects in a full register.  32-bit integers
+   are always sign-extended, but smaller objects retain their signedness.
+
+   Note that small vector types can get mapped onto integer modes at the
+   whim of not appearing in alpha-modes.def.  We never promoted these
+   values before; don't do so now that we've trimmed the set of modes to
+   those actually implemented in the backend.  */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE)		\
+  if (GET_MODE_CLASS (MODE) == MODE_INT		\
+      && (TYPE == NULL || TREE_CODE (TYPE) != VECTOR_TYPE) \
+      && GET_MODE_SIZE (MODE) < UNITS_PER_WORD)	\
+    {						\
+      if ((MODE) == SImode)			\
+	(UNSIGNEDP) = 0;			\
+      (MODE) = DImode;				\
+    }
+
+/* Define this if most significant bit is lowest numbered
+   in instructions that operate on numbered bit-fields.
+
+   There are no such instructions on the Alpha, but the documentation
+   is little endian.  */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+   This is false on the Alpha.  */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is lowest
+   numbered.
+
+   For Alpha we can decide arbitrarily since there are no machine instructions
+   for them.  Might as well be consistent with bytes.  */
+#define WORDS_BIG_ENDIAN 0
+
+/* Width of a word, in units (bytes).  */
+#define UNITS_PER_WORD 8
+
+/* Width in bits of a pointer.
+   See also the macro `Pmode' defined below.  */
+#define POINTER_SIZE 64
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list.  */
+#define PARM_BOUNDARY 64
+
+/* Boundary (in *bits*) on which stack pointer should be aligned.  */
+#define STACK_BOUNDARY 128
+
+/* Allocation boundary (in *bits*) for the code of a function.  */
+#define FUNCTION_BOUNDARY 32
+
+/* Alignment of field after `int : 0' in a structure.  */
+#define EMPTY_FIELD_BOUNDARY 64
+
+/* Every structure's size must be a multiple of this.  */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bit-field declared as `int' forces `int' alignment for the struct.  */
+#undef PCC_BITFIELD_TYPE_MATTERS
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* No data type wants to be aligned rounder than this.  */
+#define BIGGEST_ALIGNMENT 128
+
+/* For atomic access to objects, must have at least 32-bit alignment
+   unless the machine has byte operations.  */
+#define MINIMUM_ATOMIC_ALIGNMENT ((unsigned int) (TARGET_BWX ?
8 : 32)) + +/* Align all constants and variables to at least a word boundary so + we can pick up pieces of them faster. */ +/* ??? Only if block-move stuff knows about different source/destination + alignment. */ +#if 0 +#define CONSTANT_ALIGNMENT(EXP, ALIGN) MAX ((ALIGN), BITS_PER_WORD) +#define DATA_ALIGNMENT(EXP, ALIGN) MAX ((ALIGN), BITS_PER_WORD) +#endif + +/* Set this nonzero if move instructions will actually fail to work + when given unaligned data. + + Since we get an error message when we do one, call them invalid. */ + +#define STRICT_ALIGNMENT 1 + +/* Set this nonzero if unaligned move instructions are extremely slow. + + On the Alpha, they trap. */ + +#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 1 + +/* Standard register usage. */ + +/* Number of actual hardware registers. + The hardware registers are assigned numbers for the compiler + from 0 to just below FIRST_PSEUDO_REGISTER. + All registers that the compiler knows about must be given numbers, + even those that are not normally considered general registers. + + We define all 32 integer registers, even though $31 is always zero, + and all 32 floating-point registers, even though $f31 is also + always zero. We do not bother defining the FP status register and + there are no other registers. + + Since $31 is always zero, we will use register number 31 as the + argument pointer. It will never appear in the generated code + because we will always be eliminating it in favor of the stack + pointer or hardware frame pointer. + + Likewise, we use $f31 for the frame pointer, which will always + be eliminated in favor of the hardware frame pointer or the + stack pointer. */ + +#define FIRST_PSEUDO_REGISTER 64 + +/* 1 for registers that have pervasive standard uses + and are not available for the register allocator. */ + +#define FIXED_REGISTERS \ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, \ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } + +/* 1 for registers not available across function calls. + These must include the FIXED_REGISTERS and also any + registers that can be used without being saved. + The latter must include the registers where values are returned + and the register where structure-value addresses are passed. + Aside from that, you can include as many other registers as you like. */ +#define CALL_USED_REGISTERS \ + {1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, \ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, \ + 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, \ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 } + +/* List the order in which to allocate registers. Each register must be + listed once, even those in FIXED_REGISTERS. 
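+
+   The rationale, as we read the table: caller-saved registers come first
+   so short-lived values need no prologue saves; the return-value and
+   argument registers among them are deferred slightly since they are
+   live across function boundaries; call-saved registers follow; and the
+   fixed gp/sp/ap/sfp registers come last.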
*/ + +#define REG_ALLOC_ORDER { \ + 1, 2, 3, 4, 5, 6, 7, 8, /* nonsaved integer registers */ \ + 22, 23, 24, 25, 28, /* likewise */ \ + 0, /* likewise, but return value */ \ + 21, 20, 19, 18, 17, 16, /* likewise, but input args */ \ + 27, /* likewise, but OSF procedure value */ \ + \ + 42, 43, 44, 45, 46, 47, /* nonsaved floating-point registers */ \ + 54, 55, 56, 57, 58, 59, /* likewise */ \ + 60, 61, 62, /* likewise */ \ + 32, 33, /* likewise, but return values */ \ + 53, 52, 51, 50, 49, 48, /* likewise, but input args */ \ + \ + 9, 10, 11, 12, 13, 14, /* saved integer registers */ \ + 26, /* return address */ \ + 15, /* hard frame pointer */ \ + \ + 34, 35, 36, 37, 38, 39, /* saved floating-point registers */ \ + 40, 41, /* likewise */ \ + \ + 29, 30, 31, 63 /* gp, sp, ap, sfp */ \ +} + +/* Return number of consecutive hard regs needed starting at reg REGNO + to hold something of mode MODE. + This is ordinarily the length in words of a value of mode MODE + but can be less for certain modes in special long registers. */ + +#define HARD_REGNO_NREGS(REGNO, MODE) \ + ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD) + +/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. + On Alpha, the integer registers can hold any mode. The floating-point + registers can hold 64-bit integers as well, but not smaller values. */ + +#define HARD_REGNO_MODE_OK(REGNO, MODE) \ + (IN_RANGE ((REGNO), 32, 62) \ + ? (MODE) == SFmode || (MODE) == DFmode || (MODE) == DImode \ + || (MODE) == SCmode || (MODE) == DCmode \ + : 1) + +/* A C expression that is nonzero if a value of mode + MODE1 is accessible in mode MODE2 without copying. + + This asymmetric test is true when MODE1 could be put + in an FP register but MODE2 could not. */ + +#define MODES_TIEABLE_P(MODE1, MODE2) \ + (HARD_REGNO_MODE_OK (32, (MODE1)) \ + ? HARD_REGNO_MODE_OK (32, (MODE2)) \ + : 1) + +/* Specify the registers used for certain standard purposes. + The values of these macros are register numbers. */ + +/* Alpha pc isn't overloaded on a register that the compiler knows about. */ +/* #define PC_REGNUM */ + +/* Register to use for pushing function arguments. */ +#define STACK_POINTER_REGNUM 30 + +/* Base register for access to local variables of the function. */ +#define HARD_FRAME_POINTER_REGNUM 15 + +/* Base register for access to arguments of the function. */ +#define ARG_POINTER_REGNUM 31 + +/* Base register for access to local variables of function. */ +#define FRAME_POINTER_REGNUM 63 + +/* Register in which static-chain is passed to a function. + + For the Alpha, this is based on an example; the calling sequence + doesn't seem to specify this. */ +#define STATIC_CHAIN_REGNUM 1 + +/* The register number of the register used to address a table of + static data addresses in memory. */ +#define PIC_OFFSET_TABLE_REGNUM 29 + +/* Define this macro if the register defined by `PIC_OFFSET_TABLE_REGNUM' + is clobbered by calls. */ +/* ??? It is and it isn't. It's required to be valid for a given + function when the function returns. It isn't clobbered by + current_file functions. Moreover, we do not expose the ldgp + until after reload, so we're probably safe. */ +/* #define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED */ + +/* Define the classes of registers for register constraints in the + machine description. Also define ranges of constants. + + One of the classes must always be named ALL_REGS and include all hard regs. + If there is more than one class, another class must be named NO_REGS + and contain no registers. 
+ + The name GENERAL_REGS must be the name of a class (or an alias for + another name such as ALL_REGS). This is the class of registers + that is allowed by "g" or "r" in a register constraint. + Also, registers outside this class are allocated only when + instructions express preferences for them. + + The classes must be numbered in nondecreasing order; that is, + a larger-numbered class must never be contained completely + in a smaller-numbered class. + + For any two classes, it is very desirable that there be another + class that represents their union. */ + +enum reg_class { + NO_REGS, R0_REG, R24_REG, R25_REG, R27_REG, + GENERAL_REGS, FLOAT_REGS, ALL_REGS, + LIM_REG_CLASSES +}; + +#define N_REG_CLASSES (int) LIM_REG_CLASSES + +/* Give names of register classes as strings for dump file. */ + +#define REG_CLASS_NAMES \ + {"NO_REGS", "R0_REG", "R24_REG", "R25_REG", "R27_REG", \ + "GENERAL_REGS", "FLOAT_REGS", "ALL_REGS" } + +/* Define which registers fit in which classes. + This is an initializer for a vector of HARD_REG_SET + of length N_REG_CLASSES. */ + +#define REG_CLASS_CONTENTS \ +{ {0x00000000, 0x00000000}, /* NO_REGS */ \ + {0x00000001, 0x00000000}, /* R0_REG */ \ + {0x01000000, 0x00000000}, /* R24_REG */ \ + {0x02000000, 0x00000000}, /* R25_REG */ \ + {0x08000000, 0x00000000}, /* R27_REG */ \ + {0xffffffff, 0x80000000}, /* GENERAL_REGS */ \ + {0x00000000, 0x7fffffff}, /* FLOAT_REGS */ \ + {0xffffffff, 0xffffffff} } + +/* The same information, inverted: + Return the class number of the smallest class containing + reg number REGNO. This could be a conditional expression + or could index an array. */ + +#define REGNO_REG_CLASS(REGNO) \ + ((REGNO) == 0 ? R0_REG \ + : (REGNO) == 24 ? R24_REG \ + : (REGNO) == 25 ? R25_REG \ + : (REGNO) == 27 ? R27_REG \ + : IN_RANGE ((REGNO), 32, 62) ? FLOAT_REGS \ + : GENERAL_REGS) + +/* The class value for index registers, and the one for base regs. */ +#define INDEX_REG_CLASS NO_REGS +#define BASE_REG_CLASS GENERAL_REGS + +/* Given an rtx X being reloaded into a reg required to be + in class CLASS, return the class of reg to actually use. + In general this is just CLASS; but on some machines + in some cases it is preferable to use a more restrictive class. */ + +#define PREFERRED_RELOAD_CLASS alpha_preferred_reload_class + +/* If we are copying between general and FP registers, we need a memory + location unless the FIX extension is available. */ + +#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \ + (! TARGET_FIX && (((CLASS1) == FLOAT_REGS && (CLASS2) != FLOAT_REGS) \ + || ((CLASS2) == FLOAT_REGS && (CLASS1) != FLOAT_REGS))) + +/* Specify the mode to be used for memory when a secondary memory + location is needed. If MODE is floating-point, use it. Otherwise, + widen to a word like the default. This is needed because we always + store integers in FP registers in quadword format. This whole + area is very tricky! */ +#define SECONDARY_MEMORY_NEEDED_MODE(MODE) \ + (GET_MODE_CLASS (MODE) == MODE_FLOAT ? (MODE) \ + : GET_MODE_SIZE (MODE) >= 4 ? (MODE) \ + : mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (MODE), 0)) + +/* Return the class of registers that cannot change mode from FROM to TO. */ + +#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \ + (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \ + ? reg_classes_intersect_p (FLOAT_REGS, CLASS) : 0) + +/* Define the cost of moving between registers of various classes. Moving + between FLOAT_REGS and anything else except float regs is expensive. 
+   In fact, we make it quite expensive because we really don't want to
+   do these moves unless it is clearly worth it.  Optimizations may
+   reduce the impact of not being able to allocate a pseudo to a
+   hard register.  */
+
+#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) \
+  (((CLASS1) == FLOAT_REGS) == ((CLASS2) == FLOAT_REGS) ? 2	\
+   : TARGET_FIX ? ((CLASS1) == FLOAT_REGS ? 6 : 8)		\
+   : 4+2*alpha_memory_latency)
+
+/* A C expression returning the cost of moving data of MODE from a register
+   to or from memory.
+
+   On the Alpha, bump this up a bit.  */
+
+extern int alpha_memory_latency;
+#define MEMORY_MOVE_COST(MODE,CLASS,IN)  (2*alpha_memory_latency)
+
+/* Provide the cost of a branch.  Exact meaning under development.  */
+#define BRANCH_COST(speed_p, predictable_p) 5
+
+/* Stack layout; function entry, exit and calling.  */
+
+/* Define this if pushing a word on the stack
+   makes the stack pointer a smaller address.  */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this to nonzero if the nominal address of the stack frame
+   is at the high-address end of the local variables;
+   that is, each additional local variable allocated
+   goes at a more negative offset in the frame.  */
+/* #define FRAME_GROWS_DOWNWARD 0 */
+
+/* Offset within stack frame to start allocating local variables at.
+   If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+   first local allocated.  Otherwise, it is the offset to the BEGINNING
+   of the first local allocated.  */
+
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+   this says how many the stack pointer really advances by.
+   On Alpha, don't define this because there are no push insns.  */
+/*  #define PUSH_ROUNDING(BYTES) */
+
+/* Define this to be nonzero if stack checking is built into the ABI.  */
+#define STACK_CHECK_BUILTIN 1
+
+/* Define this if the maximum size of all the outgoing args is to be
+   accumulated and pushed during the prologue.  The amount can be
+   found in the variable crtl->outgoing_args_size.  */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Offset of first parameter from the argument pointer register value.  */
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Definitions for register eliminations.
+
+   We have two registers that can be eliminated on the Alpha.  First, the
+   frame pointer register can often be eliminated in favor of the stack
+   pointer register.  Secondly, the argument pointer register can always be
+   eliminated; it is replaced with either the stack or frame pointer.  */
+
+/* This is an array of structures.  Each structure initializes one pair
+   of eliminable registers.  The "from" register number is given first,
+   followed by "to".  Eliminations of the same "from" register are listed
+   in order of preference.  */
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Round up to a multiple of 16 bytes.  */
+#define ALPHA_ROUND(X) (((X) + 15) & ~ 15)
+
+/* Define the offset between two registers, one to be eliminated, and the
+   other its replacement, at the start of a routine.  */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+  ((OFFSET) = alpha_initial_elimination_offset(FROM, TO))
+
+/* Define this if stack space is still allocated for a parameter passed
+   in a register.  */
+/* #define REG_PARM_STACK_SPACE */
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree). + If the precise function being called is known, FUNC is its FUNCTION_DECL; + otherwise, FUNC is 0. + + On Alpha the value is found in $0 for integer functions and + $f0 for floating-point functions. */ + +#define FUNCTION_VALUE(VALTYPE, FUNC) \ + function_value (VALTYPE, FUNC, VOIDmode) + +/* Define how to find the value returned by a library function + assuming the value has mode MODE. */ + +#define LIBCALL_VALUE(MODE) \ + function_value (NULL, NULL, MODE) + +/* 1 if N is a possible register number for a function value + as seen by the caller. */ + +#define FUNCTION_VALUE_REGNO_P(N) \ + ((N) == 0 || (N) == 1 || (N) == 32 || (N) == 33) + +/* 1 if N is a possible register number for function argument passing. + On Alpha, these are $16-$21 and $f16-$f21. */ + +#define FUNCTION_ARG_REGNO_P(N) \ + (IN_RANGE ((N), 16, 21) || ((N) >= 16 + 32 && (N) <= 21 + 32)) + +/* Define a data type for recording info about an argument list + during the scan of that argument list. This data type should + hold all necessary information about the function itself + and about the args processed so far, enough to enable macros + such as FUNCTION_ARG to determine where the next arg should go. + + On Alpha, this is a single integer, which is a number of words + of arguments scanned so far. + Thus 6 or more means all following args should go on the stack. */ + +#define CUMULATIVE_ARGS int + +/* Initialize a variable CUM of type CUMULATIVE_ARGS + for a call to a function whose data type is FNTYPE. + For a library call, FNTYPE is 0. */ + +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \ + (CUM) = 0 + +/* Define intermediate macro to compute the size (in registers) of an argument + for the Alpha. */ + +#define ALPHA_ARG_SIZE(MODE, TYPE, NAMED) \ + ((MODE) == TFmode || (MODE) == TCmode ? 1 \ + : (((MODE) == BLKmode ? int_size_in_bytes (TYPE) : GET_MODE_SIZE (MODE)) \ + + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD) + +/* Make (or fake) .linkage entry for function call. + IS_LOCAL is 0 if name is used in call, 1 if name is used in definition. */ + +/* This macro defines the start of an assembly comment. */ + +#define ASM_COMMENT_START " #" + +/* This macro produces the initial definition of a function. */ + +#undef ASM_DECLARE_FUNCTION_NAME +#define ASM_DECLARE_FUNCTION_NAME(FILE,NAME,DECL) \ + alpha_start_function(FILE,NAME,DECL); + +/* This macro closes up a function definition for the assembler. */ + +#undef ASM_DECLARE_FUNCTION_SIZE +#define ASM_DECLARE_FUNCTION_SIZE(FILE,NAME,DECL) \ + alpha_end_function(FILE,NAME,DECL) + +/* Output any profiling code before the prologue. */ + +#define PROFILE_BEFORE_PROLOGUE 1 + +/* Never use profile counters. */ + +#define NO_PROFILE_COUNTERS 1 + +/* Output assembler code to FILE to increment profiler label # LABELNO + for profiling a function entry. Under OSF/1, profiling is enabled + by simply passing -pg to the assembler and linker. */ + +#define FUNCTION_PROFILER(FILE, LABELNO) + +/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, + the stack pointer does not matter. The value is tested only in + functions that have frame pointers. + No definition is equivalent to always zero. */ + +#define EXIT_IGNORE_STACK 1 + +/* Define registers used by the epilogue and return instruction. */ + +#define EPILOGUE_USES(REGNO) ((REGNO) == 26) + +/* Length in units of the trampoline for entering a nested function. */ + +#define TRAMPOLINE_SIZE 32 + +/* The alignment of a trampoline, in bits. 
*/
+
+#define TRAMPOLINE_ALIGNMENT 64
+
+/* A C expression whose value is RTL representing the value of the return
+   address for the frame COUNT steps up from the current frame.
+   FRAMEADDR is the frame pointer of the COUNT frame, or the frame pointer of
+   the COUNT-1 frame if RETURN_ADDR_IN_PREVIOUS_FRAME is defined.  */
+
+#define RETURN_ADDR_RTX  alpha_return_addr
+
+/* Provide a definition of DWARF_FRAME_REGNUM here so that fallback unwinders
+   can use DWARF_ALT_FRAME_RETURN_COLUMN defined below.  This is just the same
+   as the default definition in dwarf2out.c.  */
+#undef DWARF_FRAME_REGNUM
+#define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
+
+/* Before the prologue, RA lives in $26.  */
+#define INCOMING_RETURN_ADDR_RTX  gen_rtx_REG (Pmode, 26)
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (26)
+#define DWARF_ALT_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (64)
+#define DWARF_ZERO_REG 31
+
+/* Describe how we implement __builtin_eh_return.  */
+#define EH_RETURN_DATA_REGNO(N)	((N) < 4 ? (N) + 16 : INVALID_REGNUM)
+#define EH_RETURN_STACKADJ_RTX	gen_rtx_REG (Pmode, 28)
+#define EH_RETURN_HANDLER_RTX \
+  gen_rtx_MEM (Pmode, plus_constant (Pmode, stack_pointer_rtx, \
+				     crtl->outgoing_args_size))
+
+/* Addressing modes, and classification of registers for them.  */
+
+/* Macros to check register numbers against specific register classes.  */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+   They give nonzero only if REGNO is a hard reg of the suitable class
+   or a pseudo reg currently allocated to a suitable hard reg.
+   Since they use reg_renumber, they are safe only once reg_renumber
+   has been allocated, which happens in reginfo.c during register
+   allocation.  */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < 32  \
+ || (REGNO) == 63 || reg_renumber[REGNO] == 63)
+
+/* Maximum number of registers that can appear in a valid memory address.  */
+#define MAX_REGS_PER_ADDRESS 1
+
+/* Recognize any constant value that is a valid address.  For the Alpha,
+   the only such constants are small integer offsets, since we want to
+   use LDA to load any symbolic address into a register.  */
+
+#define CONSTANT_ADDRESS_P(X)   \
+  (CONST_INT_P (X)		\
+   && (unsigned HOST_WIDE_INT) (INTVAL (X) + 0x8000) < 0x10000)
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+   and check its validity for a certain class.
+   We have two alternate definitions for each of them.
+   The usual definition accepts all pseudo regs; the other rejects
+   them unless they have been allocated suitable hard regs.
+   The symbol REG_OK_STRICT causes the latter definition to be used.
+
+   Most source files want to accept pseudo regs in the hope that
+   they will get allocated to the class that the insn wants them to be in.
+   Source files for reload pass need to be strict.
+   After reload, it makes no difference, since pseudo regs have
+   been eliminated by then.  */
+
+/* Nonzero if X is a hard reg that can be used as an index
+   or if it is a pseudo reg.  */
+#define REG_OK_FOR_INDEX_P(X) 0
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+   or if it is a pseudo reg.  */
+#define NONSTRICT_REG_OK_FOR_BASE_P(X)  \
+  (REGNO (X) < 32 || REGNO (X) == 63 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+/* ??? Nonzero if X is the frame pointer, or some virtual register
+   that may eliminate to the frame pointer.  These will be allowed to
+   have offsets greater than 32K.
This is done because register + elimination offsets will change the hi/lo split, and if we split + before reload, we will require additional instructions. */ +#define NONSTRICT_REG_OK_FP_BASE_P(X) \ + (REGNO (X) == 31 || REGNO (X) == 63 \ + || (REGNO (X) >= FIRST_PSEUDO_REGISTER \ + && REGNO (X) < LAST_VIRTUAL_POINTER_REGISTER)) + +/* Nonzero if X is a hard reg that can be used as a base reg. */ +#define STRICT_REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X)) + +#ifdef REG_OK_STRICT +#define REG_OK_FOR_BASE_P(X) STRICT_REG_OK_FOR_BASE_P (X) +#else +#define REG_OK_FOR_BASE_P(X) NONSTRICT_REG_OK_FOR_BASE_P (X) +#endif + +/* Try a machine-dependent way of reloading an illegitimate address + operand. If we find one, push the reload and jump to WIN. This + macro is used in only one place: `find_reloads_address' in reload.c. */ + +#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_L,WIN) \ +do { \ + rtx new_x = alpha_legitimize_reload_address (X, MODE, OPNUM, TYPE, IND_L); \ + if (new_x) \ + { \ + X = new_x; \ + goto WIN; \ + } \ +} while (0) + + +/* Specify the machine mode that this machine uses + for the index in the tablejump instruction. */ +#define CASE_VECTOR_MODE SImode + +/* Define as C expression which evaluates to nonzero if the tablejump + instruction expects the table to contain offsets from the address of the + table. + + Do not define this if the table should contain absolute addresses. + On the Alpha, the table is really GP-relative, not relative to the PC + of the table, but we pretend that it is PC-relative; this should be OK, + but we should try to find some better way sometime. */ +#define CASE_VECTOR_PC_RELATIVE 1 + +/* Define this as 1 if `char' should by default be signed; else as 0. */ +#define DEFAULT_SIGNED_CHAR 1 + +/* Max number of bytes we can move to or from memory + in one reasonably fast instruction. */ + +#define MOVE_MAX 8 + +/* If a memory-to-memory move would take MOVE_RATIO or more simple + move-instruction pairs, we will do a movmem or libcall instead. + + Without byte/word accesses, we want no more than four instructions; + with, several single byte accesses are better. */ + +#define MOVE_RATIO(speed) (TARGET_BWX ? 7 : 2) + +/* Largest number of bytes of an object that can be placed in a register. + On the Alpha we have plenty of registers, so use TImode. */ +#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode) + +/* Nonzero if access to memory by bytes is no faster than for words. + Also nonzero if doing byte operations (specifically shifts) in registers + is undesirable. + + On the Alpha, we want to not use the byte operation and instead use + masking operations to access fields; these will save instructions. */ + +#define SLOW_BYTE_ACCESS 1 + +/* Define if operations between registers always perform the operation + on the full register even if a narrower mode is specified. */ +#define WORD_REGISTER_OPERATIONS + +/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD + will either zero-extend or sign-extend. The value of this macro should + be the code that says which one of the two operations is implicitly + done, UNKNOWN if none. */ +#define LOAD_EXTEND_OP(MODE) ((MODE) == SImode ? SIGN_EXTEND : ZERO_EXTEND) + +/* Define if loading short immediate values into registers sign extends. */ +#define SHORT_IMMEDIATES_SIGN_EXTEND + +/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits + is done just by pretending it is already truncated. 
*/ +#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1 + +/* The CIX ctlz and cttz instructions return 64 for zero. */ +#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 64, TARGET_CIX) +#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 64, TARGET_CIX) + +/* Define the value returned by a floating-point comparison instruction. */ + +#define FLOAT_STORE_FLAG_VALUE(MODE) \ + REAL_VALUE_ATOF ((TARGET_FLOAT_VAX ? "0.5" : "2.0"), (MODE)) + +/* Specify the machine mode that pointers have. + After generation of rtl, the compiler makes no further distinction + between pointers and any other objects of this machine mode. */ +#define Pmode DImode + +/* Mode of a function address in a call instruction (for indexing purposes). */ + +#define FUNCTION_MODE Pmode + +/* Define this if addresses of constant functions + shouldn't be put through pseudo regs where they can be cse'd. + Desirable on machines where ordinary constants are expensive + but a CALL with constant address is cheap. + + We define this on the Alpha so that gen_call and gen_call_value + get to see the SYMBOL_REF (for the hint field of the jsr). It will + then copy it into a register, thus actually letting the address be + cse'ed. */ + +#define NO_FUNCTION_CSE + +/* Define this to be nonzero if shift instructions ignore all but the low-order + few bits. */ +#define SHIFT_COUNT_TRUNCATED 1 + +/* Control the assembler format that we output. */ + +/* Output to assembler file text saying following lines + may contain character constants, extra white space, comments, etc. */ +#define ASM_APP_ON (TARGET_EXPLICIT_RELOCS ? "\t.set\tmacro\n" : "") + +/* Output to assembler file text saying following lines + no longer contain unusual constructs. */ +#define ASM_APP_OFF (TARGET_EXPLICIT_RELOCS ? "\t.set\tnomacro\n" : "") + +#define TEXT_SECTION_ASM_OP "\t.text" + +/* Output before writable data. */ + +#define DATA_SECTION_ASM_OP "\t.data" + +/* How to refer to registers in assembler output. + This sequence is indexed by compiler's hard-register-number (see above). */ + +#define REGISTER_NAMES \ +{"$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \ + "$9", "$10", "$11", "$12", "$13", "$14", "$15", \ + "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23", \ + "$24", "$25", "$26", "$27", "$28", "$29", "$30", "AP", \ + "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", \ + "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", \ + "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",\ + "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "FP"} + +/* Strip name encoding when emitting labels. */ + +#define ASM_OUTPUT_LABELREF(STREAM, NAME) \ +do { \ + const char *name_ = NAME; \ + if (*name_ == '@' || *name_ == '%') \ + name_ += 2; \ + if (*name_ == '*') \ + name_++; \ + else \ + fputs (user_label_prefix, STREAM); \ + fputs (name_, STREAM); \ +} while (0) + +/* Globalizing directive for a label. */ +#define GLOBAL_ASM_OP "\t.globl " + +/* Use dollar signs rather than periods in special g++ assembler names. */ + +#undef NO_DOLLAR_IN_LABEL + +/* This is how to store into the string LABEL + the symbol_ref name of an internal numbered label where + PREFIX is the class of label and NUM is the number within the class. + This is suitable for output with `assemble_name'. */ + +#undef ASM_GENERATE_INTERNAL_LABEL +#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \ + sprintf ((LABEL), "*$%s%ld", (PREFIX), (long)(NUM)) + +/* This is how to output an element of a case-vector that is relative. 
*/ + +#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \ + fprintf (FILE, "\t.gprel32 $L%d\n", (VALUE)) + + +/* Print operand X (an rtx) in assembler syntax to file FILE. + CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified. + For `%' followed by punctuation, CODE is the punctuation and X is null. */ + +#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE) + +/* Determine which codes are valid without a following integer. These must + not be alphabetic. + + ~ Generates the name of the current function. + + / Generates the instruction suffix. The TRAP_SUFFIX and ROUND_SUFFIX + attributes are examined to determine what is appropriate. + + , Generates single precision suffix for floating point + instructions (s for IEEE, f for VAX) + + - Generates double precision suffix for floating point + instructions (t for IEEE, g for VAX) + */ + +#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \ + ((CODE) == '/' || (CODE) == ',' || (CODE) == '-' || (CODE) == '~' \ + || (CODE) == '#' || (CODE) == '*' || (CODE) == '&') + +/* Print a memory address as an operand to reference that memory location. */ + +#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \ + print_operand_address((FILE), (ADDR)) + +/* If we use NM, pass -g to it so it only lists globals. */ +#define NM_FLAGS "-pg" + +/* Definitions for debugging. */ + +/* Correct the offset of automatic variables and arguments. Note that + the Alpha debug format wants all automatic variables and arguments + to be in terms of two different offsets from the virtual frame pointer, + which is the stack pointer before any adjustment in the function. + The offset for the argument pointer is fixed for the native compiler, + it is either zero (for the no arguments case) or large enough to hold + all argument registers. + The offset for the auto pointer is the fourth argument to the .frame + directive (local_offset). + To stay compatible with the native tools we use the same offsets + from the virtual frame pointer and adjust the debugger arg/auto offsets + accordingly. These debugger offsets are set up in output_prolog. */ + +extern long alpha_arg_offset; +extern long alpha_auto_offset; +#define DEBUGGER_AUTO_OFFSET(X) \ + ((GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0) + alpha_auto_offset) +#define DEBUGGER_ARG_OFFSET(OFFSET, X) (OFFSET + alpha_arg_offset) + +#define ASM_OUTPUT_SOURCE_FILENAME(STREAM, NAME) \ + alpha_output_filename (STREAM, NAME) + +/* By default, turn on GDB extensions. */ +#define DEFAULT_GDB_EXTENSIONS 1 + +/* The system headers under Alpha systems are generally C++-aware. */ +#define NO_IMPLICIT_EXTERN_C diff --git a/gcc-4.9/gcc/config/alpha/alpha.md b/gcc-4.9/gcc/config/alpha/alpha.md new file mode 100644 index 000000000..795b4df3f --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/alpha.md @@ -0,0 +1,6113 @@ +;; Machine description for DEC Alpha for GNU C compiler +;; Copyright (C) 1992-2014 Free Software Foundation, Inc. +;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu) +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. 
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; Uses of UNSPEC in this file:
+
+(define_c_enum "unspec" [
+  UNSPEC_XFLT_COMPARE
+  UNSPEC_ARG_HOME
+  UNSPEC_LDGP1
+  UNSPEC_INSXH
+  UNSPEC_MSKXH
+  UNSPEC_CVTQL
+  UNSPEC_CVTLQ
+  UNSPEC_LDGP2
+  UNSPEC_LITERAL
+  UNSPEC_LITUSE
+  UNSPEC_SIBCALL
+  UNSPEC_SYMBOL
+
+  ;; TLS Support
+  UNSPEC_TLSGD_CALL
+  UNSPEC_TLSLDM_CALL
+  UNSPEC_TLSGD
+  UNSPEC_TLSLDM
+  UNSPEC_DTPREL
+  UNSPEC_TPREL
+  UNSPEC_TP
+
+  ;; Builtins
+  UNSPEC_CMPBGE
+  UNSPEC_ZAP
+  UNSPEC_AMASK
+  UNSPEC_IMPLVER
+  UNSPEC_PERR
+  UNSPEC_COPYSIGN
+
+  ;; Atomic operations
+  UNSPEC_MB
+  UNSPEC_ATOMIC
+  UNSPEC_CMPXCHG
+  UNSPEC_XCHG
+])
+
+;; UNSPEC_VOLATILE:
+
+(define_c_enum "unspecv" [
+  UNSPECV_IMB
+  UNSPECV_BLOCKAGE
+  UNSPECV_SETJMPR	; builtin_setjmp_receiver
+  UNSPECV_LONGJMP	; builtin_longjmp
+  UNSPECV_TRAPB
+  UNSPECV_PSPL		; prologue_stack_probe_loop
+  UNSPECV_REALIGN
+  UNSPECV_EHR		; exception_receiver
+  UNSPECV_MCOUNT
+  UNSPECV_FORCE_MOV
+  UNSPECV_LDGP1
+  UNSPECV_PLDGP2	; prologue ldgp
+  UNSPECV_SET_TP
+  UNSPECV_RPCC
+  UNSPECV_SETJMPR_ER	; builtin_setjmp_receiver fragment
+  UNSPECV_LL		; load-locked
+  UNSPECV_SC		; store-conditional
+  UNSPECV_CMPXCHG
+])
+
+;; On non-BWX targets, CQImode must be handled similarly to HImode
+;; when generating reloads.
+(define_mode_iterator RELOAD12 [QI HI CQI])
+(define_mode_attr reloadmode [(QI "qi") (HI "hi") (CQI "hi")])
+
+;; Other mode iterators
+(define_mode_iterator IMODE [QI HI SI DI])
+(define_mode_iterator I12MODE [QI HI])
+(define_mode_iterator I124MODE [QI HI SI])
+(define_mode_iterator I24MODE [HI SI])
+(define_mode_iterator I248MODE [HI SI DI])
+(define_mode_iterator I48MODE [SI DI])
+
+(define_mode_attr DWI [(SI "DI") (DI "TI")])
+(define_mode_attr modesuffix [(QI "b") (HI "w") (SI "l") (DI "q")
+			      (V8QI "b8") (V4HI "w4")
+			      (SF "%,") (DF "%-")])
+(define_mode_attr vecmodesuffix [(QI "b8") (HI "w4")])
+
+(define_code_iterator any_maxmin [smax smin umax umin])
+
+(define_code_attr maxmin [(smax "maxs") (smin "mins")
+			  (umax "maxu") (umin "minu")])
+
+;; Where necessary, the suffixes _le and _be are used to distinguish between
+;; little-endian and big-endian patterns.
+;;
+;; Note that the Unicos/Mk assembler does not support the following
+;; opcodes: mov, fmov, nop, fnop, unop.
+
+;; Processor type -- this attribute must exactly match the processor_type
+;; enumeration in alpha.h.
+
+(define_attr "tune" "ev4,ev5,ev6"
+  (const (symbol_ref "((enum attr_tune) alpha_tune)")))
+
+;; Define an insn type attribute.  This is used in function unit delay
+;; computations, among other purposes.  For the most part, we use the names
+;; defined in the EV4 documentation, but add a few that we have to know about
+;; separately.
+
+(define_attr "type"
+  "ild,fld,ldsym,ist,fst,ibr,callpal,fbr,jsr,iadd,ilog,shift,icmov,fcmov,
+   icmp,imul,fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,mb,ld_l,st_c,
+   multi,none"
+  (const_string "iadd"))
+
+;; Describe a user's asm statement.
+(define_asm_attributes
+  [(set_attr "type" "multi")])
+
+;; Define the operand size an insn operates on.  Used primarily by mul
+;; and div operations that have size dependent timings.
+
+(define_attr "opsize" "si,di,udi"
+  (const_string "di"))
+
+;; The TRAP attribute marks instructions that may generate traps
+;; (which are imprecise and may need a trapb if software completion
+;; is desired).
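+;;
+;; For example, with software completion (-mfp-trap-mode=su) an FP add is
+;; emitted as something like
+;;
+;;	addt/su $f1,$f2,$f3
+;;	trapb
+;;
+;; so that any imprecise arithmetic trap is delivered before the result
+;; is consumed.  (The trapb here is only an illustration; actual barrier
+;; placement is decided by the trap-shadow handling in alpha.c.)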
+ +(define_attr "trap" "no,yes" + (const_string "no")) + +;; The ROUND_SUFFIX attribute marks which instructions require a +;; rounding-mode suffix. The value NONE indicates no suffix, +;; the value NORMAL indicates a suffix controlled by alpha_fprm. + +(define_attr "round_suffix" "none,normal,c" + (const_string "none")) + +;; The TRAP_SUFFIX attribute marks instructions requiring a trap-mode suffix: +;; NONE no suffix +;; SU accepts only /su (cmpt et al) +;; SUI accepts only /sui (cvtqt and cvtqs) +;; V_SV accepts /v and /sv (cvtql only) +;; V_SV_SVI accepts /v, /sv and /svi (cvttq only) +;; U_SU_SUI accepts /u, /su and /sui (most fp instructions) +;; +;; The actual suffix emitted is controlled by alpha_fptm. + +(define_attr "trap_suffix" "none,su,sui,v_sv,v_sv_svi,u_su_sui" + (const_string "none")) + +;; The length of an instruction sequence in bytes. + +(define_attr "length" "" + (const_int 4)) + +;; The USEGP attribute marks instructions that have relocations that use +;; the GP. + +(define_attr "usegp" "no,yes" + (cond [(eq_attr "type" "ldsym,jsr") + (const_string "yes") + (eq_attr "type" "ild,fld,ist,fst") + (symbol_ref "((enum attr_usegp) alpha_find_lo_sum_using_gp (insn))") + ] + (const_string "no"))) + +;; The CANNOT_COPY attribute marks instructions with relocations that +;; cannot easily be duplicated. This includes insns with gpdisp relocs +;; since they have to stay in 1-1 correspondence with one another. This +;; also includes jsr insns, since they must stay in correspondence with +;; the immediately following gpdisp instructions. + +(define_attr "cannot_copy" "false,true" + (const_string "false")) + +;; Used to control the "enabled" attribute on a per-instruction basis. +;; For convenience, conflate ABI issues re loading of addresses with +;; an "isa". +(define_attr "isa" "base,bwx,max,fix,cix,vms,ner,er" + (const_string "base")) + +(define_attr "enabled" "" + (cond [(eq_attr "isa" "bwx") (symbol_ref "TARGET_BWX") + (eq_attr "isa" "max") (symbol_ref "TARGET_MAX") + (eq_attr "isa" "fix") (symbol_ref "TARGET_FIX") + (eq_attr "isa" "cix") (symbol_ref "TARGET_CIX") + (eq_attr "isa" "vms") (symbol_ref "TARGET_ABI_OPEN_VMS") + (eq_attr "isa" "ner") (symbol_ref "!TARGET_EXPLICIT_RELOCS") + (eq_attr "isa" "er") (symbol_ref "TARGET_EXPLICIT_RELOCS") + ] + (const_int 1))) + +;; Include scheduling descriptions. + +(include "ev4.md") +(include "ev5.md") +(include "ev6.md") + + +;; Operand and operator predicates and constraints + +(include "predicates.md") +(include "constraints.md") + + +;; First define the arithmetic insns. Note that the 32-bit forms also +;; sign-extend. + +;; Handle 32-64 bit extension from memory to a floating point register +;; specially, since this occurs frequently in int->double conversions. +;; +;; Note that while we must retain the =f case in the insn for reload's +;; benefit, it should be eliminated after reload, so we should never emit +;; code for that case. But we don't reject the possibility. 
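+;;
+;; In the expected memory case the extension stays entirely in the FP
+;; register file, roughly
+;;
+;;	lds $f0,0($16)		# load the longword into an FP register
+;;	cvtlq $f0,$f0		# sign-extend it to a quadword in place
+;;
+;; rather than an integer load followed by a cross-unit copy ($16 is just
+;; an illustrative base register).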
+ +(define_expand "extendsidi2" + [(set (match_operand:DI 0 "register_operand") + (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]) + +(define_insn "*cvtlq" + [(set (match_operand:DI 0 "register_operand" "=f") + (unspec:DI [(match_operand:SF 1 "reg_or_0_operand" "fG")] + UNSPEC_CVTLQ))] + "" + "cvtlq %1,%0" + [(set_attr "type" "fadd")]) + +(define_insn "*extendsidi2_1" + [(set (match_operand:DI 0 "register_operand" "=r,r,!*f") + (sign_extend:DI + (match_operand:SI 1 "nonimmediate_operand" "r,m,m")))] + "" + "@ + addl $31,%1,%0 + ldl %0,%1 + lds %0,%1\;cvtlq %0,%0" + [(set_attr "type" "iadd,ild,fld") + (set_attr "length" "*,*,8")]) + +(define_split + [(set (match_operand:DI 0 "hard_fp_register_operand") + (sign_extend:DI (match_operand:SI 1 "memory_operand")))] + "reload_completed" + [(set (match_dup 2) (match_dup 1)) + (set (match_dup 0) (unspec:DI [(match_dup 2)] UNSPEC_CVTLQ))] +{ + operands[1] = adjust_address (operands[1], SFmode, 0); + operands[2] = gen_rtx_REG (SFmode, REGNO (operands[0])); +}) + +;; Optimize sign-extension of SImode loads. This shows up in the wake of +;; reload when converting fp->int. + +(define_peephole2 + [(set (match_operand:SI 0 "hard_int_register_operand") + (match_operand:SI 1 "memory_operand")) + (set (match_operand:DI 2 "hard_int_register_operand") + (sign_extend:DI (match_dup 0)))] + "true_regnum (operands[0]) == true_regnum (operands[2]) + || peep2_reg_dead_p (2, operands[0])" + [(set (match_dup 2) + (sign_extend:DI (match_dup 1)))]) + +(define_insn "addsi3" + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") + (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ,rJ,rJ") + (match_operand:SI 2 "add_operand" "rI,O,K,L")))] + "" + "@ + addl %r1,%2,%0 + subl %r1,%n2,%0 + lda %0,%2(%r1) + ldah %0,%h2(%r1)") + +(define_split + [(set (match_operand:SI 0 "register_operand") + (plus:SI (match_operand:SI 1 "register_operand") + (match_operand:SI 2 "const_int_operand")))] + "! add_operand (operands[2], SImode)" + [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 3))) + (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 4)))] +{ + HOST_WIDE_INT val = INTVAL (operands[2]); + HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000); + HOST_WIDE_INT rest = val - low; + + operands[3] = GEN_INT (rest); + operands[4] = GEN_INT (low); +}) + +(define_insn "*addsi_se" + [(set (match_operand:DI 0 "register_operand" "=r,r") + (sign_extend:DI + (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ") + (match_operand:SI 2 "sext_add_operand" "rI,O"))))] + "" + "@ + addl %r1,%2,%0 + subl %r1,%n2,%0") + +(define_insn "*addsi_se2" + [(set (match_operand:DI 0 "register_operand" "=r,r") + (sign_extend:DI + (subreg:SI (plus:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ") + (match_operand:DI 2 "sext_add_operand" "rI,O")) + 0)))] + "" + "@ + addl %r1,%2,%0 + subl %r1,%n2,%0") + +(define_split + [(set (match_operand:DI 0 "register_operand") + (sign_extend:DI + (plus:SI (match_operand:SI 1 "reg_not_elim_operand") + (match_operand:SI 2 "const_int_operand")))) + (clobber (match_operand:SI 3 "reg_not_elim_operand"))] + "! 
sext_add_operand (operands[2], SImode) && INTVAL (operands[2]) > 0 + && INTVAL (operands[2]) % 4 == 0" + [(set (match_dup 3) (match_dup 4)) + (set (match_dup 0) (sign_extend:DI (plus:SI (mult:SI (match_dup 3) + (match_dup 5)) + (match_dup 1))))] +{ + HOST_WIDE_INT val = INTVAL (operands[2]) / 4; + int mult = 4; + + if (val % 2 == 0) + val /= 2, mult = 8; + + operands[4] = GEN_INT (val); + operands[5] = GEN_INT (mult); +}) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (sign_extend:DI + (plus:SI (match_operator:SI 1 "comparison_operator" + [(match_operand 2) + (match_operand 3)]) + (match_operand:SI 4 "add_operand")))) + (clobber (match_operand:DI 5 "register_operand"))] + "" + [(set (match_dup 5) (match_dup 6)) + (set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 7) (match_dup 4))))] +{ + operands[6] = gen_rtx_fmt_ee (GET_CODE (operands[1]), DImode, + operands[2], operands[3]); + operands[7] = gen_lowpart (SImode, operands[5]); +}) + +(define_expand "adddi3" + [(set (match_operand:DI 0 "register_operand") + (plus:DI (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "add_operand")))]) + +(define_insn "*adddi_er_lo16_dtp" + [(set (match_operand:DI 0 "register_operand" "=r") + (lo_sum:DI (match_operand:DI 1 "register_operand" "r") + (match_operand:DI 2 "dtp16_symbolic_operand")))] + "HAVE_AS_TLS" + "lda %0,%2(%1)\t\t!dtprel") + +(define_insn "*adddi_er_hi32_dtp" + [(set (match_operand:DI 0 "register_operand" "=r") + (plus:DI (match_operand:DI 1 "register_operand" "r") + (high:DI (match_operand:DI 2 "dtp32_symbolic_operand"))))] + "HAVE_AS_TLS" + "ldah %0,%2(%1)\t\t!dtprelhi") + +(define_insn "*adddi_er_lo32_dtp" + [(set (match_operand:DI 0 "register_operand" "=r") + (lo_sum:DI (match_operand:DI 1 "register_operand" "r") + (match_operand:DI 2 "dtp32_symbolic_operand")))] + "HAVE_AS_TLS" + "lda %0,%2(%1)\t\t!dtprello") + +(define_insn "*adddi_er_lo16_tp" + [(set (match_operand:DI 0 "register_operand" "=r") + (lo_sum:DI (match_operand:DI 1 "register_operand" "r") + (match_operand:DI 2 "tp16_symbolic_operand")))] + "HAVE_AS_TLS" + "lda %0,%2(%1)\t\t!tprel") + +(define_insn "*adddi_er_hi32_tp" + [(set (match_operand:DI 0 "register_operand" "=r") + (plus:DI (match_operand:DI 1 "register_operand" "r") + (high:DI (match_operand:DI 2 "tp32_symbolic_operand"))))] + "HAVE_AS_TLS" + "ldah %0,%2(%1)\t\t!tprelhi") + +(define_insn "*adddi_er_lo32_tp" + [(set (match_operand:DI 0 "register_operand" "=r") + (lo_sum:DI (match_operand:DI 1 "register_operand" "r") + (match_operand:DI 2 "tp32_symbolic_operand")))] + "HAVE_AS_TLS" + "lda %0,%2(%1)\t\t!tprello") + +(define_insn "*adddi_er_high_l" + [(set (match_operand:DI 0 "register_operand" "=r") + (plus:DI (match_operand:DI 1 "register_operand" "r") + (high:DI (match_operand:DI 2 "local_symbolic_operand"))))] + "TARGET_EXPLICIT_RELOCS && reload_completed" + "ldah %0,%2(%1)\t\t!gprelhigh" + [(set_attr "usegp" "yes")]) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (high:DI (match_operand:DI 1 "local_symbolic_operand")))] + "TARGET_EXPLICIT_RELOCS && reload_completed" + [(set (match_dup 0) + (plus:DI (match_dup 2) (high:DI (match_dup 1))))] + "operands[2] = pic_offset_table_rtx;") + +;; We used to expend quite a lot of effort choosing addq/subq/lda. +;; With complications like +;; +;; The NT stack unwind code can't handle a subq to adjust the stack +;; (that's a bug, but not one we can do anything about). As of NT4.0 SP3, +;; the exception handling code will loop if a subq is used and an +;; exception occurs. 
+;; +;; The 19980616 change to emit prologues as RTL also confused some +;; versions of GDB, which also interprets prologues. This has been +;; fixed as of GDB 4.18, but it does not harm to unconditionally +;; use lda here. +;; +;; and the fact that the three insns schedule exactly the same, it's +;; just not worth the effort. + +(define_insn "*adddi_internal" + [(set (match_operand:DI 0 "register_operand" "=r,r,r") + (plus:DI (match_operand:DI 1 "register_operand" "%r,r,r") + (match_operand:DI 2 "add_operand" "r,K,L")))] + "" + "@ + addq %1,%2,%0 + lda %0,%2(%1) + ldah %0,%h2(%1)") + +;; ??? Allow large constants when basing off the frame pointer or some +;; virtual register that may eliminate to the frame pointer. This is +;; done because register elimination offsets will change the hi/lo split, +;; and if we split before reload, we will require additional instructions. + +(define_insn "*adddi_fp_hack" + [(set (match_operand:DI 0 "register_operand" "=r,r,r") + (plus:DI (match_operand:DI 1 "reg_no_subreg_operand" "r,r,r") + (match_operand:DI 2 "const_int_operand" "K,L,n")))] + "NONSTRICT_REG_OK_FP_BASE_P (operands[1]) + && INTVAL (operands[2]) >= 0 + /* This is the largest constant an lda+ldah pair can add, minus + an upper bound on the displacement between SP and AP during + register elimination. See INITIAL_ELIMINATION_OFFSET. */ + && INTVAL (operands[2]) + < (0x7fff8000 + - FIRST_PSEUDO_REGISTER * UNITS_PER_WORD + - ALPHA_ROUND(crtl->outgoing_args_size) + - (ALPHA_ROUND (get_frame_size () + + max_reg_num () * UNITS_PER_WORD + + crtl->args.pretend_args_size) + - crtl->args.pretend_args_size))" + "@ + lda %0,%2(%1) + ldah %0,%h2(%1) + #") + +;; Don't do this if we are adjusting SP since we don't want to do it +;; in two steps. Don't split FP sources for the reason listed above. +(define_split + [(set (match_operand:DI 0 "register_operand") + (plus:DI (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "const_int_operand")))] + "! 
add_operand (operands[2], DImode) + && operands[0] != stack_pointer_rtx + && operands[1] != frame_pointer_rtx + && operands[1] != arg_pointer_rtx" + [(set (match_dup 0) (plus:DI (match_dup 1) (match_dup 3))) + (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))] +{ + HOST_WIDE_INT val = INTVAL (operands[2]); + HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000); + HOST_WIDE_INT rest = val - low; + rtx rest_rtx = GEN_INT (rest); + + operands[4] = GEN_INT (low); + if (satisfies_constraint_L (rest_rtx)) + operands[3] = rest_rtx; + else if (can_create_pseudo_p ()) + { + operands[3] = gen_reg_rtx (DImode); + emit_move_insn (operands[3], operands[2]); + emit_insn (gen_adddi3 (operands[0], operands[1], operands[3])); + DONE; + } + else + FAIL; +}) + +(define_insn "*sadd<modesuffix>" + [(set (match_operand:I48MODE 0 "register_operand" "=r,r") + (plus:I48MODE + (mult:I48MODE (match_operand:I48MODE 1 "reg_not_elim_operand" "r,r") + (match_operand:I48MODE 2 "const48_operand" "I,I")) + (match_operand:I48MODE 3 "sext_add_operand" "rI,O")))] + "" + "@ + s%2add<modesuffix> %1,%3,%0 + s%2sub<modesuffix> %1,%n3,%0") + +(define_insn "*saddl_se" + [(set (match_operand:DI 0 "register_operand" "=r,r") + (sign_extend:DI + (plus:SI (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r,r") + (match_operand:SI 2 "const48_operand" "I,I")) + (match_operand:SI 3 "sext_add_operand" "rI,O"))))] + "" + "@ + s%2addl %1,%3,%0 + s%2subl %1,%n3,%0") + +(define_split + [(set (match_operand:DI 0 "register_operand") + (sign_extend:DI + (plus:SI (mult:SI (match_operator:SI 1 "comparison_operator" + [(match_operand 2) + (match_operand 3)]) + (match_operand:SI 4 "const48_operand")) + (match_operand:SI 5 "sext_add_operand")))) + (clobber (match_operand:DI 6 "reg_not_elim_operand"))] + "" + [(set (match_dup 6) (match_dup 7)) + (set (match_dup 0) + (sign_extend:DI (plus:SI (mult:SI (match_dup 8) (match_dup 4)) + (match_dup 5))))] +{ + operands[7] = gen_rtx_fmt_ee (GET_CODE (operands[1]), DImode, + operands[2], operands[3]); + operands[8] = gen_lowpart (SImode, operands[6]); +}) + +(define_insn "addv<mode>3" + [(set (match_operand:I48MODE 0 "register_operand" "=r,r") + (plus:I48MODE (match_operand:I48MODE 1 "reg_or_0_operand" "%rJ,rJ") + (match_operand:I48MODE 2 "sext_add_operand" "rI,O"))) + (trap_if (ne (plus:<DWI> (sign_extend:<DWI> (match_dup 1)) + (sign_extend:<DWI> (match_dup 2))) + (sign_extend:<DWI> (plus:I48MODE (match_dup 1) + (match_dup 2)))) + (const_int 0))] + "" + "@ + add<modesuffix>v %r1,%2,%0 + sub<modesuffix>v %r1,%n2,%0") + +(define_insn "neg<mode>2" + [(set (match_operand:I48MODE 0 "register_operand" "=r") + (neg:I48MODE (match_operand:I48MODE 1 "reg_or_8bit_operand" "rI")))] + "" + "sub<modesuffix> $31,%1,%0") + +(define_insn "*negsi_se" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI (neg:SI + (match_operand:SI 1 "reg_or_8bit_operand" "rI"))))] + "" + "subl $31,%1,%0") + +(define_insn "negv<mode>2" + [(set (match_operand:I48MODE 0 "register_operand" "=r") + (neg:I48MODE (match_operand:I48MODE 1 "register_operand" "r"))) + (trap_if (ne (neg:<DWI> (sign_extend:<DWI> (match_dup 1))) + (sign_extend:<DWI> (neg:I48MODE (match_dup 1)))) + (const_int 0))] + "" + "sub<modesuffix>v $31,%1,%0") + +(define_insn "sub<mode>3" + [(set (match_operand:I48MODE 0 "register_operand" "=r") + (minus:I48MODE (match_operand:I48MODE 1 "reg_or_0_operand" "rJ") + (match_operand:I48MODE 2 "reg_or_8bit_operand" "rI")))] + "" + "sub<modesuffix> %r1,%2,%0") + +(define_insn "*subsi_se" + [(set 
(match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI + (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ") + (match_operand:SI 2 "reg_or_8bit_operand" "rI"))))] + "" + "subl %r1,%2,%0") + +(define_insn "*subsi_se2" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI + (subreg:SI (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") + (match_operand:DI 2 "reg_or_8bit_operand" "rI")) + 0)))] + "" + "subl %r1,%2,%0") + +(define_insn "*ssub<modesuffix>" + [(set (match_operand:I48MODE 0 "register_operand" "=r") + (minus:I48MODE + (mult:I48MODE (match_operand:I48MODE 1 "reg_not_elim_operand" "r") + (match_operand:I48MODE 2 "const48_operand" "I")) + (match_operand:I48MODE 3 "reg_or_8bit_operand" "rI")))] + "" + "s%2sub<modesuffix> %1,%3,%0") + +(define_insn "*ssubl_se" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI + (minus:SI (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r") + (match_operand:SI 2 "const48_operand" "I")) + (match_operand:SI 3 "reg_or_8bit_operand" "rI"))))] + "" + "s%2subl %1,%3,%0") + +(define_insn "subv<mode>3" + [(set (match_operand:I48MODE 0 "register_operand" "=r") + (minus:I48MODE (match_operand:I48MODE 1 "reg_or_0_operand" "rJ") + (match_operand:I48MODE 2 "reg_or_8bit_operand" "rI"))) + (trap_if (ne (minus:<DWI> (sign_extend:<DWI> (match_dup 1)) + (sign_extend:<DWI> (match_dup 2))) + (sign_extend:<DWI> (minus:I48MODE (match_dup 1) + (match_dup 2)))) + (const_int 0))] + "" + "sub<modesuffix>v %r1,%2,%0") + +(define_insn "mul<mode>3" + [(set (match_operand:I48MODE 0 "register_operand" "=r") + (mult:I48MODE (match_operand:I48MODE 1 "reg_or_0_operand" "%rJ") + (match_operand:I48MODE 2 "reg_or_8bit_operand" "rI")))] + "" + "mul<modesuffix> %r1,%2,%0" + [(set_attr "type" "imul") + (set_attr "opsize" "<mode>")]) + +(define_insn "*mulsi_se" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI + (mult:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ") + (match_operand:SI 2 "reg_or_8bit_operand" "rI"))))] + "" + "mull %r1,%2,%0" + [(set_attr "type" "imul") + (set_attr "opsize" "si")]) + +(define_insn "mulv<mode>3" + [(set (match_operand:I48MODE 0 "register_operand" "=r") + (mult:I48MODE (match_operand:I48MODE 1 "reg_or_0_operand" "%rJ") + (match_operand:I48MODE 2 "reg_or_8bit_operand" "rI"))) + (trap_if (ne (mult:<DWI> (sign_extend:<DWI> (match_dup 1)) + (sign_extend:<DWI> (match_dup 2))) + (sign_extend:<DWI> (mult:I48MODE (match_dup 1) + (match_dup 2)))) + (const_int 0))] + "" + "mul<modesuffix>v %r1,%2,%0" + [(set_attr "type" "imul") + (set_attr "opsize" "<mode>")]) + +(define_expand "umuldi3_highpart" + [(set (match_operand:DI 0 "register_operand") + (truncate:DI + (lshiftrt:TI + (mult:TI (zero_extend:TI + (match_operand:DI 1 "register_operand")) + (match_operand:DI 2 "reg_or_8bit_operand")) + (const_int 64))))] + "" +{ + if (REG_P (operands[2])) + operands[2] = gen_rtx_ZERO_EXTEND (TImode, operands[2]); +}) + +(define_insn "*umuldi3_highpart_reg" + [(set (match_operand:DI 0 "register_operand" "=r") + (truncate:DI + (lshiftrt:TI + (mult:TI (zero_extend:TI + (match_operand:DI 1 "register_operand" "r")) + (zero_extend:TI + (match_operand:DI 2 "register_operand" "r"))) + (const_int 64))))] + "" + "umulh %1,%2,%0" + [(set_attr "type" "imul") + (set_attr "opsize" "udi")]) + +(define_insn "*umuldi3_highpart_const" + [(set (match_operand:DI 0 "register_operand" "=r") + (truncate:DI + (lshiftrt:TI + (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand" "r")) + (match_operand:TI 2 "cint8_operand" 
"I")) + (const_int 64))))] + "" + "umulh %1,%2,%0" + [(set_attr "type" "imul") + (set_attr "opsize" "udi")]) + +(define_expand "umulditi3" + [(set (match_operand:TI 0 "register_operand") + (mult:TI + (zero_extend:TI (match_operand:DI 1 "reg_no_subreg_operand")) + (zero_extend:TI (match_operand:DI 2 "reg_no_subreg_operand"))))] + "" +{ + rtx l = gen_reg_rtx (DImode), h = gen_reg_rtx (DImode); + emit_insn (gen_muldi3 (l, operands[1], operands[2])); + emit_insn (gen_umuldi3_highpart (h, operands[1], operands[2])); + emit_move_insn (gen_lowpart (DImode, operands[0]), l); + emit_move_insn (gen_highpart (DImode, operands[0]), h); + DONE; +}) + +;; The divide and remainder operations take their inputs from r24 and +;; r25, put their output in r27, and clobber r23 and r28 on all systems. +;; +;; ??? Force sign-extension here because some versions of OSF/1 and +;; Interix/NT don't do the right thing if the inputs are not properly +;; sign-extended. But Linux, for instance, does not have this +;; problem. Is it worth the complication here to eliminate the sign +;; extension? + +(define_code_iterator any_divmod [div mod udiv umod]) + +(define_expand "<code>si3" + [(set (match_dup 3) + (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand"))) + (set (match_dup 4) + (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand"))) + (parallel [(set (match_dup 5) + (sign_extend:DI + (any_divmod:SI (match_dup 3) (match_dup 4)))) + (clobber (reg:DI 23)) + (clobber (reg:DI 28))]) + (set (match_operand:SI 0 "nonimmediate_operand") + (subreg:SI (match_dup 5) 0))] + "TARGET_ABI_OSF" +{ + operands[3] = gen_reg_rtx (DImode); + operands[4] = gen_reg_rtx (DImode); + operands[5] = gen_reg_rtx (DImode); +}) + +(define_expand "<code>di3" + [(parallel [(set (match_operand:DI 0 "register_operand") + (any_divmod:DI + (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "register_operand"))) + (clobber (reg:DI 23)) + (clobber (reg:DI 28))])] + "TARGET_ABI_OSF") + +;; Lengths of 8 for ldq $t12,__divq($gp); jsr $t9,($t12),__divq as +;; expanded by the assembler. 
+ +(define_insn_and_split "*divmodsi_internal_er" + [(set (match_operand:DI 0 "register_operand" "=c") + (sign_extend:DI (match_operator:SI 3 "divmod_operator" + [(match_operand:DI 1 "register_operand" "a") + (match_operand:DI 2 "register_operand" "b")]))) + (clobber (reg:DI 23)) + (clobber (reg:DI 28))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "#" + "&& reload_completed" + [(parallel [(set (match_dup 0) + (sign_extend:DI (match_dup 3))) + (use (match_dup 0)) + (use (match_dup 4)) + (clobber (reg:DI 23)) + (clobber (reg:DI 28))])] +{ + const char *str; + switch (GET_CODE (operands[3])) + { + case DIV: + str = "__divl"; + break; + case UDIV: + str = "__divlu"; + break; + case MOD: + str = "__reml"; + break; + case UMOD: + str = "__remlu"; + break; + default: + gcc_unreachable (); + } + operands[4] = GEN_INT (alpha_next_sequence_number++); + emit_insn (gen_movdi_er_high_g (operands[0], pic_offset_table_rtx, + gen_rtx_SYMBOL_REF (DImode, str), + operands[4])); +} + [(set_attr "type" "jsr") + (set_attr "length" "8")]) + +(define_insn "*divmodsi_internal_er_1" + [(set (match_operand:DI 0 "register_operand" "=c") + (sign_extend:DI (match_operator:SI 3 "divmod_operator" + [(match_operand:DI 1 "register_operand" "a") + (match_operand:DI 2 "register_operand" "b")]))) + (use (match_operand:DI 4 "register_operand" "c")) + (use (match_operand 5 "const_int_operand")) + (clobber (reg:DI 23)) + (clobber (reg:DI 28))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "jsr $23,($27),__%E3%j5" + [(set_attr "type" "jsr") + (set_attr "length" "4")]) + +(define_insn "*divmodsi_internal" + [(set (match_operand:DI 0 "register_operand" "=c") + (sign_extend:DI (match_operator:SI 3 "divmod_operator" + [(match_operand:DI 1 "register_operand" "a") + (match_operand:DI 2 "register_operand" "b")]))) + (clobber (reg:DI 23)) + (clobber (reg:DI 28))] + "TARGET_ABI_OSF" + "%E3 %1,%2,%0" + [(set_attr "type" "jsr") + (set_attr "length" "8")]) + +(define_insn_and_split "*divmoddi_internal_er" + [(set (match_operand:DI 0 "register_operand" "=c") + (match_operator:DI 3 "divmod_operator" + [(match_operand:DI 1 "register_operand" "a") + (match_operand:DI 2 "register_operand" "b")])) + (clobber (reg:DI 23)) + (clobber (reg:DI 28))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "#" + "&& reload_completed" + [(parallel [(set (match_dup 0) (match_dup 3)) + (use (match_dup 0)) + (use (match_dup 4)) + (clobber (reg:DI 23)) + (clobber (reg:DI 28))])] +{ + const char *str; + switch (GET_CODE (operands[3])) + { + case DIV: + str = "__divq"; + break; + case UDIV: + str = "__divqu"; + break; + case MOD: + str = "__remq"; + break; + case UMOD: + str = "__remqu"; + break; + default: + gcc_unreachable (); + } + operands[4] = GEN_INT (alpha_next_sequence_number++); + emit_insn (gen_movdi_er_high_g (operands[0], pic_offset_table_rtx, + gen_rtx_SYMBOL_REF (DImode, str), + operands[4])); +} + [(set_attr "type" "jsr") + (set_attr "length" "8")]) + +(define_insn "*divmoddi_internal_er_1" + [(set (match_operand:DI 0 "register_operand" "=c") + (match_operator:DI 3 "divmod_operator" + [(match_operand:DI 1 "register_operand" "a") + (match_operand:DI 2 "register_operand" "b")])) + (use (match_operand:DI 4 "register_operand" "c")) + (use (match_operand 5 "const_int_operand")) + (clobber (reg:DI 23)) + (clobber (reg:DI 28))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "jsr $23,($27),__%E3%j5" + [(set_attr "type" "jsr") + (set_attr "length" "4")]) + +(define_insn "*divmoddi_internal" + [(set (match_operand:DI 0 "register_operand" "=c") + 
(match_operator:DI 3 "divmod_operator" + [(match_operand:DI 1 "register_operand" "a") + (match_operand:DI 2 "register_operand" "b")])) + (clobber (reg:DI 23)) + (clobber (reg:DI 28))] + "TARGET_ABI_OSF" + "%E3 %1,%2,%0" + [(set_attr "type" "jsr") + (set_attr "length" "8")]) + +;; Next are the basic logical operations. We only expose the DImode operations +;; to the rtl expanders, but SImode versions exist for combine as well as for +;; the atomic operation splitters. + +(define_insn "*andsi_internal" + [(set (match_operand:SI 0 "register_operand" "=r,r,r") + (and:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ,rJ") + (match_operand:SI 2 "and_operand" "rI,N,MH")))] + "" + "@ + and %r1,%2,%0 + bic %r1,%N2,%0 + zapnot %r1,%m2,%0" + [(set_attr "type" "ilog,ilog,shift")]) + +(define_insn "anddi3" + [(set (match_operand:DI 0 "register_operand" "=r,r,r") + (and:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ,rJ") + (match_operand:DI 2 "and_operand" "rI,N,MH")))] + "" + "@ + and %r1,%2,%0 + bic %r1,%N2,%0 + zapnot %r1,%m2,%0" + [(set_attr "type" "ilog,ilog,shift")]) + +;; There are times when we can split an AND into two AND insns. This occurs +;; when we can first clear any bytes and then clear anything else. For +;; example "I & 0xffff07" is "(I & 0xffffff) & 0xffffffffffffff07". +;; Only do this when running on 64-bit host since the computations are +;; too messy otherwise. + +(define_split + [(set (match_operand:DI 0 "register_operand") + (and:DI (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "const_int_operand")))] + "HOST_BITS_PER_WIDE_INT == 64 && ! and_operand (operands[2], DImode)" + [(set (match_dup 0) (and:DI (match_dup 1) (match_dup 3))) + (set (match_dup 0) (and:DI (match_dup 0) (match_dup 4)))] +{ + unsigned HOST_WIDE_INT mask1 = INTVAL (operands[2]); + unsigned HOST_WIDE_INT mask2 = mask1; + int i; + + /* For each byte that isn't all zeros, make it all ones. */ + for (i = 0; i < 64; i += 8) + if ((mask1 & ((HOST_WIDE_INT) 0xff << i)) != 0) + mask1 |= (HOST_WIDE_INT) 0xff << i; + + /* Now turn on any bits we've just turned off. 
*/ + mask2 |= ~ mask1; + + operands[3] = GEN_INT (mask1); + operands[4] = GEN_INT (mask2); +}) + +(define_insn "zero_extendqi<mode>2" + [(set (match_operand:I248MODE 0 "register_operand" "=r,r") + (zero_extend:I248MODE + (match_operand:QI 1 "reg_or_bwx_memory_operand" "r,m")))] + "" + "@ + and %1,0xff,%0 + ldbu %0,%1" + [(set_attr "type" "ilog,ild") + (set_attr "isa" "*,bwx")]) + +(define_insn "zero_extendhi<mode>2" + [(set (match_operand:I48MODE 0 "register_operand" "=r,r") + (zero_extend:I48MODE + (match_operand:HI 1 "reg_or_bwx_memory_operand" "r,m")))] + "" + "@ + zapnot %1,3,%0 + ldwu %0,%1" + [(set_attr "type" "shift,ild") + (set_attr "isa" "*,bwx")]) + +(define_insn "zero_extendsidi2" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI (match_operand:SI 1 "register_operand" "r")))] + "" + "zapnot %1,15,%0" + [(set_attr "type" "shift")]) + +(define_insn "andnot<mode>3" + [(set (match_operand:I48MODE 0 "register_operand" "=r") + (and:I48MODE + (not:I48MODE (match_operand:I48MODE 1 "reg_or_8bit_operand" "rI")) + (match_operand:I48MODE 2 "reg_or_0_operand" "rJ")))] + "" + "bic %r2,%1,%0" + [(set_attr "type" "ilog")]) + +(define_insn "*iorsi_internal" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (ior:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ") + (match_operand:SI 2 "or_operand" "rI,N")))] + "" + "@ + bis %r1,%2,%0 + ornot %r1,%N2,%0" + [(set_attr "type" "ilog")]) + +(define_insn "iordi3" + [(set (match_operand:DI 0 "register_operand" "=r,r") + (ior:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ") + (match_operand:DI 2 "or_operand" "rI,N")))] + "" + "@ + bis %r1,%2,%0 + ornot %r1,%N2,%0" + [(set_attr "type" "ilog")]) + +(define_insn "*one_cmplsi_internal" + [(set (match_operand:SI 0 "register_operand" "=r") + (not:SI (match_operand:SI 1 "reg_or_8bit_operand" "rI")))] + "" + "ornot $31,%1,%0" + [(set_attr "type" "ilog")]) + +(define_insn "one_cmpldi2" + [(set (match_operand:DI 0 "register_operand" "=r") + (not:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI")))] + "" + "ornot $31,%1,%0" + [(set_attr "type" "ilog")]) + +(define_insn "*iornot<mode>3" + [(set (match_operand:I48MODE 0 "register_operand" "=r") + (ior:I48MODE + (not:I48MODE (match_operand:I48MODE 1 "reg_or_8bit_operand" "rI")) + (match_operand:I48MODE 2 "reg_or_0_operand" "rJ")))] + "" + "ornot %r2,%1,%0" + [(set_attr "type" "ilog")]) + +(define_insn "*xorsi_internal" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (xor:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ") + (match_operand:SI 2 "or_operand" "rI,N")))] + "" + "@ + xor %r1,%2,%0 + eqv %r1,%N2,%0" + [(set_attr "type" "ilog")]) + +(define_insn "xordi3" + [(set (match_operand:DI 0 "register_operand" "=r,r") + (xor:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ") + (match_operand:DI 2 "or_operand" "rI,N")))] + "" + "@ + xor %r1,%2,%0 + eqv %r1,%N2,%0" + [(set_attr "type" "ilog")]) + +(define_insn "*xornot<mode>3" + [(set (match_operand:I48MODE 0 "register_operand" "=r") + (not:I48MODE (xor:I48MODE + (match_operand:I48MODE 1 "register_operand" "%rJ") + (match_operand:I48MODE 2 "register_operand" "rI"))))] + "" + "eqv %r1,%2,%0" + [(set_attr "type" "ilog")]) + +;; Handle FFS and related insns iff we support CIX. 
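+;;
+;; ffs is synthesized from cttz; since cttz returns 64 for a zero input,
+;; the expansion below corresponds roughly to
+;;
+;;	cttz $16,$1		# $1 = 64 when $16 == 0
+;;	addq $1,1,$1
+;;	cmoveq $16,0,$1		# force the result to 0 for a zero input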
+ +(define_expand "ffsdi2" + [(set (match_dup 2) + (ctz:DI (match_operand:DI 1 "register_operand"))) + (set (match_dup 3) + (plus:DI (match_dup 2) (const_int 1))) + (set (match_operand:DI 0 "register_operand") + (if_then_else:DI (eq (match_dup 1) (const_int 0)) + (const_int 0) (match_dup 3)))] + "TARGET_CIX" +{ + operands[2] = gen_reg_rtx (DImode); + operands[3] = gen_reg_rtx (DImode); +}) + +(define_insn "clzdi2" + [(set (match_operand:DI 0 "register_operand" "=r") + (clz:DI (match_operand:DI 1 "register_operand" "r")))] + "TARGET_CIX" + "ctlz %1,%0" + [(set_attr "type" "mvi")]) + +(define_insn "ctzdi2" + [(set (match_operand:DI 0 "register_operand" "=r") + (ctz:DI (match_operand:DI 1 "register_operand" "r")))] + "TARGET_CIX" + "cttz %1,%0" + [(set_attr "type" "mvi")]) + +(define_insn "popcountdi2" + [(set (match_operand:DI 0 "register_operand" "=r") + (popcount:DI (match_operand:DI 1 "register_operand" "r")))] + "TARGET_CIX" + "ctpop %1,%0" + [(set_attr "type" "mvi")]) + +(define_expand "bswapsi2" + [(set (match_operand:SI 0 "register_operand") + (bswap:SI (match_operand:SI 1 "register_operand")))] + "!optimize_size" +{ + rtx t0, t1; + + t0 = gen_reg_rtx (DImode); + t1 = gen_reg_rtx (DImode); + + emit_insn (gen_inslh (t0, gen_lowpart (DImode, operands[1]), GEN_INT (7))); + emit_insn (gen_inswl_const (t1, gen_lowpart (HImode, operands[1]), + GEN_INT (24))); + emit_insn (gen_iordi3 (t1, t0, t1)); + emit_insn (gen_lshrdi3 (t0, t1, GEN_INT (16))); + emit_insn (gen_anddi3 (t1, t1, alpha_expand_zap_mask (0x5))); + emit_insn (gen_anddi3 (t0, t0, alpha_expand_zap_mask (0xa))); + emit_insn (gen_addsi3 (operands[0], gen_lowpart (SImode, t0), + gen_lowpart (SImode, t1))); + DONE; +}) + +(define_expand "bswapdi2" + [(set (match_operand:DI 0 "register_operand") + (bswap:DI (match_operand:DI 1 "register_operand")))] + "!optimize_size" +{ + rtx t0, t1; + + t0 = gen_reg_rtx (DImode); + t1 = gen_reg_rtx (DImode); + + /* This method of shifting and masking is not specific to Alpha, but + is only profitable on Alpha because of our handy byte zap insn. */ + + emit_insn (gen_lshrdi3 (t0, operands[1], GEN_INT (32))); + emit_insn (gen_ashldi3 (t1, operands[1], GEN_INT (32))); + emit_insn (gen_iordi3 (t1, t0, t1)); + + emit_insn (gen_lshrdi3 (t0, t1, GEN_INT (16))); + emit_insn (gen_ashldi3 (t1, t1, GEN_INT (16))); + emit_insn (gen_anddi3 (t0, t0, alpha_expand_zap_mask (0xcc))); + emit_insn (gen_anddi3 (t1, t1, alpha_expand_zap_mask (0x33))); + emit_insn (gen_iordi3 (t1, t0, t1)); + + emit_insn (gen_lshrdi3 (t0, t1, GEN_INT (8))); + emit_insn (gen_ashldi3 (t1, t1, GEN_INT (8))); + emit_insn (gen_anddi3 (t0, t0, alpha_expand_zap_mask (0xaa))); + emit_insn (gen_anddi3 (t1, t1, alpha_expand_zap_mask (0x55))); + emit_insn (gen_iordi3 (operands[0], t0, t1)); + DONE; +}) + +;; Next come the shifts and the various extract and insert operations. 
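+;;
+;; Note first that left shifts by 1, 2 and 3 are emitted as adds, roughly
+;;
+;;	addq $1,$1,$2		# $2 = $1 << 1
+;;	s4addq $1,0,$2		# $2 = $1 << 2, i.e. $1*4 + 0
+;;	s8addq $1,0,$2		# $2 = $1 << 3, i.e. $1*8 + 0
+;;
+;; which lets them issue as "iadd" instead of occupying the shifter.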
+ +(define_insn "ashldi3" + [(set (match_operand:DI 0 "register_operand" "=r,r") + (ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,rJ") + (match_operand:DI 2 "reg_or_6bit_operand" "P,rS")))] + "" +{ + switch (which_alternative) + { + case 0: + if (operands[2] == const1_rtx) + return "addq %r1,%r1,%0"; + else + return "s%P2addq %r1,0,%0"; + case 1: + return "sll %r1,%2,%0"; + default: + gcc_unreachable (); + } +} + [(set_attr "type" "iadd,shift")]) + +(define_insn "*ashldi_se" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI + (subreg:SI (ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") + (match_operand:DI 2 "const_int_operand" "P")) + 0)))] + "INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3" +{ + if (operands[2] == const1_rtx) + return "addl %r1,%r1,%0"; + else + return "s%P2addl %r1,0,%0"; +} + [(set_attr "type" "iadd")]) + +(define_insn "lshrdi3" + [(set (match_operand:DI 0 "register_operand" "=r") + (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") + (match_operand:DI 2 "reg_or_6bit_operand" "rS")))] + "" + "srl %r1,%2,%0" + [(set_attr "type" "shift")]) + +(define_insn "ashrdi3" + [(set (match_operand:DI 0 "register_operand" "=r") + (ashiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") + (match_operand:DI 2 "reg_or_6bit_operand" "rS")))] + "" + "sra %r1,%2,%0" + [(set_attr "type" "shift")]) + +(define_insn "extendqi<mode>2" + [(set (match_operand:I24MODE 0 "register_operand" "=r") + (sign_extend:I24MODE + (match_operand:QI 1 "register_operand" "r")))] + "TARGET_BWX" + "sextb %1,%0" + [(set_attr "type" "shift")]) + +(define_expand "extendqidi2" + [(set (match_operand:DI 0 "register_operand") + (sign_extend:DI (match_operand:QI 1 "some_operand")))] + "" +{ + if (TARGET_BWX) + operands[1] = force_reg (QImode, operands[1]); + else + { + rtx x, t1, t2, i56; + + if (unaligned_memory_operand (operands[1], QImode)) + { + x = gen_unaligned_extendqidi (operands[0], XEXP (operands[1], 0)); + alpha_set_memflags (x, operands[1]); + emit_insn (x); + DONE; + } + + t1 = gen_reg_rtx (DImode); + t2 = gen_reg_rtx (DImode); + i56 = GEN_INT (56); + + x = gen_lowpart (DImode, force_reg (QImode, operands[1])); + emit_move_insn (t1, x); + emit_insn (gen_ashldi3 (t2, t1, i56)); + emit_insn (gen_ashrdi3 (operands[0], t2, i56)); + DONE; + } +}) + +(define_insn "*extendqidi2_bwx" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI (match_operand:QI 1 "register_operand" "r")))] + "TARGET_BWX" + "sextb %1,%0" + [(set_attr "type" "shift")]) + +(define_insn "extendhisi2" + [(set (match_operand:SI 0 "register_operand" "=r") + (sign_extend:SI (match_operand:HI 1 "register_operand" "r")))] + "TARGET_BWX" + "sextw %1,%0" + [(set_attr "type" "shift")]) + +(define_expand "extendhidi2" + [(set (match_operand:DI 0 "register_operand") + (sign_extend:DI (match_operand:HI 1 "some_operand")))] + "" +{ + if (TARGET_BWX) + operands[1] = force_reg (HImode, operands[1]); + else + { + rtx x, t1, t2, i48; + + if (unaligned_memory_operand (operands[1], HImode)) + { + x = gen_unaligned_extendhidi (operands[0], XEXP (operands[1], 0)); + alpha_set_memflags (x, operands[1]); + emit_insn (x); + DONE; + } + + t1 = gen_reg_rtx (DImode); + t2 = gen_reg_rtx (DImode); + i48 = GEN_INT (48); + + x = gen_lowpart (DImode, force_reg (HImode, operands[1])); + emit_move_insn (t1, x); + emit_insn (gen_ashldi3 (t2, t1, i48)); + emit_insn (gen_ashrdi3 (operands[0], t2, i48)); + DONE; + } +}) + +(define_insn "*extendhidi2_bwx" + [(set (match_operand:DI 0 "register_operand" "=r") + 
(sign_extend:DI (match_operand:HI 1 "register_operand" "r")))] + "TARGET_BWX" + "sextw %1,%0" + [(set_attr "type" "shift")]) + +;; Here's how we sign extend an unaligned byte and halfword. Doing this +;; as a pattern saves one instruction. The code is similar to that for +;; the unaligned loads (see below). +;; +;; Operand 1 is the address, operand 0 is the result. + +(define_expand "unaligned_extendqidi" + [(set (match_dup 3) + (mem:DI (and:DI (match_operand:DI 1 "address_operand") (const_int -8)))) + (set (match_dup 4) + (ashift:DI (match_dup 3) + (minus:DI (const_int 64) + (ashift:DI + (and:DI (match_dup 2) (const_int 7)) + (const_int 3))))) + (set (match_operand:QI 0 "register_operand") + (ashiftrt:DI (match_dup 4) (const_int 56)))] + "" +{ + operands[0] = gen_lowpart (DImode, operands[0]); + operands[2] = get_unaligned_offset (operands[1], 1); + operands[3] = gen_reg_rtx (DImode); + operands[4] = gen_reg_rtx (DImode); +}) + +(define_expand "unaligned_extendhidi" + [(set (match_dup 3) + (mem:DI (and:DI (match_operand:DI 1 "address_operand") (const_int -8)))) + (set (match_dup 4) + (ashift:DI (match_dup 3) + (minus:DI (const_int 64) + (ashift:DI + (and:DI (match_dup 2) (const_int 7)) + (const_int 3))))) + (set (match_operand:HI 0 "register_operand") + (ashiftrt:DI (match_dup 4) (const_int 48)))] + "" +{ + operands[0] = gen_lowpart (DImode, operands[0]); + operands[2] = get_unaligned_offset (operands[1], 2); + operands[3] = gen_reg_rtx (DImode); + operands[4] = gen_reg_rtx (DImode); +}) + +(define_insn "*extxl_const" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") + (match_operand:DI 2 "mode_width_operand" "n") + (match_operand:DI 3 "mul8_operand" "I")))] + "" + "ext%M2l %r1,%s3,%0" + [(set_attr "type" "shift")]) + +(define_insn "extxl" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extract:DI + (match_operand:DI 1 "reg_or_0_operand" "rJ") + (match_operand:DI 2 "mode_width_operand" "n") + (ashift:DI (match_operand:DI 3 "reg_or_8bit_operand" "rI") + (const_int 3))))] + "" + "ext%M2l %r1,%3,%0" + [(set_attr "type" "shift")]) + +;; Combine has some strange notion of preserving existing undefined behavior +;; in shifts larger than a word size. So capture these patterns that it +;; should have turned into zero_extracts. 
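The unaligned_extendqidi expander above reads naturally as C: fetch the aligned quadword containing the byte, shift that byte to the top of the register, then shift it back arithmetically. A sketch under the assumption of a little-endian host (function name is mine):

#include <stdint.h>
#include <string.h>
#include <assert.h>

/* Model of unaligned_extendqidi: "addr" is a byte offset into "base".
   The memcpy stands in for ldq_u; the left shift moves the wanted byte
   into bits 63..56 (Alpha shifts use only the low 6 bits of the count,
   hence the "& 63"); the arithmetic right shift sign-extends.  */
static int64_t model_ldbu_sext (const uint8_t *base, uint64_t addr)
{
  uint64_t quad;
  memcpy (&quad, base + (addr & ~(uint64_t) 7), 8);  /* ldq_u */
  uint64_t end = addr + 1;           /* get_unaligned_offset (addr, 1) */
  uint64_t t = quad << ((64 - ((end & 7) << 3)) & 63);
  return (int64_t) t >> 56;          /* sra by 56: sign extension */
}

int main (void)
{
  uint8_t buf[16] = { 0 };
  buf[5] = 0x80;                     /* a byte with the sign bit set */
  assert (model_ldbu_sext (buf, 5) == -128);
  assert (model_ldbu_sext (buf, 4) == 0);
  return 0;
}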
+ +(define_insn "*extxl_1" + [(set (match_operand:DI 0 "register_operand" "=r") + (and:DI (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") + (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI") + (const_int 3))) + (match_operand:DI 3 "mode_mask_operand" "n")))] + "" + "ext%U3l %1,%2,%0" + [(set_attr "type" "shift")]) + +(define_insn "*extql_2" + [(set (match_operand:DI 0 "register_operand" "=r") + (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") + (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI") + (const_int 3))))] + "" + "extql %1,%2,%0" + [(set_attr "type" "shift")]) + +(define_insn "extqh" + [(set (match_operand:DI 0 "register_operand" "=r") + (ashift:DI + (match_operand:DI 1 "reg_or_0_operand" "rJ") + (minus:DI (const_int 64) + (ashift:DI + (and:DI + (match_operand:DI 2 "reg_or_8bit_operand" "rI") + (const_int 7)) + (const_int 3)))))] + "" + "extqh %r1,%2,%0" + [(set_attr "type" "shift")]) + +(define_insn "extwh" + [(set (match_operand:DI 0 "register_operand" "=r") + (ashift:DI + (and:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") + (const_int 65535)) + (minus:DI (const_int 64) + (ashift:DI + (and:DI + (match_operand:DI 2 "reg_or_8bit_operand" "rI") + (const_int 7)) + (const_int 3)))))] + "" + "extwh %r1,%2,%0" + [(set_attr "type" "shift")]) + +(define_insn "extlh" + [(set (match_operand:DI 0 "register_operand" "=r") + (ashift:DI + (and:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") + (const_int 2147483647)) + (minus:DI (const_int 64) + (ashift:DI + (and:DI + (match_operand:DI 2 "reg_or_8bit_operand" "rI") + (const_int 7)) + (const_int 3)))))] + "" + "extlh %r1,%2,%0" + [(set_attr "type" "shift")]) + +;; This converts an extXl into an extXh with an appropriate adjustment +;; to the address calculation. + +;;(define_split +;; [(set (match_operand:DI 0 "register_operand") +;; (ashift:DI (zero_extract:DI (match_operand:DI 1 "register_operand") +;; (match_operand:DI 2 "mode_width_operand") +;; (ashift:DI (match_operand:DI 3) +;; (const_int 3))) +;; (match_operand:DI 4 "const_int_operand"))) +;; (clobber (match_operand:DI 5 "register_operand"))] +;; "INTVAL (operands[4]) == 64 - INTVAL (operands[2])" +;; [(set (match_dup 5) (match_dup 6)) +;; (set (match_dup 0) +;; (ashift:DI (zero_extract:DI (match_dup 1) (match_dup 2) +;; (ashift:DI (plus:DI (match_dup 5) +;; (match_dup 7)) +;; (const_int 3))) +;; (match_dup 4)))] +;; " +;;{ +;; operands[6] = plus_constant (DImode, operands[3], +;; INTVAL (operands[2]) / BITS_PER_UNIT); +;; operands[7] = GEN_INT (- INTVAL (operands[2]) / BITS_PER_UNIT); +;;}") + +(define_insn "ins<modesuffix>l_const" + [(set (match_operand:DI 0 "register_operand" "=r") + (ashift:DI (zero_extend:DI + (match_operand:I124MODE 1 "register_operand" "r")) + (match_operand:DI 2 "mul8_operand" "I")))] + "" + "ins<modesuffix>l %1,%s2,%0" + [(set_attr "type" "shift")]) + +(define_insn "ins<modesuffix>l" + [(set (match_operand:DI 0 "register_operand" "=r") + (ashift:DI (zero_extend:DI + (match_operand:I124MODE 1 "register_operand" "r")) + (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI") + (const_int 3))))] + "" + "ins<modesuffix>l %1,%2,%0" + [(set_attr "type" "shift")]) + +(define_insn "insql" + [(set (match_operand:DI 0 "register_operand" "=r") + (ashift:DI (match_operand:DI 1 "register_operand" "r") + (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI") + (const_int 3))))] + "" + "insql %1,%2,%0" + [(set_attr "type" "shift")]) + +;; Combine has this sometimes habit of moving the and outside of the +;; shift, making life more 
interesting.
+
+(define_insn "*insxl"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
+			   (match_operand:DI 2 "mul8_operand" "I"))
+		(match_operand:DI 3 "immediate_operand" "i")))]
+  "HOST_BITS_PER_WIDE_INT == 64
+   && CONST_INT_P (operands[3])
+   && (((unsigned HOST_WIDE_INT) 0xff << INTVAL (operands[2])
+	== (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
+       || ((unsigned HOST_WIDE_INT) 0xffff << INTVAL (operands[2])
+	   == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
+       || ((unsigned HOST_WIDE_INT) 0xffffffff << INTVAL (operands[2])
+	   == (unsigned HOST_WIDE_INT) INTVAL (operands[3])))"
+{
+#if HOST_BITS_PER_WIDE_INT == 64
+  if ((unsigned HOST_WIDE_INT) 0xff << INTVAL (operands[2])
+      == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
+    return "insbl %1,%s2,%0";
+  if ((unsigned HOST_WIDE_INT) 0xffff << INTVAL (operands[2])
+      == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
+    return "inswl %1,%s2,%0";
+  if ((unsigned HOST_WIDE_INT) 0xffffffff << INTVAL (operands[2])
+      == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))
+    return "insll %1,%s2,%0";
+#endif
+  gcc_unreachable ();
+}
+  [(set_attr "type" "shift")])
+
+;; We do not include the insXh insns because they are complex to express
+;; and it does not appear that we would ever want to generate them.
+;;
+;; Since we need them for block moves, though, cop out and use unspec.
+
+(define_insn "insxh"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(unspec:DI [(match_operand:DI 1 "register_operand" "r")
+		    (match_operand:DI 2 "mode_width_operand" "n")
+		    (match_operand:DI 3 "reg_or_8bit_operand" "rI")]
+		   UNSPEC_INSXH))]
+  ""
+  "ins%M2h %1,%3,%0"
+  [(set_attr "type" "shift")])
+
+(define_insn "mskxl"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(and:DI (not:DI (ashift:DI
+			 (match_operand:DI 2 "mode_mask_operand" "n")
+			 (ashift:DI
+			  (match_operand:DI 3 "reg_or_8bit_operand" "rI")
+			  (const_int 3))))
+		(match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+  ""
+  "msk%U2l %r1,%3,%0"
+  [(set_attr "type" "shift")])
+
+;; We do not include the mskXh insns because it does not appear we would
+;; ever generate one.
+;;
+;; Again, we need them for block moves, so we use an unspec again.
+
+(define_insn "mskxh"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(unspec:DI [(match_operand:DI 1 "register_operand" "r")
+		    (match_operand:DI 2 "mode_width_operand" "n")
+		    (match_operand:DI 3 "reg_or_8bit_operand" "rI")]
+		   UNSPEC_MSKXH))]
+  ""
+  "msk%M2h %1,%3,%0"
+  [(set_attr "type" "shift")])
+
+;; Prefer AND + NE over LSHIFTRT + AND.
+
+(define_insn_and_split "*ze_and_ne"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+	(zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+			 (const_int 1)
+			 (match_operand 2 "const_int_operand" "I")))]
+  "(unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 8"
+  "#"
+  "(unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 8"
+  [(set (match_dup 0)
+	(and:DI (match_dup 1) (match_dup 3)))
+   (set (match_dup 0)
+	(ne:DI (match_dup 0) (const_int 0)))]
+  "operands[3] = GEN_INT (1 << INTVAL (operands[2]));")
+
+;; Floating-point operations.  All the double-precision insns can extend
+;; from single, so indicate that.  The exceptions are the ones that simply
+;; play with the sign bits; it's not clear what to do there.
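Before the floating-point patterns, the *ze_and_ne split above in miniature: for bit positions below 8 the mask fits an 8-bit literal, so the shift-and-mask form is rewritten as an AND plus a compare with zero. The two C forms below (names are mine) compute the same bit:

#include <stdint.h>
#include <assert.h>

static uint64_t bit_via_shift (uint64_t x, unsigned n) { return (x >> n) & 1; }
static uint64_t bit_via_and (uint64_t x, unsigned n) { return (x & (1ull << n)) != 0; }

int main (void)
{
  for (unsigned n = 0; n < 8; n++)
    assert (bit_via_shift (0xa5, n) == bit_via_and (0xa5, n));
  return 0;
}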
+ +(define_mode_iterator FMODE [SF DF]) + +(define_mode_attr opmode [(SF "si") (DF "di")]) + +(define_insn "abs<mode>2" + [(set (match_operand:FMODE 0 "register_operand" "=f") + (abs:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG")))] + "TARGET_FP" + "cpys $f31,%R1,%0" + [(set_attr "type" "fcpys")]) + +(define_insn "*nabs<mode>2" + [(set (match_operand:FMODE 0 "register_operand" "=f") + (neg:FMODE + (abs:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG"))))] + "TARGET_FP" + "cpysn $f31,%R1,%0" + [(set_attr "type" "fadd")]) + +(define_expand "abstf2" + [(parallel [(set (match_operand:TF 0 "register_operand") + (abs:TF (match_operand:TF 1 "reg_or_0_operand"))) + (use (match_dup 2))])] + "TARGET_HAS_XFLOATING_LIBS" +{ +#if HOST_BITS_PER_WIDE_INT >= 64 + operands[2] = force_reg (DImode, GEN_INT ((HOST_WIDE_INT) 1 << 63)); +#else + operands[2] = force_reg (DImode, immed_double_const (0, 0x80000000, DImode)); +#endif +}) + +(define_insn_and_split "*abstf_internal" + [(set (match_operand:TF 0 "register_operand" "=r") + (abs:TF (match_operand:TF 1 "reg_or_0_operand" "rG"))) + (use (match_operand:DI 2 "register_operand" "r"))] + "TARGET_HAS_XFLOATING_LIBS" + "#" + "&& reload_completed" + [(const_int 0)] + "alpha_split_tfmode_frobsign (operands, gen_andnotdi3); DONE;") + +(define_insn "neg<mode>2" + [(set (match_operand:FMODE 0 "register_operand" "=f") + (neg:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG")))] + "TARGET_FP" + "cpysn %R1,%R1,%0" + [(set_attr "type" "fadd")]) + +(define_expand "negtf2" + [(parallel [(set (match_operand:TF 0 "register_operand") + (neg:TF (match_operand:TF 1 "reg_or_0_operand"))) + (use (match_dup 2))])] + "TARGET_HAS_XFLOATING_LIBS" +{ +#if HOST_BITS_PER_WIDE_INT >= 64 + operands[2] = force_reg (DImode, GEN_INT ((HOST_WIDE_INT) 1 << 63)); +#else + operands[2] = force_reg (DImode, immed_double_const (0, 0x80000000, DImode)); +#endif +}) + +(define_insn_and_split "*negtf_internal" + [(set (match_operand:TF 0 "register_operand" "=r") + (neg:TF (match_operand:TF 1 "reg_or_0_operand" "rG"))) + (use (match_operand:DI 2 "register_operand" "r"))] + "TARGET_HAS_XFLOATING_LIBS" + "#" + "&& reload_completed" + [(const_int 0)] + "alpha_split_tfmode_frobsign (operands, gen_xordi3); DONE;") + +(define_insn "copysign<mode>3" + [(set (match_operand:FMODE 0 "register_operand" "=f") + (unspec:FMODE [(match_operand:FMODE 1 "reg_or_0_operand" "fG") + (match_operand:FMODE 2 "reg_or_0_operand" "fG")] + UNSPEC_COPYSIGN))] + "TARGET_FP" + "cpys %R2,%R1,%0" + [(set_attr "type" "fadd")]) + +(define_insn "*ncopysign<mode>3" + [(set (match_operand:FMODE 0 "register_operand" "=f") + (neg:FMODE + (unspec:FMODE [(match_operand:FMODE 1 "reg_or_0_operand" "fG") + (match_operand:FMODE 2 "reg_or_0_operand" "fG")] + UNSPEC_COPYSIGN)))] + "TARGET_FP" + "cpysn %R2,%R1,%0" + [(set_attr "type" "fadd")]) + +(define_insn "*add<mode>3_ieee" + [(set (match_operand:FMODE 0 "register_operand" "=&f") + (plus:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "%fG") + (match_operand:FMODE 2 "reg_or_0_operand" "fG")))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "add<modesuffix>%/ %R1,%R2,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "add<mode>3" + [(set (match_operand:FMODE 0 "register_operand" "=f") + (plus:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "%fG") + (match_operand:FMODE 2 "reg_or_0_operand" "fG")))] + "TARGET_FP" + "add<modesuffix>%/ %R1,%R2,%0" + [(set_attr "type" "fadd") + (set_attr 
"trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "*adddf_ext1" + [(set (match_operand:DF 0 "register_operand" "=f") + (plus:DF (float_extend:DF + (match_operand:SF 1 "reg_or_0_operand" "fG")) + (match_operand:DF 2 "reg_or_0_operand" "fG")))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "add%-%/ %R1,%R2,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "*adddf_ext2" + [(set (match_operand:DF 0 "register_operand" "=f") + (plus:DF (float_extend:DF + (match_operand:SF 1 "reg_or_0_operand" "%fG")) + (float_extend:DF + (match_operand:SF 2 "reg_or_0_operand" "fG"))))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "add%-%/ %R1,%R2,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_expand "addtf3" + [(use (match_operand:TF 0 "register_operand")) + (use (match_operand:TF 1 "general_operand")) + (use (match_operand:TF 2 "general_operand"))] + "TARGET_HAS_XFLOATING_LIBS" + "alpha_emit_xfloating_arith (PLUS, operands); DONE;") + +(define_insn "*sub<mode>3_ieee" + [(set (match_operand:FMODE 0 "register_operand" "=&f") + (minus:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG") + (match_operand:FMODE 2 "reg_or_0_operand" "fG")))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "sub<modesuffix>%/ %R1,%R2,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "sub<mode>3" + [(set (match_operand:FMODE 0 "register_operand" "=f") + (minus:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG") + (match_operand:FMODE 2 "reg_or_0_operand" "fG")))] + "TARGET_FP" + "sub<modesuffix>%/ %R1,%R2,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "*subdf_ext1" + [(set (match_operand:DF 0 "register_operand" "=f") + (minus:DF (float_extend:DF + (match_operand:SF 1 "reg_or_0_operand" "fG")) + (match_operand:DF 2 "reg_or_0_operand" "fG")))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "sub%-%/ %R1,%R2,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "*subdf_ext2" + [(set (match_operand:DF 0 "register_operand" "=f") + (minus:DF (match_operand:DF 1 "reg_or_0_operand" "fG") + (float_extend:DF + (match_operand:SF 2 "reg_or_0_operand" "fG"))))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "sub%-%/ %R1,%R2,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "*subdf_ext3" + [(set (match_operand:DF 0 "register_operand" "=f") + (minus:DF (float_extend:DF + (match_operand:SF 1 "reg_or_0_operand" "fG")) + (float_extend:DF + (match_operand:SF 2 "reg_or_0_operand" "fG"))))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "sub%-%/ %R1,%R2,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_expand "subtf3" + [(use (match_operand:TF 0 "register_operand")) + (use (match_operand:TF 1 "general_operand")) + (use (match_operand:TF 2 "general_operand"))] + "TARGET_HAS_XFLOATING_LIBS" + "alpha_emit_xfloating_arith (MINUS, operands); DONE;") + +(define_insn "*mul<mode>3_ieee" + [(set (match_operand:FMODE 0 
"register_operand" "=&f") + (mult:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "%fG") + (match_operand:FMODE 2 "reg_or_0_operand" "fG")))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "mul<modesuffix>%/ %R1,%R2,%0" + [(set_attr "type" "fmul") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "mul<mode>3" + [(set (match_operand:FMODE 0 "register_operand" "=f") + (mult:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "%fG") + (match_operand:FMODE 2 "reg_or_0_operand" "fG")))] + "TARGET_FP" + "mul<modesuffix>%/ %R1,%R2,%0" + [(set_attr "type" "fmul") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "*muldf_ext1" + [(set (match_operand:DF 0 "register_operand" "=f") + (mult:DF (float_extend:DF + (match_operand:SF 1 "reg_or_0_operand" "fG")) + (match_operand:DF 2 "reg_or_0_operand" "fG")))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "mul%-%/ %R1,%R2,%0" + [(set_attr "type" "fmul") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "*muldf_ext2" + [(set (match_operand:DF 0 "register_operand" "=f") + (mult:DF (float_extend:DF + (match_operand:SF 1 "reg_or_0_operand" "%fG")) + (float_extend:DF + (match_operand:SF 2 "reg_or_0_operand" "fG"))))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "mul%-%/ %R1,%R2,%0" + [(set_attr "type" "fmul") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_expand "multf3" + [(use (match_operand:TF 0 "register_operand")) + (use (match_operand:TF 1 "general_operand")) + (use (match_operand:TF 2 "general_operand"))] + "TARGET_HAS_XFLOATING_LIBS" + "alpha_emit_xfloating_arith (MULT, operands); DONE;") + +(define_insn "*div<mode>3_ieee" + [(set (match_operand:FMODE 0 "register_operand" "=&f") + (div:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG") + (match_operand:FMODE 2 "reg_or_0_operand" "fG")))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "div<modesuffix>%/ %R1,%R2,%0" + [(set_attr "type" "fdiv") + (set_attr "opsize" "<opmode>") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "div<mode>3" + [(set (match_operand:FMODE 0 "register_operand" "=f") + (div:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG") + (match_operand:FMODE 2 "reg_or_0_operand" "fG")))] + "TARGET_FP" + "div<modesuffix>%/ %R1,%R2,%0" + [(set_attr "type" "fdiv") + (set_attr "opsize" "<opmode>") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "*divdf_ext1" + [(set (match_operand:DF 0 "register_operand" "=f") + (div:DF (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG")) + (match_operand:DF 2 "reg_or_0_operand" "fG")))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "div%-%/ %R1,%R2,%0" + [(set_attr "type" "fdiv") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "*divdf_ext2" + [(set (match_operand:DF 0 "register_operand" "=f") + (div:DF (match_operand:DF 1 "reg_or_0_operand" "fG") + (float_extend:DF + (match_operand:SF 2 "reg_or_0_operand" "fG"))))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "div%-%/ %R1,%R2,%0" + [(set_attr "type" "fdiv") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "*divdf_ext3" + [(set (match_operand:DF 0 
"register_operand" "=f") + (div:DF (float_extend:DF + (match_operand:SF 1 "reg_or_0_operand" "fG")) + (float_extend:DF + (match_operand:SF 2 "reg_or_0_operand" "fG"))))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "div%-%/ %R1,%R2,%0" + [(set_attr "type" "fdiv") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_expand "divtf3" + [(use (match_operand:TF 0 "register_operand")) + (use (match_operand:TF 1 "general_operand")) + (use (match_operand:TF 2 "general_operand"))] + "TARGET_HAS_XFLOATING_LIBS" + "alpha_emit_xfloating_arith (DIV, operands); DONE;") + +(define_insn "*sqrt<mode>2_ieee" + [(set (match_operand:FMODE 0 "register_operand" "=&f") + (sqrt:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG")))] + "TARGET_FP && TARGET_FIX && alpha_fptm >= ALPHA_FPTM_SU" + "sqrt<modesuffix>%/ %R1,%0" + [(set_attr "type" "fsqrt") + (set_attr "opsize" "<opmode>") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "sqrt<mode>2" + [(set (match_operand:FMODE 0 "register_operand" "=f") + (sqrt:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG")))] + "TARGET_FP && TARGET_FIX" + "sqrt<modesuffix>%/ %R1,%0" + [(set_attr "type" "fsqrt") + (set_attr "opsize" "<opmode>") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +;; Define conversion operators between DFmode and SImode, using the cvtql +;; instruction. To allow combine et al to do useful things, we keep the +;; operation as a unit until after reload, at which point we split the +;; instructions. +;; +;; Note that we (attempt to) only consider this optimization when the +;; ultimate destination is memory. If we will be doing further integer +;; processing, it is cheaper to do the truncation in the int regs. 
+ +(define_insn "*cvtql" + [(set (match_operand:SF 0 "register_operand" "=f") + (unspec:SF [(match_operand:DI 1 "reg_or_0_operand" "fG")] + UNSPEC_CVTQL))] + "TARGET_FP" + "cvtql%/ %R1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "trap_suffix" "v_sv")]) + +(define_insn_and_split "*fix_truncdfsi_ieee" + [(set (match_operand:SI 0 "memory_operand" "=m") + (subreg:SI + (match_operator:DI 4 "fix_operator" + [(match_operand:DF 1 "reg_or_0_operand" "fG")]) 0)) + (clobber (match_scratch:DI 2 "=&f")) + (clobber (match_scratch:SF 3 "=&f"))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "#" + "&& reload_completed" + [(set (match_dup 2) (match_op_dup 4 [(match_dup 1)])) + (set (match_dup 3) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL)) + (set (match_dup 5) (match_dup 3))] +{ + operands[5] = adjust_address (operands[0], SFmode, 0); +} + [(set_attr "type" "fadd") + (set_attr "trap" "yes")]) + +(define_insn_and_split "*fix_truncdfsi_internal" + [(set (match_operand:SI 0 "memory_operand" "=m") + (subreg:SI + (match_operator:DI 3 "fix_operator" + [(match_operand:DF 1 "reg_or_0_operand" "fG")]) 0)) + (clobber (match_scratch:DI 2 "=f"))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "#" + "&& reload_completed" + [(set (match_dup 2) (match_op_dup 3 [(match_dup 1)])) + (set (match_dup 4) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL)) + (set (match_dup 5) (match_dup 4))] +{ + operands[4] = gen_rtx_REG (SFmode, REGNO (operands[2])); + operands[5] = adjust_address (operands[0], SFmode, 0); +} + [(set_attr "type" "fadd") + (set_attr "trap" "yes")]) + +(define_insn "*fix_truncdfdi_ieee" + [(set (match_operand:DI 0 "reg_no_subreg_operand" "=&f") + (match_operator:DI 2 "fix_operator" + [(match_operand:DF 1 "reg_or_0_operand" "fG")]))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "cvt%-q%/ %R1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "c") + (set_attr "trap_suffix" "v_sv_svi")]) + +(define_insn "*fix_truncdfdi2" + [(set (match_operand:DI 0 "reg_no_subreg_operand" "=f") + (match_operator:DI 2 "fix_operator" + [(match_operand:DF 1 "reg_or_0_operand" "fG")]))] + "TARGET_FP" + "cvt%-q%/ %R1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "c") + (set_attr "trap_suffix" "v_sv_svi")]) + +(define_expand "fix_truncdfdi2" + [(set (match_operand:DI 0 "reg_no_subreg_operand") + (fix:DI (match_operand:DF 1 "reg_or_0_operand")))] + "TARGET_FP") + +(define_expand "fixuns_truncdfdi2" + [(set (match_operand:DI 0 "reg_no_subreg_operand") + (unsigned_fix:DI (match_operand:DF 1 "reg_or_0_operand")))] + "TARGET_FP") + +;; Likewise between SFmode and SImode. 
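The /c round suffix these fix_trunc patterns select means "chopped", i.e. round toward zero, which is exactly the semantics a C cast requires. A minimal check:

#include <assert.h>

int main (void)
{
  assert ((long) 2.7 == 2);
  assert ((long) -2.7 == -2);   /* toward zero, not toward minus infinity */
  return 0;
}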
+ +(define_insn_and_split "*fix_truncsfsi_ieee" + [(set (match_operand:SI 0 "memory_operand" "=m") + (subreg:SI + (match_operator:DI 4 "fix_operator" + [(float_extend:DF + (match_operand:SF 1 "reg_or_0_operand" "fG"))]) 0)) + (clobber (match_scratch:DI 2 "=&f")) + (clobber (match_scratch:SF 3 "=&f"))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "#" + "&& reload_completed" + [(set (match_dup 2) (match_op_dup 4 [(float_extend:DF (match_dup 1))])) + (set (match_dup 3) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL)) + (set (match_dup 5) (match_dup 3))] + "operands[5] = adjust_address (operands[0], SFmode, 0);" + [(set_attr "type" "fadd") + (set_attr "trap" "yes")]) + +(define_insn_and_split "*fix_truncsfsi_internal" + [(set (match_operand:SI 0 "memory_operand" "=m") + (subreg:SI + (match_operator:DI 3 "fix_operator" + [(float_extend:DF + (match_operand:SF 1 "reg_or_0_operand" "fG"))]) 0)) + (clobber (match_scratch:DI 2 "=f"))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "#" + "&& reload_completed" + [(set (match_dup 2) (match_op_dup 3 [(float_extend:DF (match_dup 1))])) + (set (match_dup 4) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL)) + (set (match_dup 5) (match_dup 4))] +{ + operands[4] = gen_rtx_REG (SFmode, REGNO (operands[2])); + operands[5] = adjust_address (operands[0], SFmode, 0); +} + [(set_attr "type" "fadd") + (set_attr "trap" "yes")]) + +(define_insn "*fix_truncsfdi_ieee" + [(set (match_operand:DI 0 "reg_no_subreg_operand" "=&f") + (match_operator:DI 2 "fix_operator" + [(float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG"))]))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "cvt%-q%/ %R1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "c") + (set_attr "trap_suffix" "v_sv_svi")]) + +(define_insn "*fix_truncsfdi2" + [(set (match_operand:DI 0 "reg_no_subreg_operand" "=f") + (match_operator:DI 2 "fix_operator" + [(float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG"))]))] + "TARGET_FP" + "cvt%-q%/ %R1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "c") + (set_attr "trap_suffix" "v_sv_svi")]) + +(define_expand "fix_truncsfdi2" + [(set (match_operand:DI 0 "reg_no_subreg_operand") + (fix:DI (float_extend:DF (match_operand:SF 1 "reg_or_0_operand"))))] + "TARGET_FP") + +(define_expand "fixuns_truncsfdi2" + [(set (match_operand:DI 0 "reg_no_subreg_operand") + (unsigned_fix:DI + (float_extend:DF (match_operand:SF 1 "reg_or_0_operand"))))] + "TARGET_FP") + +(define_expand "fix_trunctfdi2" + [(use (match_operand:DI 0 "register_operand")) + (use (match_operand:TF 1 "general_operand"))] + "TARGET_HAS_XFLOATING_LIBS" + "alpha_emit_xfloating_cvt (FIX, operands); DONE;") + +(define_expand "fixuns_trunctfdi2" + [(use (match_operand:DI 0 "register_operand")) + (use (match_operand:TF 1 "general_operand"))] + "TARGET_HAS_XFLOATING_LIBS" + "alpha_emit_xfloating_cvt (UNSIGNED_FIX, operands); DONE;") + +(define_insn "*floatdisf_ieee" + [(set (match_operand:SF 0 "register_operand" "=&f") + (float:SF (match_operand:DI 1 "reg_no_subreg_operand" "f")))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "cvtq%,%/ %1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "sui")]) + +(define_insn "floatdisf2" + [(set (match_operand:SF 0 "register_operand" "=f") + (float:SF (match_operand:DI 1 "reg_no_subreg_operand" "f")))] + "TARGET_FP" + "cvtq%,%/ %1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr 
"trap_suffix" "sui")]) + +(define_insn_and_split "*floatsisf2_ieee" + [(set (match_operand:SF 0 "register_operand" "=&f") + (float:SF (match_operand:SI 1 "memory_operand" "m"))) + (clobber (match_scratch:DI 2 "=&f")) + (clobber (match_scratch:SF 3 "=&f"))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "#" + "&& reload_completed" + [(set (match_dup 3) (match_dup 1)) + (set (match_dup 2) (unspec:DI [(match_dup 3)] UNSPEC_CVTLQ)) + (set (match_dup 0) (float:SF (match_dup 2)))] + "operands[1] = adjust_address (operands[1], SFmode, 0);") + +(define_insn_and_split "*floatsisf2" + [(set (match_operand:SF 0 "register_operand" "=f") + (float:SF (match_operand:SI 1 "memory_operand" "m")))] + "TARGET_FP" + "#" + "&& reload_completed" + [(set (match_dup 0) (match_dup 1)) + (set (match_dup 2) (unspec:DI [(match_dup 0)] UNSPEC_CVTLQ)) + (set (match_dup 0) (float:SF (match_dup 2)))] +{ + operands[1] = adjust_address (operands[1], SFmode, 0); + operands[2] = gen_rtx_REG (DImode, REGNO (operands[0])); +}) + +(define_insn "*floatdidf_ieee" + [(set (match_operand:DF 0 "register_operand" "=&f") + (float:DF (match_operand:DI 1 "reg_no_subreg_operand" "f")))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "cvtq%-%/ %1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "sui")]) + +(define_insn "floatdidf2" + [(set (match_operand:DF 0 "register_operand" "=f") + (float:DF (match_operand:DI 1 "reg_no_subreg_operand" "f")))] + "TARGET_FP" + "cvtq%-%/ %1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "sui")]) + +(define_insn_and_split "*floatsidf2_ieee" + [(set (match_operand:DF 0 "register_operand" "=&f") + (float:DF (match_operand:SI 1 "memory_operand" "m"))) + (clobber (match_scratch:DI 2 "=&f")) + (clobber (match_scratch:SF 3 "=&f"))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "#" + "&& reload_completed" + [(set (match_dup 3) (match_dup 1)) + (set (match_dup 2) (unspec:DI [(match_dup 3)] UNSPEC_CVTLQ)) + (set (match_dup 0) (float:DF (match_dup 2)))] + "operands[1] = adjust_address (operands[1], SFmode, 0);") + +(define_insn_and_split "*floatsidf2" + [(set (match_operand:DF 0 "register_operand" "=f") + (float:DF (match_operand:SI 1 "memory_operand" "m")))] + "TARGET_FP" + "#" + "&& reload_completed" + [(set (match_dup 3) (match_dup 1)) + (set (match_dup 2) (unspec:DI [(match_dup 3)] UNSPEC_CVTLQ)) + (set (match_dup 0) (float:DF (match_dup 2)))] +{ + operands[1] = adjust_address (operands[1], SFmode, 0); + operands[2] = gen_rtx_REG (DImode, REGNO (operands[0])); + operands[3] = gen_rtx_REG (SFmode, REGNO (operands[0])); +}) + +(define_expand "floatditf2" + [(use (match_operand:TF 0 "register_operand")) + (use (match_operand:DI 1 "general_operand"))] + "TARGET_HAS_XFLOATING_LIBS" + "alpha_emit_xfloating_cvt (FLOAT, operands); DONE;") + +(define_expand "floatunsdisf2" + [(use (match_operand:SF 0 "register_operand")) + (use (match_operand:DI 1 "register_operand"))] + "TARGET_FP" + "alpha_emit_floatuns (operands); DONE;") + +(define_expand "floatunsdidf2" + [(use (match_operand:DF 0 "register_operand")) + (use (match_operand:DI 1 "register_operand"))] + "TARGET_FP" + "alpha_emit_floatuns (operands); DONE;") + +(define_expand "floatunsditf2" + [(use (match_operand:TF 0 "register_operand")) + (use (match_operand:DI 1 "general_operand"))] + "TARGET_HAS_XFLOATING_LIBS" + "alpha_emit_xfloating_cvt (UNSIGNED_FLOAT, operands); DONE;") + +(define_expand "extendsfdf2" + [(set 
(match_operand:DF 0 "register_operand") + (float_extend:DF (match_operand:SF 1 "nonimmediate_operand")))] + "TARGET_FP" +{ + if (alpha_fptm >= ALPHA_FPTM_SU) + operands[1] = force_reg (SFmode, operands[1]); +}) + +;; The Unicos/Mk assembler doesn't support cvtst, but we've already +;; asserted that alpha_fptm == ALPHA_FPTM_N. + +(define_insn "*extendsfdf2_ieee" + [(set (match_operand:DF 0 "register_operand" "=&f") + (float_extend:DF (match_operand:SF 1 "register_operand" "f")))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "cvtsts %1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes")]) + +(define_insn "*extendsfdf2_internal" + [(set (match_operand:DF 0 "register_operand" "=f,f,m") + (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "f,m,f")))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "@ + cpys %1,%1,%0 + ld%, %0,%1 + st%- %1,%0" + [(set_attr "type" "fcpys,fld,fst")]) + +;; Use register_operand for operand 1 to prevent compress_float_constant +;; from doing something silly. When optimizing we'll put things back +;; together anyway. +(define_expand "extendsftf2" + [(use (match_operand:TF 0 "register_operand")) + (use (match_operand:SF 1 "register_operand"))] + "TARGET_HAS_XFLOATING_LIBS" +{ + rtx tmp = gen_reg_rtx (DFmode); + emit_insn (gen_extendsfdf2 (tmp, operands[1])); + emit_insn (gen_extenddftf2 (operands[0], tmp)); + DONE; +}) + +(define_expand "extenddftf2" + [(use (match_operand:TF 0 "register_operand")) + (use (match_operand:DF 1 "register_operand"))] + "TARGET_HAS_XFLOATING_LIBS" + "alpha_emit_xfloating_cvt (FLOAT_EXTEND, operands); DONE;") + +(define_insn "*truncdfsf2_ieee" + [(set (match_operand:SF 0 "register_operand" "=&f") + (float_truncate:SF (match_operand:DF 1 "reg_or_0_operand" "fG")))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "cvt%-%,%/ %R1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_insn "truncdfsf2" + [(set (match_operand:SF 0 "register_operand" "=f") + (float_truncate:SF (match_operand:DF 1 "reg_or_0_operand" "fG")))] + "TARGET_FP" + "cvt%-%,%/ %R1,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "round_suffix" "normal") + (set_attr "trap_suffix" "u_su_sui")]) + +(define_expand "trunctfdf2" + [(use (match_operand:DF 0 "register_operand")) + (use (match_operand:TF 1 "general_operand"))] + "TARGET_HAS_XFLOATING_LIBS" + "alpha_emit_xfloating_cvt (FLOAT_TRUNCATE, operands); DONE;") + +(define_expand "trunctfsf2" + [(use (match_operand:SF 0 "register_operand")) + (use (match_operand:TF 1 "general_operand"))] + "TARGET_FP && TARGET_HAS_XFLOATING_LIBS" +{ + rtx tmpf, sticky, arg, lo, hi; + + tmpf = gen_reg_rtx (DFmode); + sticky = gen_reg_rtx (DImode); + arg = copy_to_mode_reg (TFmode, operands[1]); + lo = gen_lowpart (DImode, arg); + hi = gen_highpart (DImode, arg); + + /* Convert the low word of the TFmode value into a sticky rounding bit, + then or it into the low bit of the high word. This leaves the sticky + bit at bit 48 of the fraction, which is representable in DFmode, + which prevents rounding error in the final conversion to SFmode. */ + + emit_insn (gen_rtx_SET (VOIDmode, sticky, + gen_rtx_NE (DImode, lo, const0_rtx))); + emit_insn (gen_iordi3 (hi, hi, sticky)); + emit_insn (gen_trunctfdf2 (tmpf, arg)); + emit_insn (gen_truncdfsf2 (operands[0], tmpf)); + DONE; +}) + +;; Next are all the integer comparisons, and conditional moves and branches +;; and some of the related define_expand's and define_split's. 
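The sticky-bit step of the trunctfsf2 expander above, isolated as C (a sketch of the word manipulation only; the surrounding TFmode arithmetic is not modelled and the function name is mine). Once the low word is folded into one bit of the high word, the TF to DF conversion cannot silently discard set low-order fraction bits, so the final DF to SF rounding decision comes out right:

#include <stdint.h>
#include <assert.h>

static uint64_t fold_sticky (uint64_t hi, uint64_t lo)
{
  uint64_t sticky = (lo != 0);   /* the NE against zero in the expander */
  return hi | sticky;            /* OR into the low bit of the high word */
}

int main (void)
{
  assert (fold_sticky (0x4000000000000000ull, 1) & 1);
  assert ((fold_sticky (0x4000000000000000ull, 0) & 1) == 0);
  return 0;
}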
+ +(define_insn "*setcc_internal" + [(set (match_operand 0 "register_operand" "=r") + (match_operator 1 "alpha_comparison_operator" + [(match_operand:DI 2 "register_operand" "r") + (match_operand:DI 3 "reg_or_8bit_operand" "rI")]))] + "GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT + && GET_MODE_SIZE (GET_MODE (operands[0])) <= 8 + && GET_MODE (operands[0]) == GET_MODE (operands[1])" + "cmp%C1 %2,%3,%0" + [(set_attr "type" "icmp")]) + +;; Yes, we can technically support reg_or_8bit_operand in operand 2, +;; but that's non-canonical rtl and allowing that causes inefficiencies +;; from cse on. +(define_insn "*setcc_swapped_internal" + [(set (match_operand 0 "register_operand" "=r") + (match_operator 1 "alpha_swapped_comparison_operator" + [(match_operand:DI 2 "register_operand" "r") + (match_operand:DI 3 "reg_or_0_operand" "rJ")]))] + "GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT + && GET_MODE_SIZE (GET_MODE (operands[0])) <= 8 + && GET_MODE (operands[0]) == GET_MODE (operands[1])" + "cmp%c1 %r3,%2,%0" + [(set_attr "type" "icmp")]) + +;; Use match_operator rather than ne directly so that we can match +;; multiple integer modes. +(define_insn "*setne_internal" + [(set (match_operand 0 "register_operand" "=r") + (match_operator 1 "signed_comparison_operator" + [(match_operand:DI 2 "register_operand" "r") + (const_int 0)]))] + "GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT + && GET_MODE_SIZE (GET_MODE (operands[0])) <= 8 + && GET_CODE (operands[1]) == NE + && GET_MODE (operands[0]) == GET_MODE (operands[1])" + "cmpult $31,%2,%0" + [(set_attr "type" "icmp")]) + +;; The mode folding trick can't be used with const_int operands, since +;; reload needs to know the proper mode. +;; +;; Use add_operand instead of the more seemingly natural reg_or_8bit_operand +;; in order to create more pairs of constants. As long as we're allowing +;; two constants at the same time, and will have to reload one of them... + +(define_insn "*mov<mode>cc_internal" + [(set (match_operand:IMODE 0 "register_operand" "=r,r,r,r") + (if_then_else:IMODE + (match_operator 2 "signed_comparison_operator" + [(match_operand:DI 3 "reg_or_0_operand" "rJ,rJ,J,J") + (match_operand:DI 4 "reg_or_0_operand" "J,J,rJ,rJ")]) + (match_operand:IMODE 1 "add_operand" "rI,0,rI,0") + (match_operand:IMODE 5 "add_operand" "0,rI,0,rI")))] + "(operands[3] == const0_rtx) ^ (operands[4] == const0_rtx)" + "@ + cmov%C2 %r3,%1,%0 + cmov%D2 %r3,%5,%0 + cmov%c2 %r4,%1,%0 + cmov%d2 %r4,%5,%0" + [(set_attr "type" "icmov")]) + +(define_insn "*mov<mode>cc_lbc" + [(set (match_operand:IMODE 0 "register_operand" "=r,r") + (if_then_else:IMODE + (eq (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ") + (const_int 1) + (const_int 0)) + (const_int 0)) + (match_operand:IMODE 1 "reg_or_8bit_operand" "rI,0") + (match_operand:IMODE 3 "reg_or_8bit_operand" "0,rI")))] + "" + "@ + cmovlbc %r2,%1,%0 + cmovlbs %r2,%3,%0" + [(set_attr "type" "icmov")]) + +(define_insn "*mov<mode>cc_lbs" + [(set (match_operand:IMODE 0 "register_operand" "=r,r") + (if_then_else:IMODE + (ne (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ") + (const_int 1) + (const_int 0)) + (const_int 0)) + (match_operand:IMODE 1 "reg_or_8bit_operand" "rI,0") + (match_operand:IMODE 3 "reg_or_8bit_operand" "0,rI")))] + "" + "@ + cmovlbs %r2,%1,%0 + cmovlbc %r2,%3,%0" + [(set_attr "type" "icmov")]) + +;; For ABS, we have two choices, depending on whether the input and output +;; registers are the same or not. 
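The *mov<mode>cc_lbc/_lbs patterns above in C terms: a select keyed only on bit 0 of the condition costs a single cmovlbs or cmovlbc. A sketch (function name is mine):

#include <stdint.h>
#include <assert.h>

static int64_t select_lowbit (int64_t c, int64_t a, int64_t b)
{
  return (c & 1) ? a : b;   /* cmovlbs c,a,dest, with dest preloaded with b */
}

int main (void)
{
  assert (select_lowbit (3, 10, 20) == 10);
  assert (select_lowbit (2, 10, 20) == 20);
  return 0;
}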
+(define_expand "absdi2" + [(set (match_operand:DI 0 "register_operand") + (abs:DI (match_operand:DI 1 "register_operand")))] + "" +{ + if (rtx_equal_p (operands[0], operands[1])) + emit_insn (gen_absdi2_same (operands[0], gen_reg_rtx (DImode))); + else + emit_insn (gen_absdi2_diff (operands[0], operands[1])); + DONE; +}) + +(define_expand "absdi2_same" + [(set (match_operand:DI 1 "register_operand") + (neg:DI (match_operand:DI 0 "register_operand"))) + (set (match_dup 0) + (if_then_else:DI (ge (match_dup 0) (const_int 0)) + (match_dup 0) + (match_dup 1)))]) + +(define_expand "absdi2_diff" + [(set (match_operand:DI 0 "register_operand") + (neg:DI (match_operand:DI 1 "register_operand"))) + (set (match_dup 0) + (if_then_else:DI (lt (match_dup 1) (const_int 0)) + (match_dup 0) + (match_dup 1)))]) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (abs:DI (match_dup 0))) + (clobber (match_operand:DI 1 "register_operand"))] + "" + [(set (match_dup 1) (neg:DI (match_dup 0))) + (set (match_dup 0) (if_then_else:DI (ge (match_dup 0) (const_int 0)) + (match_dup 0) (match_dup 1)))]) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (abs:DI (match_operand:DI 1 "register_operand")))] + "! rtx_equal_p (operands[0], operands[1])" + [(set (match_dup 0) (neg:DI (match_dup 1))) + (set (match_dup 0) (if_then_else:DI (lt (match_dup 1) (const_int 0)) + (match_dup 0) (match_dup 1)))]) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (neg:DI (abs:DI (match_dup 0)))) + (clobber (match_operand:DI 1 "register_operand"))] + "" + [(set (match_dup 1) (neg:DI (match_dup 0))) + (set (match_dup 0) (if_then_else:DI (le (match_dup 0) (const_int 0)) + (match_dup 0) (match_dup 1)))]) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (neg:DI (abs:DI (match_operand:DI 1 "register_operand"))))] + "! 
rtx_equal_p (operands[0], operands[1])" + [(set (match_dup 0) (neg:DI (match_dup 1))) + (set (match_dup 0) (if_then_else:DI (gt (match_dup 1) (const_int 0)) + (match_dup 0) (match_dup 1)))]) + +(define_insn "<code><mode>3" + [(set (match_operand:I12MODE 0 "register_operand" "=r") + (any_maxmin:I12MODE + (match_operand:I12MODE 1 "reg_or_0_operand" "%rJ") + (match_operand:I12MODE 2 "reg_or_8bit_operand" "rI")))] + "TARGET_MAX" + "<maxmin><vecmodesuffix> %r1,%2,%0" + [(set_attr "type" "mvi")]) + +(define_expand "smaxdi3" + [(set (match_dup 3) + (le:DI (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand"))) + (set (match_operand:DI 0 "register_operand") + (if_then_else:DI (eq (match_dup 3) (const_int 0)) + (match_dup 1) (match_dup 2)))] + "" + "operands[3] = gen_reg_rtx (DImode);") + +(define_split + [(set (match_operand:DI 0 "register_operand") + (smax:DI (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand"))) + (clobber (match_operand:DI 3 "register_operand"))] + "operands[2] != const0_rtx" + [(set (match_dup 3) (le:DI (match_dup 1) (match_dup 2))) + (set (match_dup 0) (if_then_else:DI (eq (match_dup 3) (const_int 0)) + (match_dup 1) (match_dup 2)))]) + +(define_insn "*smax_const0" + [(set (match_operand:DI 0 "register_operand" "=r") + (smax:DI (match_operand:DI 1 "register_operand" "0") + (const_int 0)))] + "" + "cmovlt %0,0,%0" + [(set_attr "type" "icmov")]) + +(define_expand "smindi3" + [(set (match_dup 3) + (lt:DI (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand"))) + (set (match_operand:DI 0 "register_operand") + (if_then_else:DI (ne (match_dup 3) (const_int 0)) + (match_dup 1) (match_dup 2)))] + "" + "operands[3] = gen_reg_rtx (DImode);") + +(define_split + [(set (match_operand:DI 0 "register_operand") + (smin:DI (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand"))) + (clobber (match_operand:DI 3 "register_operand"))] + "operands[2] != const0_rtx" + [(set (match_dup 3) (lt:DI (match_dup 1) (match_dup 2))) + (set (match_dup 0) (if_then_else:DI (ne (match_dup 3) (const_int 0)) + (match_dup 1) (match_dup 2)))]) + +(define_insn "*smin_const0" + [(set (match_operand:DI 0 "register_operand" "=r") + (smin:DI (match_operand:DI 1 "register_operand" "0") + (const_int 0)))] + "" + "cmovgt %0,0,%0" + [(set_attr "type" "icmov")]) + +(define_expand "umaxdi3" + [(set (match_dup 3) + (leu:DI (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand"))) + (set (match_operand:DI 0 "register_operand") + (if_then_else:DI (eq (match_dup 3) (const_int 0)) + (match_dup 1) (match_dup 2)))] + "" + "operands[3] = gen_reg_rtx (DImode);") + +(define_split + [(set (match_operand:DI 0 "register_operand") + (umax:DI (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand"))) + (clobber (match_operand:DI 3 "register_operand"))] + "operands[2] != const0_rtx" + [(set (match_dup 3) (leu:DI (match_dup 1) (match_dup 2))) + (set (match_dup 0) (if_then_else:DI (eq (match_dup 3) (const_int 0)) + (match_dup 1) (match_dup 2)))]) + +(define_expand "umindi3" + [(set (match_dup 3) + (ltu:DI (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand"))) + (set (match_operand:DI 0 "register_operand") + (if_then_else:DI (ne (match_dup 3) (const_int 0)) + (match_dup 1) (match_dup 2)))] + "" + "operands[3] = gen_reg_rtx (DImode);") + +(define_split + [(set (match_operand:DI 0 "register_operand") + (umin:DI (match_operand:DI 1 
"reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand"))) + (clobber (match_operand:DI 3 "register_operand"))] + "operands[2] != const0_rtx" + [(set (match_dup 3) (ltu:DI (match_dup 1) (match_dup 2))) + (set (match_dup 0) (if_then_else:DI (ne (match_dup 3) (const_int 0)) + (match_dup 1) (match_dup 2)))]) + +(define_insn "*bcc_normal" + [(set (pc) + (if_then_else + (match_operator 1 "signed_comparison_operator" + [(match_operand:DI 2 "reg_or_0_operand" "rJ") + (const_int 0)]) + (label_ref (match_operand 0)) + (pc)))] + "" + "b%C1 %r2,%0" + [(set_attr "type" "ibr")]) + +(define_insn "*bcc_reverse" + [(set (pc) + (if_then_else + (match_operator 1 "signed_comparison_operator" + [(match_operand:DI 2 "register_operand" "r") + (const_int 0)]) + + (pc) + (label_ref (match_operand 0))))] + "" + "b%c1 %2,%0" + [(set_attr "type" "ibr")]) + +(define_insn "*blbs_normal" + [(set (pc) + (if_then_else + (ne (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") + (const_int 1) + (const_int 0)) + (const_int 0)) + (label_ref (match_operand 0)) + (pc)))] + "" + "blbs %r1,%0" + [(set_attr "type" "ibr")]) + +(define_insn "*blbc_normal" + [(set (pc) + (if_then_else + (eq (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") + (const_int 1) + (const_int 0)) + (const_int 0)) + (label_ref (match_operand 0)) + (pc)))] + "" + "blbc %r1,%0" + [(set_attr "type" "ibr")]) + +(define_split + [(parallel + [(set (pc) + (if_then_else + (match_operator 1 "comparison_operator" + [(zero_extract:DI (match_operand:DI 2 "register_operand") + (const_int 1) + (match_operand:DI 3 "const_int_operand")) + (const_int 0)]) + (label_ref (match_operand 0)) + (pc))) + (clobber (match_operand:DI 4 "register_operand"))])] + "INTVAL (operands[3]) != 0" + [(set (match_dup 4) + (lshiftrt:DI (match_dup 2) (match_dup 3))) + (set (pc) + (if_then_else (match_op_dup 1 + [(zero_extract:DI (match_dup 4) + (const_int 1) + (const_int 0)) + (const_int 0)]) + (label_ref (match_dup 0)) + (pc)))] + ) + +;; The following are the corresponding floating-point insns. Recall +;; we need to have variants that expand the arguments from SFmode +;; to DFmode. 
+ +(define_insn "*cmpdf_ieee" + [(set (match_operand:DF 0 "register_operand" "=&f") + (match_operator:DF 1 "alpha_fp_comparison_operator" + [(match_operand:DF 2 "reg_or_0_operand" "fG") + (match_operand:DF 3 "reg_or_0_operand" "fG")]))] + "TARGET_FP && alpha_fptm >= ALPHA_FPTM_SU" + "cmp%-%C1%/ %R2,%R3,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "trap_suffix" "su")]) + +(define_insn "*cmpdf_internal" + [(set (match_operand:DF 0 "register_operand" "=f") + (match_operator:DF 1 "alpha_fp_comparison_operator" + [(match_operand:DF 2 "reg_or_0_operand" "fG") + (match_operand:DF 3 "reg_or_0_operand" "fG")]))] + "TARGET_FP" + "cmp%-%C1%/ %R2,%R3,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "trap_suffix" "su")]) + +(define_insn "*cmpdf_ext1" + [(set (match_operand:DF 0 "register_operand" "=f") + (match_operator:DF 1 "alpha_fp_comparison_operator" + [(float_extend:DF + (match_operand:SF 2 "reg_or_0_operand" "fG")) + (match_operand:DF 3 "reg_or_0_operand" "fG")]))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "cmp%-%C1%/ %R2,%R3,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "trap_suffix" "su")]) + +(define_insn "*cmpdf_ext2" + [(set (match_operand:DF 0 "register_operand" "=f") + (match_operator:DF 1 "alpha_fp_comparison_operator" + [(match_operand:DF 2 "reg_or_0_operand" "fG") + (float_extend:DF + (match_operand:SF 3 "reg_or_0_operand" "fG"))]))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "cmp%-%C1%/ %R2,%R3,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "trap_suffix" "su")]) + +(define_insn "*cmpdf_ext3" + [(set (match_operand:DF 0 "register_operand" "=f") + (match_operator:DF 1 "alpha_fp_comparison_operator" + [(float_extend:DF + (match_operand:SF 2 "reg_or_0_operand" "fG")) + (float_extend:DF + (match_operand:SF 3 "reg_or_0_operand" "fG"))]))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "cmp%-%C1%/ %R2,%R3,%0" + [(set_attr "type" "fadd") + (set_attr "trap" "yes") + (set_attr "trap_suffix" "su")]) + +(define_insn "*mov<mode>cc_internal" + [(set (match_operand:FMODE 0 "register_operand" "=f,f") + (if_then_else:FMODE + (match_operator 3 "signed_comparison_operator" + [(match_operand:DF 4 "reg_or_0_operand" "fG,fG") + (match_operand:DF 2 "const0_operand" "G,G")]) + (match_operand:FMODE 1 "reg_or_0_operand" "fG,0") + (match_operand:FMODE 5 "reg_or_0_operand" "0,fG")))] + "TARGET_FP" + "@ + fcmov%C3 %R4,%R1,%0 + fcmov%D3 %R4,%R5,%0" + [(set_attr "type" "fcmov")]) + +(define_insn "*movdfcc_ext1" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (if_then_else:DF + (match_operator 3 "signed_comparison_operator" + [(match_operand:DF 4 "reg_or_0_operand" "fG,fG") + (match_operand:DF 2 "const0_operand" "G,G")]) + (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG,0")) + (match_operand:DF 5 "reg_or_0_operand" "0,fG")))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "@ + fcmov%C3 %R4,%R1,%0 + fcmov%D3 %R4,%R5,%0" + [(set_attr "type" "fcmov")]) + +(define_insn "*movdfcc_ext2" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (if_then_else:DF + (match_operator 3 "signed_comparison_operator" + [(float_extend:DF + (match_operand:SF 4 "reg_or_0_operand" "fG,fG")) + (match_operand:DF 2 "const0_operand" "G,G")]) + (match_operand:DF 1 "reg_or_0_operand" "fG,0") + (match_operand:DF 5 "reg_or_0_operand" "0,fG")))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "@ + fcmov%C3 %R4,%R1,%0 + fcmov%D3 %R4,%R5,%0" + [(set_attr "type" "fcmov")]) + +(define_insn "*movdfcc_ext3" + [(set (match_operand:SF 0 
"register_operand" "=f,f") + (if_then_else:SF + (match_operator 3 "signed_comparison_operator" + [(float_extend:DF + (match_operand:SF 4 "reg_or_0_operand" "fG,fG")) + (match_operand:DF 2 "const0_operand" "G,G")]) + (match_operand:SF 1 "reg_or_0_operand" "fG,0") + (match_operand:SF 5 "reg_or_0_operand" "0,fG")))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "@ + fcmov%C3 %R4,%R1,%0 + fcmov%D3 %R4,%R5,%0" + [(set_attr "type" "fcmov")]) + +(define_insn "*movdfcc_ext4" + [(set (match_operand:DF 0 "register_operand" "=f,f") + (if_then_else:DF + (match_operator 3 "signed_comparison_operator" + [(float_extend:DF + (match_operand:SF 4 "reg_or_0_operand" "fG,fG")) + (match_operand:DF 2 "const0_operand" "G,G")]) + (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG,0")) + (match_operand:DF 5 "reg_or_0_operand" "0,fG")))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" + "@ + fcmov%C3 %R4,%R1,%0 + fcmov%D3 %R4,%R5,%0" + [(set_attr "type" "fcmov")]) + +(define_expand "smaxdf3" + [(set (match_dup 3) + (le:DF (match_operand:DF 1 "reg_or_0_operand") + (match_operand:DF 2 "reg_or_0_operand"))) + (set (match_operand:DF 0 "register_operand") + (if_then_else:DF (eq (match_dup 3) (match_dup 4)) + (match_dup 1) (match_dup 2)))] + "TARGET_FP" +{ + operands[3] = gen_reg_rtx (DFmode); + operands[4] = CONST0_RTX (DFmode); +}) + +(define_expand "smindf3" + [(set (match_dup 3) + (lt:DF (match_operand:DF 1 "reg_or_0_operand") + (match_operand:DF 2 "reg_or_0_operand"))) + (set (match_operand:DF 0 "register_operand") + (if_then_else:DF (ne (match_dup 3) (match_dup 4)) + (match_dup 1) (match_dup 2)))] + "TARGET_FP" +{ + operands[3] = gen_reg_rtx (DFmode); + operands[4] = CONST0_RTX (DFmode); +}) + +(define_expand "smaxsf3" + [(set (match_dup 3) + (le:DF (float_extend:DF (match_operand:SF 1 "reg_or_0_operand")) + (float_extend:DF (match_operand:SF 2 "reg_or_0_operand")))) + (set (match_operand:SF 0 "register_operand") + (if_then_else:SF (eq (match_dup 3) (match_dup 4)) + (match_dup 1) (match_dup 2)))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" +{ + operands[3] = gen_reg_rtx (DFmode); + operands[4] = CONST0_RTX (DFmode); +}) + +(define_expand "sminsf3" + [(set (match_dup 3) + (lt:DF (float_extend:DF (match_operand:SF 1 "reg_or_0_operand")) + (float_extend:DF (match_operand:SF 2 "reg_or_0_operand")))) + (set (match_operand:SF 0 "register_operand") + (if_then_else:SF (ne (match_dup 3) (match_dup 4)) + (match_dup 1) (match_dup 2)))] + "TARGET_FP && alpha_fptm < ALPHA_FPTM_SU" +{ + operands[3] = gen_reg_rtx (DFmode); + operands[4] = CONST0_RTX (DFmode); +}) + +(define_insn "*fbcc_normal" + [(set (pc) + (if_then_else + (match_operator 1 "signed_comparison_operator" + [(match_operand:DF 2 "reg_or_0_operand" "fG") + (match_operand:DF 3 "const0_operand" "G")]) + (label_ref (match_operand 0)) + (pc)))] + "TARGET_FP" + "fb%C1 %R2,%0" + [(set_attr "type" "fbr")]) + +(define_insn "*fbcc_ext_normal" + [(set (pc) + (if_then_else + (match_operator 1 "signed_comparison_operator" + [(float_extend:DF + (match_operand:SF 2 "reg_or_0_operand" "fG")) + (match_operand:DF 3 "const0_operand" "G")]) + (label_ref (match_operand 0)) + (pc)))] + "TARGET_FP" + "fb%C1 %R2,%0" + [(set_attr "type" "fbr")]) + +;; These are the main define_expand's used to make conditional branches +;; and compares. 
+
+(define_expand "cbranchdf4"
+  [(use (match_operator 0 "alpha_cbranch_operator"
+	 [(match_operand:DF 1 "reg_or_0_operand")
+	  (match_operand:DF 2 "reg_or_0_operand")]))
+   (use (match_operand 3))]
+  "TARGET_FP"
+  "alpha_emit_conditional_branch (operands, DFmode); DONE;")
+
+(define_expand "cbranchtf4"
+  [(use (match_operator 0 "alpha_cbranch_operator"
+	 [(match_operand:TF 1 "general_operand")
+	  (match_operand:TF 2 "general_operand")]))
+   (use (match_operand 3))]
+  "TARGET_HAS_XFLOATING_LIBS"
+  "alpha_emit_conditional_branch (operands, TFmode); DONE;")
+
+(define_expand "cbranchdi4"
+  [(use (match_operator 0 "alpha_cbranch_operator"
+	 [(match_operand:DI 1 "some_operand")
+	  (match_operand:DI 2 "some_operand")]))
+   (use (match_operand 3))]
+  ""
+  "alpha_emit_conditional_branch (operands, DImode); DONE;")
+
+(define_expand "cstoredf4"
+  [(use (match_operator:DI 1 "alpha_cbranch_operator"
+	 [(match_operand:DF 2 "reg_or_0_operand")
+	  (match_operand:DF 3 "reg_or_0_operand")]))
+   (clobber (match_operand:DI 0 "register_operand"))]
+  "TARGET_FP"
+{
+  if (alpha_emit_setcc (operands, DFmode))
+    DONE;
+  else
+    FAIL;
+})
+
+(define_expand "cstoretf4"
+  [(use (match_operator:DI 1 "alpha_cbranch_operator"
+	 [(match_operand:TF 2 "general_operand")
+	  (match_operand:TF 3 "general_operand")]))
+   (clobber (match_operand:DI 0 "register_operand"))]
+  "TARGET_HAS_XFLOATING_LIBS"
+{
+  if (alpha_emit_setcc (operands, TFmode))
+    DONE;
+  else
+    FAIL;
+})
+
+(define_expand "cstoredi4"
+  [(use (match_operator:DI 1 "alpha_cbranch_operator"
+	 [(match_operand:DI 2 "some_operand")
+	  (match_operand:DI 3 "some_operand")]))
+   (clobber (match_operand:DI 0 "register_operand"))]
+  ""
+{
+  if (alpha_emit_setcc (operands, DImode))
+    DONE;
+  else
+    FAIL;
+})
+
+;; These are the main define_expand's used to make conditional moves.
+
+(define_expand "mov<mode>cc"
+  [(set (match_operand:I48MODE 0 "register_operand")
+	(if_then_else:I48MODE
+	  (match_operand 1 "comparison_operator")
+	  (match_operand:I48MODE 2 "reg_or_8bit_operand")
+	  (match_operand:I48MODE 3 "reg_or_8bit_operand")))]
+  ""
+{
+  operands[1] = alpha_emit_conditional_move (operands[1], <MODE>mode);
+  if (operands[1] == 0)
+    FAIL;
+})
+
+(define_expand "mov<mode>cc"
+  [(set (match_operand:FMODE 0 "register_operand")
+	(if_then_else:FMODE
+	  (match_operand 1 "comparison_operator")
+	  (match_operand:FMODE 2 "reg_or_8bit_operand")
+	  (match_operand:FMODE 3 "reg_or_8bit_operand")))]
+  ""
+{
+  operands[1] = alpha_emit_conditional_move (operands[1], <MODE>mode);
+  if (operands[1] == 0)
+    FAIL;
+})
+
+;; These define_split definitions are used in cases when comparisons have
+;; not been stated in the correct way and we need to reverse the second
+;; comparison.  For example, x >= 7 has to be done as x < 7 with the
+;; comparison that tests the result being reversed.  We have one define_split
+;; for each use of a comparison.  They do not match valid insns and need
+;; not generate valid insns.
+;;
+;; We can also handle equality comparisons (and inequality comparisons in
+;; cases where the resulting add cannot overflow) by doing an add followed by
+;; a comparison with zero.  This is faster since the addition takes one
+;; less cycle than a compare when feeding into a conditional move.
+;; For this case, we also have an SImode pattern since we can merge the add
+;; and sign extend and the order doesn't matter.
+;;
+;; We do not do this for floating-point, since it isn't clear how the "wrong"
+;; operation could have been generated.
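Two of the rewrites described above, as C (function names are mine). Alpha has only lt/le/eq compares, so x >= 7 is computed as x < 7 with the selection reversed; and an equality test against a constant becomes a subtract feeding a compare with zero, which the comment notes is a cycle faster ahead of a conditional move:

#include <assert.h>

static long ge7 (long x, long a, long b) { return (x < 7) ? b : a; }
static long eq7 (long x, long a, long b) { long t = x - 7; return t == 0 ? a : b; }

int main (void)
{
  assert (ge7 (7, 1, 2) == 1 && ge7 (6, 1, 2) == 2);
  assert (eq7 (7, 1, 2) == 1 && eq7 (8, 1, 2) == 2);
  return 0;
}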
+ +(define_split + [(set (match_operand:DI 0 "register_operand") + (if_then_else:DI + (match_operator 1 "comparison_operator" + [(match_operand:DI 2 "reg_or_0_operand") + (match_operand:DI 3 "reg_or_cint_operand")]) + (match_operand:DI 4 "reg_or_cint_operand") + (match_operand:DI 5 "reg_or_cint_operand"))) + (clobber (match_operand:DI 6 "register_operand"))] + "operands[3] != const0_rtx" + [(set (match_dup 6) (match_dup 7)) + (set (match_dup 0) + (if_then_else:DI (match_dup 8) (match_dup 4) (match_dup 5)))] +{ + enum rtx_code code = GET_CODE (operands[1]); + int unsignedp = (code == GEU || code == LEU || code == GTU || code == LTU); + + /* If we are comparing for equality with a constant and that constant + appears in the arm when the register equals the constant, use the + register since that is more likely to match (and to produce better code + if both would). */ + + if (code == EQ && CONST_INT_P (operands[3]) + && rtx_equal_p (operands[4], operands[3])) + operands[4] = operands[2]; + + else if (code == NE && CONST_INT_P (operands[3]) + && rtx_equal_p (operands[5], operands[3])) + operands[5] = operands[2]; + + if (code == NE || code == EQ + || (extended_count (operands[2], DImode, unsignedp) >= 1 + && extended_count (operands[3], DImode, unsignedp) >= 1)) + { + if (CONST_INT_P (operands[3])) + operands[7] = gen_rtx_PLUS (DImode, operands[2], + GEN_INT (- INTVAL (operands[3]))); + else + operands[7] = gen_rtx_MINUS (DImode, operands[2], operands[3]); + + operands[8] = gen_rtx_fmt_ee (code, VOIDmode, operands[6], const0_rtx); + } + + else if (code == EQ || code == LE || code == LT + || code == LEU || code == LTU) + { + operands[7] = gen_rtx_fmt_ee (code, DImode, operands[2], operands[3]); + operands[8] = gen_rtx_NE (VOIDmode, operands[6], const0_rtx); + } + else + { + operands[7] = gen_rtx_fmt_ee (reverse_condition (code), DImode, + operands[2], operands[3]); + operands[8] = gen_rtx_EQ (VOIDmode, operands[6], const0_rtx); + } +}) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (if_then_else:DI + (match_operator 1 "comparison_operator" + [(match_operand:SI 2 "reg_or_0_operand") + (match_operand:SI 3 "reg_or_cint_operand")]) + (match_operand:DI 4 "reg_or_8bit_operand") + (match_operand:DI 5 "reg_or_8bit_operand"))) + (clobber (match_operand:DI 6 "register_operand"))] + "operands[3] != const0_rtx + && (GET_CODE (operands[1]) == EQ || GET_CODE (operands[1]) == NE)" + [(set (match_dup 6) (match_dup 7)) + (set (match_dup 0) + (if_then_else:DI (match_dup 8) (match_dup 4) (match_dup 5)))] +{ + enum rtx_code code = GET_CODE (operands[1]); + int unsignedp = (code == GEU || code == LEU || code == GTU || code == LTU); + rtx tem; + + if ((code != NE && code != EQ + && ! (extended_count (operands[2], DImode, unsignedp) >= 1 + && extended_count (operands[3], DImode, unsignedp) >= 1))) + FAIL; + + if (CONST_INT_P (operands[3])) + tem = gen_rtx_PLUS (SImode, operands[2], + GEN_INT (- INTVAL (operands[3]))); + else + tem = gen_rtx_MINUS (SImode, operands[2], operands[3]); + + operands[7] = gen_rtx_SIGN_EXTEND (DImode, tem); + operands[8] = gen_rtx_fmt_ee (GET_CODE (operands[1]), VOIDmode, + operands[6], const0_rtx); +}) + +;; Prefer to use cmp and arithmetic when possible instead of a cmove. 
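What the *cmp_sadd patterns below buy, sketched in C (function name is mine): a conditional {4,0} (or {8,0}) feeding an add collapses to a compare producing a 0/1 flag plus one scaled add (s4addq/s8addq), with no conditional move at all:

#include <assert.h>

static long model_cmp_sadd (long x, long y)
{
  long t = (x != 0);   /* cmpult $31,x,t: a 0/1 flag */
  return t * 4 + y;    /* s4addq t,y,dest */
}

int main (void)
{
  assert (model_cmp_sadd (5, 100) == 104);
  assert (model_cmp_sadd (0, 100) == 100);
  return 0;
}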
+ +(define_split + [(set (match_operand 0 "register_operand") + (if_then_else (match_operator 1 "signed_comparison_operator" + [(match_operand:DI 2 "reg_or_0_operand") + (const_int 0)]) + (match_operand 3 "const_int_operand") + (match_operand 4 "const_int_operand")))] + "" + [(const_int 0)] +{ + if (alpha_split_conditional_move (GET_CODE (operands[1]), operands[0], + operands[2], operands[3], operands[4])) + DONE; + else + FAIL; +}) + +;; ??? Why combine is allowed to create such non-canonical rtl, I don't know. +;; Oh well, we match it in movcc, so it must be partially our fault. +(define_split + [(set (match_operand 0 "register_operand") + (if_then_else (match_operator 1 "signed_comparison_operator" + [(const_int 0) + (match_operand:DI 2 "reg_or_0_operand")]) + (match_operand 3 "const_int_operand") + (match_operand 4 "const_int_operand")))] + "" + [(const_int 0)] +{ + if (alpha_split_conditional_move (swap_condition (GET_CODE (operands[1])), + operands[0], operands[2], operands[3], + operands[4])) + DONE; + else + FAIL; +}) + +(define_insn_and_split "*cmp_sadd_di" + [(set (match_operand:DI 0 "register_operand" "=r") + (plus:DI (if_then_else:DI + (match_operator 1 "alpha_zero_comparison_operator" + [(match_operand:DI 2 "reg_or_0_operand" "rJ") + (const_int 0)]) + (match_operand:DI 3 "const48_operand" "I") + (const_int 0)) + (match_operand:DI 4 "sext_add_operand" "rIO"))) + (clobber (match_scratch:DI 5 "=r"))] + "" + "#" + "" + [(set (match_dup 5) + (match_op_dup:DI 1 [(match_dup 2) (const_int 0)])) + (set (match_dup 0) + (plus:DI (mult:DI (match_dup 5) (match_dup 3)) + (match_dup 4)))] +{ + if (can_create_pseudo_p ()) + operands[5] = gen_reg_rtx (DImode); + else if (reg_overlap_mentioned_p (operands[5], operands[4])) + operands[5] = operands[0]; +}) + +(define_insn_and_split "*cmp_sadd_si" + [(set (match_operand:SI 0 "register_operand" "=r") + (plus:SI (if_then_else:SI + (match_operator 1 "alpha_zero_comparison_operator" + [(match_operand:DI 2 "reg_or_0_operand" "rJ") + (const_int 0)]) + (match_operand:SI 3 "const48_operand" "I") + (const_int 0)) + (match_operand:SI 4 "sext_add_operand" "rIO"))) + (clobber (match_scratch:DI 5 "=r"))] + "" + "#" + "" + [(set (match_dup 5) + (match_op_dup:DI 1 [(match_dup 2) (const_int 0)])) + (set (match_dup 0) + (plus:SI (mult:SI (match_dup 6) (match_dup 3)) + (match_dup 4)))] +{ + if (can_create_pseudo_p ()) + operands[5] = gen_reg_rtx (DImode); + else if (reg_overlap_mentioned_p (operands[5], operands[4])) + operands[5] = gen_lowpart (DImode, operands[0]); + + operands[6] = gen_lowpart (SImode, operands[5]); +}) + +(define_insn_and_split "*cmp_sadd_sidi" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI + (plus:SI (if_then_else:SI + (match_operator 1 "alpha_zero_comparison_operator" + [(match_operand:DI 2 "reg_or_0_operand" "rJ") + (const_int 0)]) + (match_operand:SI 3 "const48_operand" "I") + (const_int 0)) + (match_operand:SI 4 "sext_add_operand" "rIO")))) + (clobber (match_scratch:DI 5 "=r"))] + "" + "#" + "" + [(set (match_dup 5) + (match_op_dup:DI 1 [(match_dup 2) (const_int 0)])) + (set (match_dup 0) + (sign_extend:DI (plus:SI (mult:SI (match_dup 6) (match_dup 3)) + (match_dup 4))))] +{ + if (can_create_pseudo_p ()) + operands[5] = gen_reg_rtx (DImode); + else if (reg_overlap_mentioned_p (operands[5], operands[4])) + operands[5] = operands[0]; + + operands[6] = gen_lowpart (SImode, operands[5]); +}) + +(define_insn_and_split "*cmp_ssub_di" + [(set (match_operand:DI 0 "register_operand" "=r") + (minus:DI (if_then_else:DI + 
	     (match_operator 1 "alpha_zero_comparison_operator"
+	       [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+	        (const_int 0)])
+	     (match_operand:DI 3 "const48_operand" "I")
+	     (const_int 0))
+	   (match_operand:DI 4 "reg_or_8bit_operand" "rI")))
+   (clobber (match_scratch:DI 5 "=r"))]
+  ""
+  "#"
+  ""
+  [(set (match_dup 5)
+        (match_op_dup:DI 1 [(match_dup 2) (const_int 0)]))
+   (set (match_dup 0)
+        (minus:DI (mult:DI (match_dup 5) (match_dup 3))
+                  (match_dup 4)))]
+{
+  if (can_create_pseudo_p ())
+    operands[5] = gen_reg_rtx (DImode);
+  else if (reg_overlap_mentioned_p (operands[5], operands[4]))
+    operands[5] = operands[0];
+})
+
+(define_insn_and_split "*cmp_ssub_si"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (minus:SI (if_then_else:SI
+                    (match_operator 1 "alpha_zero_comparison_operator"
+                      [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+                       (const_int 0)])
+                    (match_operand:SI 3 "const48_operand" "I")
+                    (const_int 0))
+                  (match_operand:SI 4 "reg_or_8bit_operand" "rI")))
+   (clobber (match_scratch:DI 5 "=r"))]
+  ""
+  "#"
+  ""
+  [(set (match_dup 5)
+        (match_op_dup:DI 1 [(match_dup 2) (const_int 0)]))
+   (set (match_dup 0)
+        (minus:SI (mult:SI (match_dup 6) (match_dup 3))
+                  (match_dup 4)))]
+{
+  if (can_create_pseudo_p ())
+    operands[5] = gen_reg_rtx (DImode);
+  else if (reg_overlap_mentioned_p (operands[5], operands[4]))
+    operands[5] = gen_lowpart (DImode, operands[0]);
+
+  operands[6] = gen_lowpart (SImode, operands[5]);
+})
+
+(define_insn_and_split "*cmp_ssub_sidi"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (sign_extend:DI
+          (minus:SI (if_then_else:SI
+                      (match_operator 1 "alpha_zero_comparison_operator"
+                        [(match_operand:DI 2 "reg_or_0_operand" "rJ")
+                         (const_int 0)])
+                      (match_operand:SI 3 "const48_operand" "I")
+                      (const_int 0))
+                    (match_operand:SI 4 "reg_or_8bit_operand" "rI"))))
+   (clobber (match_scratch:DI 5 "=r"))]
+  ""
+  "#"
+  ""
+  [(set (match_dup 5)
+        (match_op_dup:DI 1 [(match_dup 2) (const_int 0)]))
+   (set (match_dup 0)
+        (sign_extend:DI (minus:SI (mult:SI (match_dup 6) (match_dup 3))
+                                  (match_dup 4))))]
+{
+  if (can_create_pseudo_p ())
+    operands[5] = gen_reg_rtx (DImode);
+  else if (reg_overlap_mentioned_p (operands[5], operands[4]))
+    operands[5] = operands[0];
+
+  operands[6] = gen_lowpart (SImode, operands[5]);
+})
+
+;; Here are the CALL and unconditional branch insns.  Calls on VMS and OSF
+;; work differently, so we have different patterns for each.
+
+(define_expand "call"
+  [(use (match_operand:DI 0))
+   (use (match_operand 1))
+   (use (match_operand 2))
+   (use (match_operand 3))]
+  ""
+{
+  if (TARGET_ABI_OPEN_VMS)
+    emit_call_insn (gen_call_vms (operands[0], operands[2]));
+  else
+    emit_call_insn (gen_call_osf (operands[0], operands[1]));
+  DONE;
+})
+
+(define_expand "sibcall"
+  [(parallel [(call (mem:DI (match_operand 0))
+                    (match_operand 1))
+              (unspec [(reg:DI 29)] UNSPEC_SIBCALL)])]
+  "TARGET_ABI_OSF"
+{
+  gcc_assert (MEM_P (operands[0]));
+  operands[0] = XEXP (operands[0], 0);
+})
+
+(define_expand "call_osf"
+  [(parallel [(call (mem:DI (match_operand 0))
+                    (match_operand 1))
+              (use (reg:DI 29))
+              (clobber (reg:DI 26))])]
+  ""
+{
+  gcc_assert (MEM_P (operands[0]));
+
+  operands[0] = XEXP (operands[0], 0);
+  if (!
call_operand (operands[0], Pmode)) + operands[0] = copy_to_mode_reg (Pmode, operands[0]); +}) + +;; +;; call openvms/alpha +;; op 0: symbol ref for called function +;; op 1: next_arg_reg (argument information value for R25) +;; +(define_expand "call_vms" + [(parallel [(call (mem:DI (match_operand 0)) + (match_operand 1)) + (use (match_dup 2)) + (use (reg:DI 25)) + (use (reg:DI 26)) + (clobber (reg:DI 27))])] + "" +{ + gcc_assert (MEM_P (operands[0])); + + operands[0] = XEXP (operands[0], 0); + + /* Always load AI with argument information, then handle symbolic and + indirect call differently. Load RA and set operands[2] to PV in + both cases. */ + + emit_move_insn (gen_rtx_REG (DImode, 25), operands[1]); + if (GET_CODE (operands[0]) == SYMBOL_REF) + { + operands[2] = const0_rtx; + } + else + { + emit_move_insn (gen_rtx_REG (Pmode, 26), + gen_rtx_MEM (Pmode, plus_constant (Pmode, + operands[0], 8))); + operands[2] = operands[0]; + } +}) + +(define_expand "call_value" + [(use (match_operand 0)) + (use (match_operand:DI 1)) + (use (match_operand 2)) + (use (match_operand 3)) + (use (match_operand 4))] + "" +{ + if (TARGET_ABI_OPEN_VMS) + emit_call_insn (gen_call_value_vms (operands[0], operands[1], + operands[3])); + else + emit_call_insn (gen_call_value_osf (operands[0], operands[1], + operands[2])); + DONE; +}) + +(define_expand "sibcall_value" + [(parallel [(set (match_operand 0) + (call (mem:DI (match_operand 1)) + (match_operand 2))) + (unspec [(reg:DI 29)] UNSPEC_SIBCALL)])] + "TARGET_ABI_OSF" +{ + gcc_assert (MEM_P (operands[1])); + operands[1] = XEXP (operands[1], 0); +}) + +(define_expand "call_value_osf" + [(parallel [(set (match_operand 0) + (call (mem:DI (match_operand 1)) + (match_operand 2))) + (use (reg:DI 29)) + (clobber (reg:DI 26))])] + "" +{ + gcc_assert (MEM_P (operands[1])); + + operands[1] = XEXP (operands[1], 0); + if (! call_operand (operands[1], Pmode)) + operands[1] = copy_to_mode_reg (Pmode, operands[1]); +}) + +(define_expand "call_value_vms" + [(parallel [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1)) + (match_operand 2))) + (use (match_dup 3)) + (use (reg:DI 25)) + (use (reg:DI 26)) + (clobber (reg:DI 27))])] + "" +{ + gcc_assert (MEM_P (operands[1])); + + operands[1] = XEXP (operands[1], 0); + + /* Always load AI with argument information, then handle symbolic and + indirect call differently. Load RA and set operands[3] to PV in + both cases. 
*/ + + emit_move_insn (gen_rtx_REG (DImode, 25), operands[2]); + if (GET_CODE (operands[1]) == SYMBOL_REF) + { + operands[3] = const0_rtx; + } + else + { + emit_move_insn (gen_rtx_REG (Pmode, 26), + gen_rtx_MEM (Pmode, plus_constant (Pmode, + operands[1], 8))); + operands[3] = operands[1]; + } +}) + +(define_insn "*call_osf_1_er_noreturn" + [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s")) + (match_operand 1)) + (use (reg:DI 29)) + (clobber (reg:DI 26))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF + && find_reg_note (insn, REG_NORETURN, NULL_RTX)" + "@ + jsr $26,($27),0 + bsr $26,%0\t\t!samegp + ldq $27,%0($29)\t\t!literal!%#\;jsr $26,($27),%0\t\t!lituse_jsr!%#" + [(set_attr "type" "jsr") + (set_attr "length" "*,*,8")]) + +(define_insn "*call_osf_1_er" + [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s")) + (match_operand 1)) + (use (reg:DI 29)) + (clobber (reg:DI 26))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "@ + jsr $26,(%0),0\;ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%* + bsr $26,%0\t\t!samegp + ldq $27,%0($29)\t\t!literal!%#\;jsr $26,($27),%0\t\t!lituse_jsr!%#\;ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%*" + [(set_attr "type" "jsr") + (set_attr "length" "12,*,16")]) + +;; We must use peep2 instead of a split because we need accurate life +;; information for $gp. Consider the case of { bar(); while (1); }. +(define_peephole2 + [(parallel [(call (mem:DI (match_operand:DI 0 "call_operand")) + (match_operand 1)) + (use (reg:DI 29)) + (clobber (reg:DI 26))])] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF && reload_completed + && ! samegp_function_operand (operands[0], Pmode) + && (peep2_regno_dead_p (1, 29) + || find_reg_note (insn, REG_NORETURN, NULL_RTX))" + [(parallel [(call (mem:DI (match_dup 2)) + (match_dup 1)) + (use (reg:DI 29)) + (use (match_dup 0)) + (use (match_dup 3)) + (clobber (reg:DI 26))])] +{ + if (CONSTANT_P (operands[0])) + { + operands[2] = gen_rtx_REG (Pmode, 27); + operands[3] = GEN_INT (alpha_next_sequence_number++); + emit_insn (gen_movdi_er_high_g (operands[2], pic_offset_table_rtx, + operands[0], operands[3])); + } + else + { + operands[2] = operands[0]; + operands[0] = const0_rtx; + operands[3] = const0_rtx; + } +}) + +(define_peephole2 + [(parallel [(call (mem:DI (match_operand:DI 0 "call_operand")) + (match_operand 1)) + (use (reg:DI 29)) + (clobber (reg:DI 26))])] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF && reload_completed + && ! samegp_function_operand (operands[0], Pmode) + && ! 
(peep2_regno_dead_p (1, 29) + || find_reg_note (insn, REG_NORETURN, NULL_RTX))" + [(parallel [(call (mem:DI (match_dup 2)) + (match_dup 1)) + (set (match_dup 5) + (unspec:DI [(match_dup 5) (match_dup 3)] UNSPEC_LDGP1)) + (use (match_dup 0)) + (use (match_dup 4)) + (clobber (reg:DI 26))]) + (set (match_dup 5) + (unspec:DI [(match_dup 5) (match_dup 3)] UNSPEC_LDGP2))] +{ + if (CONSTANT_P (operands[0])) + { + operands[2] = gen_rtx_REG (Pmode, 27); + operands[4] = GEN_INT (alpha_next_sequence_number++); + emit_insn (gen_movdi_er_high_g (operands[2], pic_offset_table_rtx, + operands[0], operands[4])); + } + else + { + operands[2] = operands[0]; + operands[0] = const0_rtx; + operands[4] = const0_rtx; + } + operands[3] = GEN_INT (alpha_next_sequence_number++); + operands[5] = pic_offset_table_rtx; +}) + +(define_insn "*call_osf_2_er_nogp" + [(call (mem:DI (match_operand:DI 0 "register_operand" "c")) + (match_operand 1)) + (use (reg:DI 29)) + (use (match_operand 2)) + (use (match_operand 3 "const_int_operand")) + (clobber (reg:DI 26))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "jsr $26,(%0),%2%J3" + [(set_attr "type" "jsr")]) + +(define_insn "*call_osf_2_er" + [(call (mem:DI (match_operand:DI 0 "register_operand" "c")) + (match_operand 1)) + (set (reg:DI 29) + (unspec:DI [(reg:DI 29) (match_operand 4 "const_int_operand")] + UNSPEC_LDGP1)) + (use (match_operand 2)) + (use (match_operand 3 "const_int_operand")) + (clobber (reg:DI 26))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "jsr $26,(%0),%2%J3\;ldah $29,0($26)\t\t!gpdisp!%4" + [(set_attr "type" "jsr") + (set_attr "cannot_copy" "true") + (set_attr "length" "8")]) + +(define_insn "*call_osf_1_noreturn" + [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s")) + (match_operand 1)) + (use (reg:DI 29)) + (clobber (reg:DI 26))] + "! TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF + && find_reg_note (insn, REG_NORETURN, NULL_RTX)" + "@ + jsr $26,($27),0 + bsr $26,$%0..ng + jsr $26,%0" + [(set_attr "type" "jsr") + (set_attr "length" "*,*,8")]) + +(define_insn "*call_osf_1" + [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s")) + (match_operand 1)) + (use (reg:DI 29)) + (clobber (reg:DI 26))] + "! TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "@ + jsr $26,($27),0\;ldgp $29,0($26) + bsr $26,$%0..ng + jsr $26,%0\;ldgp $29,0($26)" + [(set_attr "type" "jsr") + (set_attr "length" "12,*,16")]) + +(define_insn "*sibcall_osf_1_er" + [(call (mem:DI (match_operand:DI 0 "symbolic_operand" "R,s")) + (match_operand 1)) + (unspec [(reg:DI 29)] UNSPEC_SIBCALL)] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "@ + br $31,%0\t\t!samegp + ldq $27,%0($29)\t\t!literal!%#\;jmp $31,($27),%0\t\t!lituse_jsr!%#" + [(set_attr "type" "jsr") + (set_attr "length" "*,8")]) + +;; Note that the DEC assembler expands "jmp foo" with $at, which +;; doesn't do what we want. +(define_insn "*sibcall_osf_1" + [(call (mem:DI (match_operand:DI 0 "symbolic_operand" "R,s")) + (match_operand 1)) + (unspec [(reg:DI 29)] UNSPEC_SIBCALL)] + "! TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "@ + br $31,$%0..ng + lda $27,%0\;jmp $31,($27),%0" + [(set_attr "type" "jsr") + (set_attr "length" "*,8")]) + +; GAS relies on the order and position of instructions output below in order +; to generate relocs for VMS link to potentially optimize the call. +; Please do not molest. 
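+;
+; A hedged sketch of the second alternative below (the slot names are
+; placeholders; the real references come from alpha_use_linkage): a call
+; to a symbol "foo" comes out roughly as
+;
+;	ldq $26,<foo linkage slot 1>	; one quadword of foo's linkage pair
+;	ldq $27,<foo linkage slot 2>	; the other quadword (procedure value)
+;	jsr $26,foo
+;	ldq $27,0($29)			; reload our own procedure value
+;
+; which is the instruction shape GAS pattern-matches when optimizing.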
+(define_insn "*call_vms_1" + [(call (mem:DI (match_operand:DI 0 "call_operand" "r,s")) + (match_operand 1)) + (use (match_operand:DI 2 "nonmemory_operand" "r,n")) + (use (reg:DI 25)) + (use (reg:DI 26)) + (clobber (reg:DI 27))] + "TARGET_ABI_OPEN_VMS" +{ + switch (which_alternative) + { + case 0: + return "mov %2,$27\;jsr $26,0\;ldq $27,0($29)"; + case 1: + operands [2] = alpha_use_linkage (operands [0], true, false); + operands [3] = alpha_use_linkage (operands [0], false, false); + return "ldq $26,%3\;ldq $27,%2\;jsr $26,%0\;ldq $27,0($29)"; + default: + gcc_unreachable (); + } +} + [(set_attr "type" "jsr") + (set_attr "length" "12,16")]) + +;; Call subroutine returning any type. + +(define_expand "untyped_call" + [(parallel [(call (match_operand 0) + (const_int 0)) + (match_operand 1) + (match_operand 2)])] + "" +{ + int i; + + emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx)); + + for (i = 0; i < XVECLEN (operands[2], 0); i++) + { + rtx set = XVECEXP (operands[2], 0, i); + emit_move_insn (SET_DEST (set), SET_SRC (set)); + } + + /* The optimizer does not know that the call sets the function value + registers we stored in the result block. We avoid problems by + claiming that all hard registers are used and clobbered at this + point. */ + emit_insn (gen_blockage ()); + + DONE; +}) + +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and +;; all of memory. This blocks insns from being moved across this point. + +(define_insn "blockage" + [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)] + "" + "" + [(set_attr "length" "0") + (set_attr "type" "none")]) + +(define_insn "jump" + [(set (pc) + (label_ref (match_operand 0)))] + "" + "br $31,%l0" + [(set_attr "type" "ibr")]) + +(define_expand "return" + [(return)] + "direct_return ()") + +(define_insn "*return_internal" + [(return)] + "reload_completed" + "ret $31,($26),1" + [(set_attr "type" "ibr")]) + +(define_insn "indirect_jump" + [(set (pc) (match_operand:DI 0 "register_operand" "r"))] + "" + "jmp $31,(%0),0" + [(set_attr "type" "ibr")]) + +(define_expand "tablejump" + [(parallel [(set (pc) + (match_operand 0 "register_operand")) + (use (label_ref:DI (match_operand 1)))])] + "" +{ + if (TARGET_ABI_OSF) + { + rtx dest = gen_reg_rtx (DImode); + emit_insn (gen_extendsidi2 (dest, operands[0])); + emit_insn (gen_adddi3 (dest, pic_offset_table_rtx, dest)); + operands[0] = dest; + } +}) + +(define_insn "*tablejump_internal" + [(set (pc) + (match_operand:DI 0 "register_operand" "r")) + (use (label_ref (match_operand 1)))] + "" + "jmp $31,(%0),0" + [(set_attr "type" "ibr")]) + +;; Cache flush. Used by alpha_trampoline_init. 0x86 is PAL_imb, but we don't +;; want to have to include pal.h in our .s file. +(define_insn "imb" + [(unspec_volatile [(const_int 0)] UNSPECV_IMB)] + "" + "call_pal 0x86" + [(set_attr "type" "callpal")]) + +(define_expand "clear_cache" + [(match_operand:DI 0) ; region start + (match_operand:DI 1)] ; region end + "" +{ + emit_insn (gen_imb ()); + DONE; +}) + +;; BUGCHK is documented common to OSF/1 and VMS PALcode. +(define_insn "trap" + [(trap_if (const_int 1) (const_int 0))] + "" + "call_pal 0x81" + [(set_attr "type" "callpal")]) + +;; For userland, we load the thread pointer from the TCB. +;; For the kernel, we load the per-cpu private value. 
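+;;
+;; A hedged usage note: __builtin_thread_pointer () funnels into the
+;; pattern below, so userland code gets a single "call_pal 0x9e"
+;; (PAL_rduniq, "read unique value"), whose result is defined to land
+;; in $0 -- hence the "=v" constraint, the $0 register class.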
+ +(define_insn "get_thread_pointerdi" + [(set (match_operand:DI 0 "register_operand" "=v") + (unspec:DI [(const_int 0)] UNSPEC_TP))] + "TARGET_ABI_OSF" +{ + if (TARGET_TLS_KERNEL) + return "call_pal 0x32"; + else + return "call_pal 0x9e"; +} + [(set_attr "type" "callpal")]) + +;; For completeness, and possibly a __builtin function, here's how to +;; set the thread pointer. Since we don't describe enough of this +;; quantity for CSE, we have to use a volatile unspec, and then there's +;; not much point in creating an R16_REG register class. + +(define_expand "set_thread_pointerdi" + [(set (reg:DI 16) (match_operand:DI 0 "input_operand")) + (unspec_volatile [(reg:DI 16)] UNSPECV_SET_TP)] + "TARGET_ABI_OSF") + +(define_insn "*set_tp" + [(unspec_volatile [(reg:DI 16)] UNSPECV_SET_TP)] + "TARGET_ABI_OSF" +{ + if (TARGET_TLS_KERNEL) + return "call_pal 0x31"; + else + return "call_pal 0x9f"; +} + [(set_attr "type" "callpal")]) + +;; Special builtins for establishing and reverting VMS condition handlers. + +(define_expand "builtin_establish_vms_condition_handler" + [(set (reg:DI 0) (match_operand:DI 0 "register_operand")) + (use (match_operand:DI 1 "address_operand"))] + "TARGET_ABI_OPEN_VMS" +{ + alpha_expand_builtin_establish_vms_condition_handler (operands[0], + operands[1]); +}) + +(define_expand "builtin_revert_vms_condition_handler" + [(set (reg:DI 0) (match_operand:DI 0 "register_operand"))] + "TARGET_ABI_OPEN_VMS" + "alpha_expand_builtin_revert_vms_condition_handler (operands[0]);") + +;; Finally, we have the basic data motion insns. The byte and word insns +;; are done via define_expand. Start with the floating-point insns, since +;; they are simpler. + +(define_expand "movsf" + [(set (match_operand:SF 0 "nonimmediate_operand") + (match_operand:SF 1 "general_operand"))] + "" +{ + if (MEM_P (operands[0]) + && ! reg_or_0_operand (operands[1], SFmode)) + operands[1] = force_reg (SFmode, operands[1]); +}) + +(define_insn "*movsf" + [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,*r,*r,m,m,f,*r") + (match_operand:SF 1 "input_operand" "fG,m,*rG,m,fG,*r,*r,f"))] + "register_operand (operands[0], SFmode) + || reg_or_0_operand (operands[1], SFmode)" + "@ + cpys %R1,%R1,%0 + ld%, %0,%1 + bis $31,%r1,%0 + ldl %0,%1 + st%, %R1,%0 + stl %r1,%0 + itofs %1,%0 + ftois %1,%0" + [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist,itof,ftoi") + (set_attr "isa" "*,*,*,*,*,*,fix,fix")]) + +(define_expand "movdf" + [(set (match_operand:DF 0 "nonimmediate_operand") + (match_operand:DF 1 "general_operand"))] + "" +{ + if (MEM_P (operands[0]) + && ! reg_or_0_operand (operands[1], DFmode)) + operands[1] = force_reg (DFmode, operands[1]); +}) + +(define_insn "*movdf" + [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,*r,*r,m,m,f,*r") + (match_operand:DF 1 "input_operand" "fG,m,*rG,m,fG,*r,*r,f"))] + "register_operand (operands[0], DFmode) + || reg_or_0_operand (operands[1], DFmode)" + "@ + cpys %R1,%R1,%0 + ld%- %0,%1 + bis $31,%r1,%0 + ldq %0,%1 + st%- %R1,%0 + stq %r1,%0 + itoft %1,%0 + ftoit %1,%0" + [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist,itof,ftoi") + (set_attr "isa" "*,*,*,*,*,*,fix,fix")]) + +;; Subregs suck for register allocation. Pretend we can move TFmode +;; data between general registers until after reload. +;; ??? Is this still true now that we have the lower-subreg pass? + +(define_expand "movtf" + [(set (match_operand:TF 0 "nonimmediate_operand") + (match_operand:TF 1 "general_operand"))] + "" +{ + if (MEM_P (operands[0]) + && ! 
reg_or_0_operand (operands[1], TFmode)) + operands[1] = force_reg (TFmode, operands[1]); +}) + +(define_insn_and_split "*movtf" + [(set (match_operand:TF 0 "nonimmediate_operand" "=r,o") + (match_operand:TF 1 "input_operand" "roG,rG"))] + "register_operand (operands[0], TFmode) + || reg_or_0_operand (operands[1], TFmode)" + "#" + "reload_completed" + [(set (match_dup 0) (match_dup 2)) + (set (match_dup 1) (match_dup 3))] + "alpha_split_tmode_pair (operands, TFmode, true);") + +;; We do two major things here: handle mem->mem and construct long +;; constants. + +(define_expand "movsi" + [(set (match_operand:SI 0 "nonimmediate_operand") + (match_operand:SI 1 "general_operand"))] + "" +{ + if (alpha_expand_mov (SImode, operands)) + DONE; +}) + +(define_insn "*movsi" + [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,m,r") + (match_operand:SI 1 "input_operand" "rJ,K,L,n,m,rJ,s"))] + "register_operand (operands[0], SImode) + || reg_or_0_operand (operands[1], SImode)" + "@ + bis $31,%r1,%0 + lda %0,%1($31) + ldah %0,%h1($31) + # + ldl %0,%1 + stl %r1,%0 + lda %0,%1" + [(set_attr "type" "ilog,iadd,iadd,multi,ild,ist,ldsym") + (set_attr "isa" "*,*,*,*,*,*,vms")]) + +;; Split a load of a large constant into the appropriate two-insn +;; sequence. + +(define_split + [(set (match_operand:SI 0 "register_operand") + (match_operand:SI 1 "non_add_const_operand"))] + "" + [(const_int 0)] +{ + if (alpha_split_const_mov (SImode, operands)) + DONE; + else + FAIL; +}) + +(define_insn "*movdi_er_low_l" + [(set (match_operand:DI 0 "register_operand" "=r") + (lo_sum:DI (match_operand:DI 1 "register_operand" "r") + (match_operand:DI 2 "local_symbolic_operand")))] + "TARGET_EXPLICIT_RELOCS" +{ + if (true_regnum (operands[1]) == 29) + return "lda %0,%2(%1)\t\t!gprel"; + else + return "lda %0,%2(%1)\t\t!gprellow"; +} + [(set_attr "usegp" "yes")]) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (match_operand:DI 1 "small_symbolic_operand"))] + "TARGET_EXPLICIT_RELOCS && reload_completed" + [(set (match_dup 0) + (lo_sum:DI (match_dup 2) (match_dup 1)))] + "operands[2] = pic_offset_table_rtx;") + +(define_split + [(set (match_operand:DI 0 "register_operand") + (match_operand:DI 1 "local_symbolic_operand"))] + "TARGET_EXPLICIT_RELOCS && reload_completed" + [(set (match_dup 0) + (plus:DI (match_dup 2) (high:DI (match_dup 1)))) + (set (match_dup 0) + (lo_sum:DI (match_dup 0) (match_dup 1)))] + "operands[2] = pic_offset_table_rtx;") + +(define_split + [(match_operand 0 "some_small_symbolic_operand")] + "" + [(match_dup 0)] + "operands[0] = split_small_symbolic_operand (operands[0]);") + +;; Accepts any symbolic, not just global, since function calls that +;; don't go via bsr still use !literal in hopes of linker relaxation. 
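+;;
+;; A hedged sketch of that hope (sequence number illustrative): the
+;; pattern below plus its !lituse_jsr consumer emit
+;;
+;;	ldq $27,foo($29)	!literal!7
+;;	jsr $26,($27),foo	!lituse_jsr!7
+;;
+;; and a relaxing linker that finds foo within branch range may rewrite
+;; the pair into a direct "bsr $26,foo", skipping the GOT load entirely.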
+(define_insn "movdi_er_high_g" + [(set (match_operand:DI 0 "register_operand" "=r") + (unspec:DI [(match_operand:DI 1 "register_operand" "r") + (match_operand:DI 2 "symbolic_operand") + (match_operand 3 "const_int_operand")] + UNSPEC_LITERAL))] + "TARGET_EXPLICIT_RELOCS" +{ + if (INTVAL (operands[3]) == 0) + return "ldq %0,%2(%1)\t\t!literal"; + else + return "ldq %0,%2(%1)\t\t!literal!%3"; +} + [(set_attr "type" "ldsym")]) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (match_operand:DI 1 "global_symbolic_operand"))] + "TARGET_EXPLICIT_RELOCS && reload_completed" + [(set (match_dup 0) + (unspec:DI [(match_dup 2) + (match_dup 1) + (const_int 0)] UNSPEC_LITERAL))] + "operands[2] = pic_offset_table_rtx;") + +(define_insn "movdi_er_tlsgd" + [(set (match_operand:DI 0 "register_operand" "=r") + (unspec:DI [(match_operand:DI 1 "register_operand" "r") + (match_operand:DI 2 "symbolic_operand") + (match_operand 3 "const_int_operand")] + UNSPEC_TLSGD))] + "HAVE_AS_TLS" +{ + if (INTVAL (operands[3]) == 0) + return "lda %0,%2(%1)\t\t!tlsgd"; + else + return "lda %0,%2(%1)\t\t!tlsgd!%3"; +}) + +(define_insn "movdi_er_tlsldm" + [(set (match_operand:DI 0 "register_operand" "=r") + (unspec:DI [(match_operand:DI 1 "register_operand" "r") + (match_operand 2 "const_int_operand")] + UNSPEC_TLSLDM))] + "HAVE_AS_TLS" +{ + if (INTVAL (operands[2]) == 0) + return "lda %0,%&(%1)\t\t!tlsldm"; + else + return "lda %0,%&(%1)\t\t!tlsldm!%2"; +}) + +(define_insn "*movdi_er_gotdtp" + [(set (match_operand:DI 0 "register_operand" "=r") + (unspec:DI [(match_operand:DI 1 "register_operand" "r") + (match_operand:DI 2 "symbolic_operand")] + UNSPEC_DTPREL))] + "HAVE_AS_TLS" + "ldq %0,%2(%1)\t\t!gotdtprel" + [(set_attr "type" "ild") + (set_attr "usegp" "yes")]) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (match_operand:DI 1 "gotdtp_symbolic_operand"))] + "HAVE_AS_TLS && reload_completed" + [(set (match_dup 0) + (unspec:DI [(match_dup 2) + (match_dup 1)] UNSPEC_DTPREL))] +{ + operands[1] = XVECEXP (XEXP (operands[1], 0), 0, 0); + operands[2] = pic_offset_table_rtx; +}) + +(define_insn "*movdi_er_gottp" + [(set (match_operand:DI 0 "register_operand" "=r") + (unspec:DI [(match_operand:DI 1 "register_operand" "r") + (match_operand:DI 2 "symbolic_operand")] + UNSPEC_TPREL))] + "HAVE_AS_TLS" + "ldq %0,%2(%1)\t\t!gottprel" + [(set_attr "type" "ild") + (set_attr "usegp" "yes")]) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (match_operand:DI 1 "gottp_symbolic_operand"))] + "HAVE_AS_TLS && reload_completed" + [(set (match_dup 0) + (unspec:DI [(match_dup 2) + (match_dup 1)] UNSPEC_TPREL))] +{ + operands[1] = XVECEXP (XEXP (operands[1], 0), 0, 0); + operands[2] = pic_offset_table_rtx; +}) + +(define_insn "*movdi" + [(set (match_operand:DI 0 "nonimmediate_operand" + "=r,r,r,r,r,r,r,r, m, *f,*f, Q, r,*f") + (match_operand:DI 1 "input_operand" + "rJ,K,L,T,s,n,s,m,rJ,*fJ, Q,*f,*f, r"))] + "register_operand (operands[0], DImode) + || reg_or_0_operand (operands[1], DImode)" + "@ + mov %r1,%0 + lda %0,%1($31) + ldah %0,%h1($31) + # + # + # + lda %0,%1 + ldq%A1 %0,%1 + stq%A0 %r1,%0 + fmov %R1,%0 + ldt %0,%1 + stt %R1,%0 + ftoit %1,%0 + itoft %1,%0" + [(set_attr "type" "ilog,iadd,iadd,iadd,ldsym,multi,ldsym,ild,ist,fcpys,fld,fst,ftoi,itof") + (set_attr "isa" "*,*,*,er,er,*,ner,*,*,*,*,*,fix,fix") + (set_attr "usegp" "*,*,*,yes,*,*,*,*,*,*,*,*,*,*")]) + +;; VMS needs to set up "vms_base_regno" for unwinding. 
This move
+;; often appears dead to the life analysis code, at which point we
+;; die for emitting dead prologue instructions.  Force this live.
+
+(define_insn "force_movdi"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r")]
+                            UNSPECV_FORCE_MOV))]
+  ""
+  "mov %1,%0"
+  [(set_attr "type" "ilog")])
+
+;; We do three major things here: handle mem->mem, put 64-bit constants in
+;; memory, and construct long 32-bit constants.
+
+(define_expand "movdi"
+  [(set (match_operand:DI 0 "nonimmediate_operand")
+        (match_operand:DI 1 "general_operand"))]
+  ""
+{
+  if (alpha_expand_mov (DImode, operands))
+    DONE;
+})
+
+;; Split a load of a large constant into the appropriate two-insn
+;; sequence.
+
+(define_split
+  [(set (match_operand:DI 0 "register_operand")
+        (match_operand:DI 1 "non_add_const_operand"))]
+  ""
+  [(const_int 0)]
+{
+  if (alpha_split_const_mov (DImode, operands))
+    DONE;
+  else
+    FAIL;
+})
+
+;; We need to prevent reload from splitting TImode moves, because it
+;; might decide to overwrite a pointer with the value it points to.
+;; In that case we have to do the loads in the appropriate order so
+;; that the pointer is not destroyed too early.
+
+(define_insn_and_split "*movti_internal"
+  [(set (match_operand:TI 0 "nonimmediate_operand" "=r,o")
+        (match_operand:TI 1 "input_operand" "roJ,rJ"))]
+  "(register_operand (operands[0], TImode)
+    /* Prevent rematerialization of constants.  */
+    && ! CONSTANT_P (operands[1]))
+   || reg_or_0_operand (operands[1], TImode)"
+  "#"
+  "reload_completed"
+  [(set (match_dup 0) (match_dup 2))
+   (set (match_dup 1) (match_dup 3))]
+  "alpha_split_tmode_pair (operands, TImode, true);")
+
+(define_expand "movti"
+  [(set (match_operand:TI 0 "nonimmediate_operand")
+        (match_operand:TI 1 "general_operand"))]
+  ""
+{
+  if (MEM_P (operands[0])
+      && ! reg_or_0_operand (operands[1], TImode))
+    operands[1] = force_reg (TImode, operands[1]);
+
+  if (operands[1] == const0_rtx)
+    ;
+  /* We must put 64-bit constants in memory.  We could keep the
+     32-bit constants in TImode and rely on the splitter, but
+     this doesn't seem to be worth the pain.  */
+  else if (CONST_INT_P (operands[1])
+           || GET_CODE (operands[1]) == CONST_DOUBLE)
+    {
+      rtx in[2], out[2], target;
+
+      gcc_assert (can_create_pseudo_p ());
+
+      split_double (operands[1], &in[0], &in[1]);
+
+      if (in[0] == const0_rtx)
+        out[0] = const0_rtx;
+      else
+        {
+          out[0] = gen_reg_rtx (DImode);
+          emit_insn (gen_movdi (out[0], in[0]));
+        }
+
+      if (in[1] == const0_rtx)
+        out[1] = const0_rtx;
+      else
+        {
+          out[1] = gen_reg_rtx (DImode);
+          emit_insn (gen_movdi (out[1], in[1]));
+        }
+
+      if (!REG_P (operands[0]))
+        target = gen_reg_rtx (TImode);
+      else
+        target = operands[0];
+
+      emit_insn (gen_movdi (operand_subword (target, 0, 0, TImode), out[0]));
+      emit_insn (gen_movdi (operand_subword (target, 1, 0, TImode), out[1]));
+
+      if (target != operands[0])
+        emit_insn (gen_rtx_SET (VOIDmode, operands[0], target));
+
+      DONE;
+    }
+})
+
+;; These are the partial-word cases.
+;;
+;; First we have the code to load an aligned word.  Operand 0 is the register
+;; in which to place the result.  Its mode is QImode or HImode.  Operand 1
+;; is an SImode MEM at the low-order byte of the proper word.  Operand 2 is
+;; the bit offset within the word at which the value lies.  Operand 3 is an
+;; SImode scratch register.  If operand 0 is a hard register, operand 3 may
+;; be the same register.  It is allowed to conflict with operand 1 as well.
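+;;
+;; A hedged sketch (offset and registers illustrative): aligned_loadqi
+;; with a bit offset of 16 comes out as roughly
+;;
+;;	ldl	$1,0($16)	# operand 3: the whole aligned word
+;;	extbl	$1,2,$0		# byte 2 of the word, i.e. bits 16..23
+;;
+;; since a zero_extract at a constant byte boundary maps onto a single
+;; ext[bw]l instruction.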
+
+(define_expand "aligned_loadqi"
+  [(set (match_operand:SI 3 "register_operand")
+        (match_operand:SI 1 "memory_operand"))
+   (set (match_operand:DI 0 "register_operand")
+        (zero_extract:DI (subreg:DI (match_dup 3) 0)
+                         (const_int 8)
+                         (match_operand:DI 2 "const_int_operand")))])
+
+(define_expand "aligned_loadhi"
+  [(set (match_operand:SI 3 "register_operand")
+        (match_operand:SI 1 "memory_operand"))
+   (set (match_operand:DI 0 "register_operand")
+        (zero_extract:DI (subreg:DI (match_dup 3) 0)
+                         (const_int 16)
+                         (match_operand:DI 2 "const_int_operand")))])
+
+;; Similar for unaligned loads, where we use the sequence from the
+;; Alpha Architecture manual.  Only the little-endian sequence is needed,
+;; as that is the only byte order this port still supports.
+;;
+;; Operand 1 is the address.  Operands 2 and 3 are temporaries, where
+;; operand 3 can overlap the input and output registers.
+
+(define_expand "unaligned_loadqi"
+  [(set (match_operand:DI 2 "register_operand")
+        (mem:DI (and:DI (match_operand:DI 1 "address_operand")
+                        (const_int -8))))
+   (set (match_operand:DI 3 "register_operand")
+        (match_dup 1))
+   (set (match_operand:DI 0 "register_operand")
+        (zero_extract:DI (match_dup 2)
+                         (const_int 8)
+                         (ashift:DI (match_dup 3) (const_int 3))))])
+
+(define_expand "unaligned_loadhi"
+  [(set (match_operand:DI 2 "register_operand")
+        (mem:DI (and:DI (match_operand:DI 1 "address_operand")
+                        (const_int -8))))
+   (set (match_operand:DI 3 "register_operand")
+        (match_dup 1))
+   (set (match_operand:DI 0 "register_operand")
+        (zero_extract:DI (match_dup 2)
+                         (const_int 16)
+                         (ashift:DI (match_dup 3) (const_int 3))))])
+
+;; Storing an aligned byte or word requires two temporaries.  Operand 0 is
+;; the aligned SImode MEM.  Operand 1 is the register containing the
+;; byte or word to store.  Operand 2 is the bit offset within the word at
+;; which the value should be placed.  Operands 3 and 4 are SImode temporaries.
+
+(define_expand "aligned_store"
+  [(set (match_operand:SI 3 "register_operand")
+        (match_operand:SI 0 "memory_operand"))
+   (set (subreg:DI (match_dup 3) 0)
+        (and:DI (subreg:DI (match_dup 3) 0) (match_dup 5)))
+   (set (subreg:DI (match_operand:SI 4 "register_operand") 0)
+        (ashift:DI (zero_extend:DI (match_operand 1 "register_operand"))
+                   (match_operand:DI 2 "const_int_operand")))
+   (set (subreg:DI (match_dup 4) 0)
+        (ior:DI (subreg:DI (match_dup 4) 0) (subreg:DI (match_dup 3) 0)))
+   (set (match_dup 0) (match_dup 4))]
+  ""
+{
+  operands[5] = GEN_INT (~ (GET_MODE_MASK (GET_MODE (operands[1]))
+                            << INTVAL (operands[2])));
+})
+
+;; For the unaligned byte and halfword cases, we use code similar to that
+;; in the Architecture book, but reordered to lower the number of registers
+;; required.  Operand 0 is the address.  Operand 1 is the data to store.
+;; Operands 2, 3, and 4 are DImode temporaries, where operands 2 and 4 may
+;; be the same temporary, if desired.  If the address is in a register,
+;; operand 2 can be that register.
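+;;
+;; A hedged sketch of the byte case below (registers illustrative; this
+;; is the classic Architecture-book shape):
+;;
+;;	ldq_u	$2,0($16)	# fetch the quadword around the byte
+;;	insbl	$17,$16,$3	# new byte shifted into position
+;;	mskbl	$2,$16,$2	# zap the old byte
+;;	bis	$3,$2,$3	# merge old and new
+;;	stq_u	$3,0($16)	# store the quadword back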
+ +(define_expand "unaligned_store<mode>" + [(set (match_operand:DI 3 "register_operand") + (mem:DI (and:DI (match_operand:DI 0 "address_operand") + (const_int -8)))) + (set (match_operand:DI 2 "register_operand") + (match_dup 0)) + (set (match_dup 3) + (and:DI (not:DI (ashift:DI (match_dup 5) + (ashift:DI (match_dup 2) (const_int 3)))) + (match_dup 3))) + (set (match_operand:DI 4 "register_operand") + (ashift:DI (zero_extend:DI + (match_operand:I12MODE 1 "register_operand")) + (ashift:DI (match_dup 2) (const_int 3)))) + (set (match_dup 4) (ior:DI (match_dup 4) (match_dup 3))) + (set (mem:DI (and:DI (match_dup 0) (const_int -8))) + (match_dup 4))] + "" + "operands[5] = GEN_INT (GET_MODE_MASK (<MODE>mode));") + +;; Here are the define_expand's for QI and HI moves that use the above +;; patterns. We have the normal sets, plus the ones that need scratch +;; registers for reload. + +(define_expand "mov<mode>" + [(set (match_operand:I12MODE 0 "nonimmediate_operand") + (match_operand:I12MODE 1 "general_operand"))] + "" +{ + if (TARGET_BWX + ? alpha_expand_mov (<MODE>mode, operands) + : alpha_expand_mov_nobwx (<MODE>mode, operands)) + DONE; +}) + +(define_insn "*movqi" + [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m") + (match_operand:QI 1 "input_operand" "rJ,n,m,rJ"))] + "register_operand (operands[0], QImode) + || reg_or_0_operand (operands[1], QImode)" + "@ + bis $31,%r1,%0 + lda %0,%L1($31) + ldbu %0,%1 + stb %r1,%0" + [(set_attr "type" "ilog,iadd,ild,ist") + (set_attr "isa" "*,*,bwx,bwx")]) + +(define_insn "*movhi" + [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m") + (match_operand:HI 1 "input_operand" "rJ,n,m,rJ"))] + "register_operand (operands[0], HImode) + || reg_or_0_operand (operands[1], HImode)" + "@ + bis $31,%r1,%0 + lda %0,%L1($31) + ldwu %0,%1 + stw %r1,%0" + [(set_attr "type" "ilog,iadd,ild,ist") + (set_attr "isa" "*,*,bwx,bwx")]) + +;; We need to hook into the extra support that we have for HImode +;; reloads when BWX insns are not available. +(define_expand "movcqi" + [(set (match_operand:CQI 0 "nonimmediate_operand") + (match_operand:CQI 1 "general_operand"))] + "!TARGET_BWX" +{ + if (GET_CODE (operands[0]) == CONCAT || GET_CODE (operands[1]) == CONCAT) + ; + else if (!any_memory_operand (operands[0], CQImode)) + { + if (!any_memory_operand (operands[1], CQImode)) + { + emit_move_insn (gen_lowpart (HImode, operands[0]), + gen_lowpart (HImode, operands[1])); + DONE; + } + if (aligned_memory_operand (operands[1], CQImode)) + { + bool done; + do_aligned1: + operands[1] = gen_lowpart (HImode, operands[1]); + do_aligned2: + operands[0] = gen_lowpart (HImode, operands[0]); + done = alpha_expand_mov_nobwx (HImode, operands); + gcc_assert (done); + DONE; + } + } + else if (aligned_memory_operand (operands[0], CQImode)) + { + if (MEM_P (operands[1])) + { + rtx x = gen_reg_rtx (HImode); + emit_move_insn (gen_lowpart (CQImode, x), operands[1]); + operands[1] = x; + goto do_aligned2; + } + goto do_aligned1; + } + + gcc_assert (!reload_in_progress); + emit_move_complex_parts (operands[0], operands[1]); + DONE; +}) + +;; Here are the versions for reload. +;; +;; The aligned input case is recognized early in alpha_secondary_reload +;; in order to avoid allocating an unnecessary scratch register. +;; +;; Note that in the unaligned cases we know that the operand must not be +;; a pseudo-register because stack slots are always aligned references. 
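+;;
+;; For the unaligned input case, reload_inqi below therefore ends up
+;; emitting the usual pair (a hedged sketch, registers illustrative):
+;;
+;;	ldq_u	$2,0($1)	# quadword containing the byte
+;;	extbl	$2,$1,$0	# pick out the byte addressed by $1
+;;
+;; with $2 drawn from whichever half of the TImode scratch does not
+;; coincide with the output register.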
+ +(define_expand "reload_in<mode>" + [(parallel [(match_operand:RELOAD12 0 "register_operand" "=r") + (match_operand:RELOAD12 1 "any_memory_operand" "m") + (match_operand:TI 2 "register_operand" "=&r")])] + "!TARGET_BWX" +{ + rtx scratch, seq, addr; + unsigned regno = REGNO (operands[2]); + + /* It is possible that one of the registers we got for operands[2] + might coincide with that of operands[0] (which is why we made + it TImode). Pick the other one to use as our scratch. */ + if (regno == REGNO (operands[0])) + regno++; + scratch = gen_rtx_REG (DImode, regno); + + addr = get_unaligned_address (operands[1]); + operands[0] = gen_rtx_REG (DImode, REGNO (operands[0])); + seq = gen_unaligned_load<reloadmode> (operands[0], addr, + scratch, operands[0]); + alpha_set_memflags (seq, operands[1]); + + emit_insn (seq); + DONE; +}) + +(define_expand "reload_out<mode>" + [(parallel [(match_operand:RELOAD12 0 "any_memory_operand" "=m") + (match_operand:RELOAD12 1 "register_operand" "r") + (match_operand:TI 2 "register_operand" "=&r")])] + "!TARGET_BWX" +{ + unsigned regno = REGNO (operands[2]); + + if (<MODE>mode == CQImode) + { + operands[0] = gen_lowpart (HImode, operands[0]); + operands[1] = gen_lowpart (HImode, operands[1]); + } + + if (aligned_memory_operand (operands[0], <MODE>mode)) + { + emit_insn (gen_reload_out<reloadmode>_aligned + (operands[0], operands[1], + gen_rtx_REG (SImode, regno), + gen_rtx_REG (SImode, regno + 1))); + } + else + { + rtx addr = get_unaligned_address (operands[0]); + rtx scratch1 = gen_rtx_REG (DImode, regno); + rtx scratch2 = gen_rtx_REG (DImode, regno + 1); + rtx scratch3 = scratch1; + rtx seq; + + if (REG_P (addr)) + scratch1 = addr; + + seq = gen_unaligned_store<reloadmode> (addr, operands[1], scratch1, + scratch2, scratch3); + alpha_set_memflags (seq, operands[0]); + emit_insn (seq); + } + DONE; +}) + +;; Helpers for the above. The way reload is structured, we can't +;; always get a proper address for a stack slot during reload_foo +;; expansion, so we must delay our address manipulations until after. 
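+;;
+;; A hedged sketch of what reload_outhi_aligned below finally splits into,
+;; for a halfword at byte offset 2 of an aligned longword (registers and
+;; offsets illustrative):
+;;
+;;	ldl	$1,0($16)	# fetch the aligned longword
+;;	zapnot	$1,0xf3,$1	# clear bytes 2..3, the old halfword
+;;	inswl	$17,2,$2	# new halfword shifted to byte offset 2
+;;	bis	$2,$1,$2	# merge
+;;	stl	$2,0($16)	# store the longword back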
+ +(define_insn_and_split "reload_in<mode>_aligned" + [(set (match_operand:I12MODE 0 "register_operand" "=r") + (match_operand:I12MODE 1 "memory_operand" "m"))] + "!TARGET_BWX && (reload_in_progress || reload_completed)" + "#" + "!TARGET_BWX && reload_completed" + [(const_int 0)] +{ + rtx aligned_mem, bitnum; + get_aligned_mem (operands[1], &aligned_mem, &bitnum); + emit_insn (gen_aligned_load<reloadmode> + (gen_lowpart (DImode, operands[0]), aligned_mem, bitnum, + gen_rtx_REG (SImode, REGNO (operands[0])))); + DONE; +}) + +(define_insn_and_split "reload_out<mode>_aligned" + [(set (match_operand:I12MODE 0 "memory_operand" "=m") + (match_operand:I12MODE 1 "register_operand" "r")) + (clobber (match_operand:SI 2 "register_operand" "=r")) + (clobber (match_operand:SI 3 "register_operand" "=r"))] + "!TARGET_BWX && (reload_in_progress || reload_completed)" + "#" + "!TARGET_BWX && reload_completed" + [(const_int 0)] +{ + rtx aligned_mem, bitnum; + get_aligned_mem (operands[0], &aligned_mem, &bitnum); + emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum, + operands[2], operands[3])); + DONE; +}) + +;; Vector operations + +(define_mode_iterator VEC [V8QI V4HI V2SI]) +(define_mode_iterator VEC12 [V8QI V4HI]) + +(define_expand "mov<mode>" + [(set (match_operand:VEC 0 "nonimmediate_operand") + (match_operand:VEC 1 "general_operand"))] + "" +{ + if (alpha_expand_mov (<MODE>mode, operands)) + DONE; +}) + +(define_split + [(set (match_operand:VEC 0 "register_operand") + (match_operand:VEC 1 "non_zero_const_operand"))] + "" + [(const_int 0)] +{ + if (alpha_split_const_mov (<MODE>mode, operands)) + DONE; + else + FAIL; +}) + + +(define_expand "movmisalign<mode>" + [(set (match_operand:VEC 0 "nonimmediate_operand") + (match_operand:VEC 1 "general_operand"))] + "" +{ + alpha_expand_movmisalign (<MODE>mode, operands); + DONE; +}) + +(define_insn "*mov<mode>_fix" + [(set (match_operand:VEC 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,m,r,*f") + (match_operand:VEC 1 "input_operand" "rW,i,m,rW,*fW,m,*f,*f,r"))] + "register_operand (operands[0], <MODE>mode) + || reg_or_0_operand (operands[1], <MODE>mode)" + "@ + bis $31,%r1,%0 + # + ldq %0,%1 + stq %r1,%0 + cpys %R1,%R1,%0 + ldt %0,%1 + stt %R1,%0 + ftoit %1,%0 + itoft %1,%0" + [(set_attr "type" "ilog,multi,ild,ist,fcpys,fld,fst,ftoi,itof") + (set_attr "isa" "*,*,*,*,*,*,*,fix,fix")]) + +(define_insn "<code><mode>3" + [(set (match_operand:VEC12 0 "register_operand" "=r") + (any_maxmin:VEC12 + (match_operand:VEC12 1 "reg_or_0_operand" "rW") + (match_operand:VEC12 2 "reg_or_0_operand" "rW")))] + "TARGET_MAX" + "<maxmin><modesuffix> %r1,%r2,%0" + [(set_attr "type" "mvi")]) + +(define_insn "one_cmpl<mode>2" + [(set (match_operand:VEC 0 "register_operand" "=r") + (not:VEC (match_operand:VEC 1 "register_operand" "r")))] + "" + "ornot $31,%1,%0" + [(set_attr "type" "ilog")]) + +(define_insn "and<mode>3" + [(set (match_operand:VEC 0 "register_operand" "=r") + (and:VEC (match_operand:VEC 1 "register_operand" "r") + (match_operand:VEC 2 "register_operand" "r")))] + "" + "and %1,%2,%0" + [(set_attr "type" "ilog")]) + +(define_insn "*andnot<mode>3" + [(set (match_operand:VEC 0 "register_operand" "=r") + (and:VEC (not:VEC (match_operand:VEC 1 "register_operand" "r")) + (match_operand:VEC 2 "register_operand" "r")))] + "" + "bic %2,%1,%0" + [(set_attr "type" "ilog")]) + +(define_insn "ior<mode>3" + [(set (match_operand:VEC 0 "register_operand" "=r") + (ior:VEC (match_operand:VEC 1 "register_operand" "r") + (match_operand:VEC 2 "register_operand" "r")))] + "" + "bis 
%1,%2,%0"
+  [(set_attr "type" "ilog")])
+
+(define_insn "*iornot<mode>3"
+  [(set (match_operand:VEC 0 "register_operand" "=r")
+        (ior:VEC (not:VEC (match_operand:VEC 1 "register_operand" "r"))
+                 (match_operand:VEC 2 "register_operand" "r")))]
+  ""
+  "ornot %2,%1,%0"
+  [(set_attr "type" "ilog")])
+
+(define_insn "xor<mode>3"
+  [(set (match_operand:VEC 0 "register_operand" "=r")
+        (xor:VEC (match_operand:VEC 1 "register_operand" "r")
+                 (match_operand:VEC 2 "register_operand" "r")))]
+  ""
+  "xor %1,%2,%0"
+  [(set_attr "type" "ilog")])
+
+(define_insn "*xornot<mode>3"
+  [(set (match_operand:VEC 0 "register_operand" "=r")
+        (not:VEC (xor:VEC (match_operand:VEC 1 "register_operand" "r")
+                          (match_operand:VEC 2 "register_operand" "r"))))]
+  ""
+  "eqv %1,%2,%0"
+  [(set_attr "type" "ilog")])
+
+(define_expand "vec_shl_<mode>"
+  [(set (match_operand:VEC 0 "register_operand")
+        (ashift:DI (match_operand:VEC 1 "register_operand")
+                   (match_operand:DI 2 "reg_or_6bit_operand")))]
+  ""
+{
+  operands[0] = gen_lowpart (DImode, operands[0]);
+  operands[1] = gen_lowpart (DImode, operands[1]);
+})
+
+(define_expand "vec_shr_<mode>"
+  [(set (match_operand:VEC 0 "register_operand")
+        (lshiftrt:DI (match_operand:VEC 1 "register_operand")
+                     (match_operand:DI 2 "reg_or_6bit_operand")))]
+  ""
+{
+  operands[0] = gen_lowpart (DImode, operands[0]);
+  operands[1] = gen_lowpart (DImode, operands[1]);
+})
+
+;; Bit field extract patterns which use ext[wlq][lh]
+
+(define_expand "extvmisaligndi"
+  [(set (match_operand:DI 0 "register_operand")
+        (sign_extract:DI (match_operand:BLK 1 "memory_operand")
+                         (match_operand:DI 2 "const_int_operand")
+                         (match_operand:DI 3 "const_int_operand")))]
+  ""
+{
+  /* We can do 16, 32 and 64 bit fields, if aligned on byte boundaries.  */
+  if (INTVAL (operands[3]) % 8 != 0
+      || (INTVAL (operands[2]) != 16
+          && INTVAL (operands[2]) != 32
+          && INTVAL (operands[2]) != 64))
+    FAIL;
+
+  alpha_expand_unaligned_load (operands[0], operands[1],
+                               INTVAL (operands[2]) / 8,
+                               INTVAL (operands[3]) / 8, 1);
+  DONE;
+})
+
+(define_expand "extzvdi"
+  [(set (match_operand:DI 0 "register_operand")
+        (zero_extract:DI (match_operand:DI 1 "register_operand")
+                         (match_operand:DI 2 "const_int_operand")
+                         (match_operand:DI 3 "const_int_operand")))]
+  ""
+{
+  /* We can do 8, 16, 32 and 64 bit fields, if aligned on byte boundaries.  */
+  if (INTVAL (operands[3]) % 8 != 0
+      || (INTVAL (operands[2]) != 8
+          && INTVAL (operands[2]) != 16
+          && INTVAL (operands[2]) != 32
+          && INTVAL (operands[2]) != 64))
+    FAIL;
+})
+
+(define_expand "extzvmisaligndi"
+  [(set (match_operand:DI 0 "register_operand")
+        (zero_extract:DI (match_operand:BLK 1 "memory_operand")
+                         (match_operand:DI 2 "const_int_operand")
+                         (match_operand:DI 3 "const_int_operand")))]
+  ""
+{
+  /* We can do 16, 32 and 64 bit fields, if aligned on byte boundaries.
+     We fail 8-bit fields, falling back on a simple byte load.  */
+  if (INTVAL (operands[3]) % 8 != 0
+      || (INTVAL (operands[2]) != 16
+          && INTVAL (operands[2]) != 32
+          && INTVAL (operands[2]) != 64))
+    FAIL;
+
+  alpha_expand_unaligned_load (operands[0], operands[1],
+                               INTVAL (operands[2]) / 8,
+                               INTVAL (operands[3]) / 8, 0);
+  DONE;
+})
+
+(define_expand "insvmisaligndi"
+  [(set (zero_extract:DI (match_operand:BLK 0 "memory_operand")
+                         (match_operand:DI 1 "const_int_operand")
+                         (match_operand:DI 2 "const_int_operand"))
+        (match_operand:DI 3 "register_operand"))]
+  ""
+{
+  /* We can do 16, 32 and 64 bit fields, if aligned on byte boundaries.
*/ + if (INTVAL (operands[2]) % 8 != 0 + || (INTVAL (operands[1]) != 16 + && INTVAL (operands[1]) != 32 + && INTVAL (operands[1]) != 64)) + FAIL; + + alpha_expand_unaligned_store (operands[0], operands[3], + INTVAL (operands[1]) / 8, + INTVAL (operands[2]) / 8); + DONE; +}) + +;; Block move/clear, see alpha.c for more details. +;; Argument 0 is the destination +;; Argument 1 is the source +;; Argument 2 is the length +;; Argument 3 is the alignment + +(define_expand "movmemqi" + [(parallel [(set (match_operand:BLK 0 "memory_operand") + (match_operand:BLK 1 "memory_operand")) + (use (match_operand:DI 2 "immediate_operand")) + (use (match_operand:DI 3 "immediate_operand"))])] + "" +{ + if (alpha_expand_block_move (operands)) + DONE; + else + FAIL; +}) + +(define_expand "movmemdi" + [(parallel [(set (match_operand:BLK 0 "memory_operand") + (match_operand:BLK 1 "memory_operand")) + (use (match_operand:DI 2 "immediate_operand")) + (use (match_operand:DI 3 "immediate_operand")) + (use (match_dup 4)) + (clobber (reg:DI 25)) + (clobber (reg:DI 16)) + (clobber (reg:DI 17)) + (clobber (reg:DI 18)) + (clobber (reg:DI 19)) + (clobber (reg:DI 20)) + (clobber (reg:DI 26)) + (clobber (reg:DI 27))])] + "TARGET_ABI_OPEN_VMS" + "operands[4] = gen_rtx_SYMBOL_REF (Pmode, \"OTS$MOVE\");") + +(define_insn "*movmemdi_1" + [(set (match_operand:BLK 0 "memory_operand" "=m,=m") + (match_operand:BLK 1 "memory_operand" "m,m")) + (use (match_operand:DI 2 "nonmemory_operand" "r,i")) + (use (match_operand:DI 3 "immediate_operand")) + (use (match_operand:DI 4 "call_operand" "i,i")) + (clobber (reg:DI 25)) + (clobber (reg:DI 16)) + (clobber (reg:DI 17)) + (clobber (reg:DI 18)) + (clobber (reg:DI 19)) + (clobber (reg:DI 20)) + (clobber (reg:DI 26)) + (clobber (reg:DI 27))] + "TARGET_ABI_OPEN_VMS" +{ + operands [5] = alpha_use_linkage (operands [4], false, true); + switch (which_alternative) + { + case 0: + return "lda $16,%0\;bis $31,%2,$17\;lda $18,%1\;ldq $26,%5\;lda $25,3($31)\;jsr $26,%4\;ldq $27,0($29)"; + case 1: + return "lda $16,%0\;lda $17,%2($31)\;lda $18,%1\;ldq $26,%5\;lda $25,3($31)\;jsr $26,%4\;ldq $27,0($29)"; + default: + gcc_unreachable (); + } +} + [(set_attr "type" "multi") + (set_attr "length" "28")]) + +(define_expand "setmemqi" + [(parallel [(set (match_operand:BLK 0 "memory_operand") + (match_operand 2 "const_int_operand")) + (use (match_operand:DI 1 "immediate_operand")) + (use (match_operand:DI 3 "immediate_operand"))])] + "" +{ + /* If value to set is not zero, use the library routine. */ + if (operands[2] != const0_rtx) + FAIL; + + if (alpha_expand_block_clear (operands)) + DONE; + else + FAIL; +}) + +(define_expand "setmemdi" + [(parallel [(set (match_operand:BLK 0 "memory_operand") + (match_operand 2 "const_int_operand")) + (use (match_operand:DI 1 "immediate_operand")) + (use (match_operand:DI 3 "immediate_operand")) + (use (match_dup 4)) + (clobber (reg:DI 25)) + (clobber (reg:DI 16)) + (clobber (reg:DI 17)) + (clobber (reg:DI 26)) + (clobber (reg:DI 27))])] + "TARGET_ABI_OPEN_VMS" +{ + /* If value to set is not zero, use the library routine. 
*/ + if (operands[2] != const0_rtx) + FAIL; + + operands[4] = gen_rtx_SYMBOL_REF (Pmode, "OTS$ZERO"); +}) + +(define_insn "*clrmemdi_1" + [(set (match_operand:BLK 0 "memory_operand" "=m,=m") + (const_int 0)) + (use (match_operand:DI 1 "nonmemory_operand" "r,i")) + (use (match_operand:DI 2 "immediate_operand")) + (use (match_operand:DI 3 "call_operand" "i,i")) + (clobber (reg:DI 25)) + (clobber (reg:DI 16)) + (clobber (reg:DI 17)) + (clobber (reg:DI 26)) + (clobber (reg:DI 27))] + "TARGET_ABI_OPEN_VMS" +{ + operands [4] = alpha_use_linkage (operands [3], false, true); + switch (which_alternative) + { + case 0: + return "lda $16,%0\;bis $31,%1,$17\;ldq $26,%4\;lda $25,2($31)\;jsr $26,%3\;ldq $27,0($29)"; + case 1: + return "lda $16,%0\;lda $17,%1($31)\;ldq $26,%4\;lda $25,2($31)\;jsr $26,%3\;ldq $27,0($29)"; + default: + gcc_unreachable (); + } +} + [(set_attr "type" "multi") + (set_attr "length" "24")]) + + +;; Subroutine of stack space allocation. Perform a stack probe. +(define_expand "probe_stack" + [(set (match_dup 1) (match_operand:DI 0 "const_int_operand"))] + "" +{ + operands[1] = gen_rtx_MEM (DImode, plus_constant (Pmode, stack_pointer_rtx, + INTVAL (operands[0]))); + MEM_VOLATILE_P (operands[1]) = 1; + + operands[0] = const0_rtx; +}) + +;; This is how we allocate stack space. If we are allocating a +;; constant amount of space and we know it is less than 4096 +;; bytes, we need do nothing. +;; +;; If it is more than 4096 bytes, we need to probe the stack +;; periodically. +(define_expand "allocate_stack" + [(set (reg:DI 30) + (plus:DI (reg:DI 30) + (match_operand:DI 1 "reg_or_cint_operand"))) + (set (match_operand:DI 0 "register_operand" "=r") + (match_dup 2))] + "" +{ + if (CONST_INT_P (operands[1]) + && INTVAL (operands[1]) < 32768) + { + if (INTVAL (operands[1]) >= 4096) + { + /* We do this the same way as in the prologue and generate explicit + probes. Then we update the stack by the constant. 
*/ + + int probed = 4096; + + emit_insn (gen_probe_stack (GEN_INT (- probed))); + while (probed + 8192 < INTVAL (operands[1])) + emit_insn (gen_probe_stack (GEN_INT (- (probed += 8192)))); + + if (probed + 4096 < INTVAL (operands[1])) + emit_insn (gen_probe_stack (GEN_INT (- INTVAL(operands[1])))); + } + + operands[1] = GEN_INT (- INTVAL (operands[1])); + operands[2] = virtual_stack_dynamic_rtx; + } + else + { + rtx out_label = 0; + rtx loop_label = gen_label_rtx (); + rtx want = gen_reg_rtx (Pmode); + rtx tmp = gen_reg_rtx (Pmode); + rtx memref, test; + + emit_insn (gen_subdi3 (want, stack_pointer_rtx, + force_reg (Pmode, operands[1]))); + + if (!CONST_INT_P (operands[1])) + { + rtx limit = GEN_INT (4096); + out_label = gen_label_rtx (); + test = gen_rtx_LTU (VOIDmode, operands[1], limit); + emit_jump_insn + (gen_cbranchdi4 (test, operands[1], limit, out_label)); + } + + emit_insn (gen_adddi3 (tmp, stack_pointer_rtx, GEN_INT (-4096))); + emit_label (loop_label); + memref = gen_rtx_MEM (DImode, tmp); + MEM_VOLATILE_P (memref) = 1; + emit_move_insn (memref, const0_rtx); + emit_insn (gen_adddi3 (tmp, tmp, GEN_INT(-8192))); + test = gen_rtx_GTU (VOIDmode, tmp, want); + emit_jump_insn (gen_cbranchdi4 (test, tmp, want, loop_label)); + + memref = gen_rtx_MEM (DImode, want); + MEM_VOLATILE_P (memref) = 1; + emit_move_insn (memref, const0_rtx); + + if (out_label) + emit_label (out_label); + + emit_move_insn (stack_pointer_rtx, want); + emit_move_insn (operands[0], virtual_stack_dynamic_rtx); + DONE; + } +}) + +;; This is used by alpha_expand_prolog to do the same thing as above, +;; except we cannot at that time generate new basic blocks, so we hide +;; the loop in this one insn. + +(define_insn "prologue_stack_probe_loop" + [(unspec_volatile [(match_operand:DI 0 "register_operand" "r") + (match_operand:DI 1 "register_operand" "r")] + UNSPECV_PSPL)] + "" +{ + operands[2] = gen_label_rtx (); + (*targetm.asm_out.internal_label) (asm_out_file, "L", + CODE_LABEL_NUMBER (operands[2])); + + return "stq $31,-8192(%1)\;subq %0,1,%0\;lda %1,-8192(%1)\;bne %0,%l2"; +} + [(set_attr "length" "16") + (set_attr "type" "multi")]) + +(define_expand "prologue" + [(const_int 0)] + "" +{ + alpha_expand_prologue (); + DONE; +}) + +;; These take care of emitting the ldgp insn in the prologue. This will be +;; an lda/ldah pair and we want to align them properly. So we have two +;; unspec_volatile insns, the first of which emits the ldgp assembler macro +;; and the second of which emits nothing. However, both are marked as type +;; IADD (the default) so the alignment code in alpha.c does the right thing +;; with them. + +(define_expand "prologue_ldgp" + [(set (match_dup 0) + (unspec_volatile:DI [(match_dup 1) (match_dup 2)] UNSPECV_LDGP1)) + (set (match_dup 0) + (unspec_volatile:DI [(match_dup 0) (match_dup 2)] UNSPECV_PLDGP2))] + "" +{ + operands[0] = pic_offset_table_rtx; + operands[1] = gen_rtx_REG (Pmode, 27); + operands[2] = (TARGET_EXPLICIT_RELOCS + ? 
GEN_INT (alpha_next_sequence_number++)
+                  : const0_rtx);
+})
+
+(define_insn "*ldgp_er_1"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r")
+                             (match_operand 2 "const_int_operand")]
+                            UNSPECV_LDGP1))]
+  "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+  "ldah %0,0(%1)\t\t!gpdisp!%2"
+  [(set_attr "cannot_copy" "true")])
+
+(define_insn "*ldgp_er_2"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+                    (match_operand 2 "const_int_operand")]
+                   UNSPEC_LDGP2))]
+  "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+  "lda %0,0(%1)\t\t!gpdisp!%2"
+  [(set_attr "cannot_copy" "true")])
+
+(define_insn "*prologue_ldgp_er_2"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r")
+                             (match_operand 2 "const_int_operand")]
+                            UNSPECV_PLDGP2))]
+  "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
+  "lda %0,0(%1)\t\t!gpdisp!%2\n$%~..ng:"
+  [(set_attr "cannot_copy" "true")])
+
+(define_insn "*prologue_ldgp_1"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r")
+                             (match_operand 2 "const_int_operand")]
+                            UNSPECV_LDGP1))]
+  ""
+  "ldgp %0,0(%1)\n$%~..ng:"
+  [(set_attr "cannot_copy" "true")])
+
+(define_insn "*prologue_ldgp_2"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r")
+                             (match_operand 2 "const_int_operand")]
+                            UNSPECV_PLDGP2))]
+  ""
+  "")
+
+;; The _mcount profiling hook has special calling conventions, and
+;; does not clobber all the registers that a normal call would.  So
+;; hide the fact this is a call at all.
+
+(define_insn "prologue_mcount"
+  [(unspec_volatile [(const_int 0)] UNSPECV_MCOUNT)]
+  ""
+{
+  if (TARGET_EXPLICIT_RELOCS)
+    /* Note that we cannot use a lituse_jsr reloc, since _mcount
+       cannot be called via the PLT.  */
+    return "ldq $28,_mcount($29)\t\t!literal\;jsr $28,($28),_mcount";
+  else
+    return "lda $28,_mcount\;jsr $28,($28),_mcount";
+}
+  [(set_attr "type" "multi")
+   (set_attr "length" "8")])
+
+(define_insn "init_fp"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (match_operand:DI 1 "register_operand" "r"))
+   (clobber (mem:BLK (match_operand:DI 2 "register_operand" "=r")))]
+  ""
+  "bis $31,%1,%0")
+
+(define_expand "epilogue"
+  [(return)]
+  ""
+  "alpha_expand_epilogue ();")
+
+(define_expand "sibcall_epilogue"
+  [(return)]
+  "TARGET_ABI_OSF"
+{
+  alpha_expand_epilogue ();
+  DONE;
+})
+
+(define_expand "builtin_longjmp"
+  [(use (match_operand:DI 0 "register_operand" "r"))]
+  "TARGET_ABI_OSF"
+{
+  /* The elements of the buffer are, in order:  */
+  rtx fp = gen_rtx_MEM (Pmode, operands[0]);
+  rtx lab = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 8));
+  rtx stack = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 16));
+  rtx pv = gen_rtx_REG (Pmode, 27);
+
+  /* This bit is the same as expand_builtin_longjmp.  */
+  emit_move_insn (hard_frame_pointer_rtx, fp);
+  emit_move_insn (pv, lab);
+  emit_stack_restore (SAVE_NONLOCAL, stack);
+  emit_use (hard_frame_pointer_rtx);
+  emit_use (stack_pointer_rtx);
+
+  /* Load the label we are jumping through into $27 so that we know
+     where to look for it when we get back to setjmp's function for
+     restoring the gp.
*/
+  emit_jump_insn (gen_builtin_longjmp_internal (pv));
+  emit_barrier ();
+  DONE;
+})
+
+;; This is effectively a copy of indirect_jump, but constrained such
+;; that register renaming cannot foil our cunning plan with $27.
+(define_insn "builtin_longjmp_internal"
+  [(set (pc)
+	(unspec_volatile [(match_operand:DI 0 "register_operand" "c")]
+			 UNSPECV_LONGJMP))]
+  ""
+  "jmp $31,(%0),0"
+  [(set_attr "type" "ibr")])
+
+(define_expand "builtin_setjmp_receiver"
+  [(unspec_volatile [(label_ref (match_operand 0))] UNSPECV_SETJMPR)]
+  "TARGET_ABI_OSF")
+
+(define_insn_and_split "*builtin_setjmp_receiver_1"
+  [(unspec_volatile [(match_operand 0)] UNSPECV_SETJMPR)]
+  "TARGET_ABI_OSF"
+{
+  if (TARGET_EXPLICIT_RELOCS)
+    return "#";
+  else
+    return "br $27,$LSJ%=\n$LSJ%=:\;ldgp $29,0($27)";
+}
+  "&& TARGET_EXPLICIT_RELOCS && reload_completed"
+  [(set (match_dup 1)
+	(unspec_volatile:DI [(match_dup 2) (match_dup 3)] UNSPECV_LDGP1))
+   (set (match_dup 1)
+	(unspec:DI [(match_dup 1) (match_dup 3)] UNSPEC_LDGP2))]
+{
+  if (prev_nonnote_insn (curr_insn) != XEXP (operands[0], 0))
+    emit_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (1, operands[0]),
+					UNSPECV_SETJMPR_ER));
+  operands[1] = pic_offset_table_rtx;
+  operands[2] = gen_rtx_REG (Pmode, 27);
+  operands[3] = GEN_INT (alpha_next_sequence_number++);
+}
+  [(set_attr "length" "12")
+   (set_attr "type" "multi")])
+
+(define_insn "*builtin_setjmp_receiver_er_sl_1"
+  [(unspec_volatile [(match_operand 0)] UNSPECV_SETJMPR_ER)]
+  "TARGET_ABI_OSF && TARGET_EXPLICIT_RELOCS"
+  "lda $27,$LSJ%=-%l0($27)\n$LSJ%=:")
+
+;; When flag_reorder_blocks_and_partition is in effect, the compiler puts
+;; exception landing pads in a cold section.  To prevent inter-section offset
+;; calculation, a jump to the original landing pad is emitted in the place of
+;; the original landing pad.  Since the landing pad is moved, the RA-relative
+;; GP calculation in the prologue of the landing pad breaks.  To solve this
+;; problem, we use an alternative GP load approach.
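+;;
+;; Concretely, the two receivers defined next reduce to the following
+;; (a sketch; the frame offset shown is illustrative, not computed):
+;;
+;;	ldq $29,16($15)		! partitioned: reload the GP from the
+;;				! stack slot provided by alpha_gp_save_rtx
+;; versus
+;;	ldah $29,0($26)		!gpdisp!N
+;;	lda $29,0($29)		!gpdisp!N	! default: recompute GP
+;;						! from the RA in $26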
+ +(define_expand "exception_receiver" + [(unspec_volatile [(match_dup 0)] UNSPECV_EHR)] + "TARGET_ABI_OSF" +{ + if (flag_reorder_blocks_and_partition) + operands[0] = alpha_gp_save_rtx (); + else + operands[0] = const0_rtx; +}) + +(define_insn "*exception_receiver_2" + [(unspec_volatile [(match_operand:DI 0 "memory_operand" "m")] UNSPECV_EHR)] + "TARGET_ABI_OSF && flag_reorder_blocks_and_partition" + "ldq $29,%0" + [(set_attr "type" "ild")]) + +(define_insn_and_split "*exception_receiver_1" + [(unspec_volatile [(const_int 0)] UNSPECV_EHR)] + "TARGET_ABI_OSF" +{ + if (TARGET_EXPLICIT_RELOCS) + return "ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%*"; + else + return "ldgp $29,0($26)"; +} + "&& TARGET_EXPLICIT_RELOCS && reload_completed" + [(set (match_dup 0) + (unspec_volatile:DI [(match_dup 1) (match_dup 2)] UNSPECV_LDGP1)) + (set (match_dup 0) + (unspec:DI [(match_dup 0) (match_dup 2)] UNSPEC_LDGP2))] +{ + operands[0] = pic_offset_table_rtx; + operands[1] = gen_rtx_REG (Pmode, 26); + operands[2] = GEN_INT (alpha_next_sequence_number++); +} + [(set_attr "length" "8") + (set_attr "type" "multi")]) + +(define_expand "nonlocal_goto_receiver" + [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE) + (set (reg:DI 27) (mem:DI (reg:DI 29))) + (unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE) + (use (reg:DI 27))] + "TARGET_ABI_OPEN_VMS") + +(define_insn "arg_home" + [(unspec [(const_int 0)] UNSPEC_ARG_HOME) + (use (reg:DI 1)) + (use (reg:DI 25)) + (use (reg:DI 16)) + (use (reg:DI 17)) + (use (reg:DI 18)) + (use (reg:DI 19)) + (use (reg:DI 20)) + (use (reg:DI 21)) + (use (reg:DI 48)) + (use (reg:DI 49)) + (use (reg:DI 50)) + (use (reg:DI 51)) + (use (reg:DI 52)) + (use (reg:DI 53)) + (clobber (mem:BLK (const_int 0))) + (clobber (reg:DI 24)) + (clobber (reg:DI 25)) + (clobber (reg:DI 0))] + "TARGET_ABI_OPEN_VMS" + "lda $0,OTS$HOME_ARGS\;ldq $0,8($0)\;jsr $0,OTS$HOME_ARGS" + [(set_attr "length" "16") + (set_attr "type" "multi")]) + +;; Prefetch data. +;; +;; On EV4, these instructions are nops -- no load occurs. +;; +;; On EV5, these instructions act as a normal load, and thus can trap +;; if the address is invalid. The OS may (or may not) handle this in +;; the entMM fault handler and suppress the fault. If so, then this +;; has the effect of a read prefetch instruction. +;; +;; On EV6, these become official prefetch instructions. + +(define_insn "prefetch" + [(prefetch (match_operand:DI 0 "address_operand" "p") + (match_operand:DI 1 "const_int_operand" "n") + (match_operand:DI 2 "const_int_operand" "n"))] + "TARGET_FIXUP_EV5_PREFETCH || alpha_cpu == PROCESSOR_EV6" +{ + /* Interpret "no temporal locality" as this data should be evicted once + it is used. The "evict next" alternatives load the data into the cache + and leave the LRU eviction counter pointing to that block. */ + static const char * const alt[2][2] = { + { + "ldq $31,%a0", /* read, evict next */ + "ldl $31,%a0", /* read, evict last */ + }, + { + "ldt $f31,%a0", /* write, evict next */ + "lds $f31,%a0", /* write, evict last */ + } + }; + + bool write = INTVAL (operands[1]) != 0; + bool lru = INTVAL (operands[2]) != 0; + + return alt[write][lru]; +} + [(set_attr "type" "ild")]) + +;; Close the trap shadow of preceding instructions. This is generated +;; by alpha_reorg. + +(define_insn "trapb" + [(unspec_volatile [(const_int 0)] UNSPECV_TRAPB)] + "" + "trapb" + [(set_attr "type" "misc")]) + +;; No-op instructions used by machine-dependent reorg to preserve +;; alignment for instruction issue. 
+;; The Unicos/Mk assembler does not support these opcodes. + +(define_insn "nop" + [(const_int 0)] + "" + "bis $31,$31,$31" + [(set_attr "type" "ilog")]) + +(define_insn "fnop" + [(const_int 1)] + "TARGET_FP" + "cpys $f31,$f31,$f31" + [(set_attr "type" "fcpys")]) + +(define_insn "unop" + [(const_int 2)] + "" + "ldq_u $31,0($30)") + +(define_insn "realign" + [(unspec_volatile [(match_operand 0 "immediate_operand" "i")] + UNSPECV_REALIGN)] + "" + ".align %0 #realign") + +;; Instructions to be emitted from __builtins. + +(define_insn "builtin_cmpbge" + [(set (match_operand:DI 0 "register_operand" "=r") + (unspec:DI [(match_operand:DI 1 "reg_or_0_operand" "rJ") + (match_operand:DI 2 "reg_or_8bit_operand" "rI")] + UNSPEC_CMPBGE))] + "" + "cmpbge %r1,%2,%0" + ;; The EV6 data sheets list this as ILOG. OTOH, EV6 doesn't + ;; actually differentiate between ILOG and ICMP in the schedule. + [(set_attr "type" "icmp")]) + +(define_expand "extbl" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + emit_insn (gen_extxl (operands[0], operands[1], GEN_INT (8), operands[2])); + DONE; +}) + +(define_expand "extwl" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + emit_insn (gen_extxl (operands[0], operands[1], GEN_INT (16), operands[2])); + DONE; +}) + +(define_expand "extll" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + emit_insn (gen_extxl (operands[0], operands[1], GEN_INT (32), operands[2])); + DONE; +}) + +(define_expand "extql" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + emit_insn (gen_extxl (operands[0], operands[1], GEN_INT (64), operands[2])); + DONE; +}) + +(define_expand "builtin_insbl" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + operands[1] = gen_lowpart (QImode, operands[1]); + emit_insn (gen_insbl (operands[0], operands[1], operands[2])); + DONE; +}) + +(define_expand "builtin_inswl" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + operands[1] = gen_lowpart (HImode, operands[1]); + emit_insn (gen_inswl (operands[0], operands[1], operands[2])); + DONE; +}) + +(define_expand "builtin_insll" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + operands[1] = gen_lowpart (SImode, operands[1]); + emit_insn (gen_insll (operands[0], operands[1], operands[2])); + DONE; +}) + +(define_expand "inswh" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + emit_insn (gen_insxh (operands[0], operands[1], GEN_INT (16), operands[2])); + DONE; +}) + +(define_expand "inslh" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + emit_insn (gen_insxh (operands[0], operands[1], GEN_INT (32), operands[2])); + DONE; +}) + +(define_expand "insqh" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + emit_insn (gen_insxh 
(operands[0], operands[1], GEN_INT (64), operands[2])); + DONE; +}) + +(define_expand "mskbl" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + rtx mask = GEN_INT (0xff); + emit_insn (gen_mskxl (operands[0], operands[1], mask, operands[2])); + DONE; +}) + +(define_expand "mskwl" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + rtx mask = GEN_INT (0xffff); + emit_insn (gen_mskxl (operands[0], operands[1], mask, operands[2])); + DONE; +}) + +(define_expand "mskll" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + rtx mask = immed_double_const (0xffffffff, 0, DImode); + emit_insn (gen_mskxl (operands[0], operands[1], mask, operands[2])); + DONE; +}) + +(define_expand "mskql" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + rtx mask = constm1_rtx; + emit_insn (gen_mskxl (operands[0], operands[1], mask, operands[2])); + DONE; +}) + +(define_expand "mskwh" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + emit_insn (gen_mskxh (operands[0], operands[1], GEN_INT (16), operands[2])); + DONE; +}) + +(define_expand "msklh" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + emit_insn (gen_mskxh (operands[0], operands[1], GEN_INT (32), operands[2])); + DONE; +}) + +(define_expand "mskqh" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "reg_or_8bit_operand")] + "" +{ + emit_insn (gen_mskxh (operands[0], operands[1], GEN_INT (64), operands[2])); + DONE; +}) + +(define_expand "builtin_zap" + [(set (match_operand:DI 0 "register_operand") + (and:DI (unspec:DI + [(match_operand:DI 2 "reg_or_cint_operand")] + UNSPEC_ZAP) + (match_operand:DI 1 "reg_or_cint_operand")))] + "" +{ + if (CONST_INT_P (operands[2])) + { + rtx mask = alpha_expand_zap_mask (INTVAL (operands[2])); + + if (mask == const0_rtx) + { + emit_move_insn (operands[0], const0_rtx); + DONE; + } + if (mask == constm1_rtx) + { + emit_move_insn (operands[0], operands[1]); + DONE; + } + + operands[1] = force_reg (DImode, operands[1]); + emit_insn (gen_anddi3 (operands[0], operands[1], mask)); + DONE; + } + + operands[1] = force_reg (DImode, operands[1]); + operands[2] = gen_lowpart (QImode, operands[2]); +}) + +(define_insn "*builtin_zap_1" + [(set (match_operand:DI 0 "register_operand" "=r,r,r,r") + (and:DI (unspec:DI + [(match_operand:QI 2 "reg_or_cint_operand" "n,n,r,r")] + UNSPEC_ZAP) + (match_operand:DI 1 "reg_or_cint_operand" "n,r,J,r")))] + "" + "@ + # + # + bis $31,$31,%0 + zap %r1,%2,%0" + [(set_attr "type" "shift,shift,ilog,shift")]) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (and:DI (unspec:DI + [(match_operand:QI 2 "const_int_operand")] + UNSPEC_ZAP) + (match_operand:DI 1 "const_int_operand")))] + "" + [(const_int 0)] +{ + rtx mask = alpha_expand_zap_mask (INTVAL (operands[2])); + if (HOST_BITS_PER_WIDE_INT >= 64 || CONST_INT_P (mask)) + operands[1] = gen_int_mode (INTVAL (operands[1]) & INTVAL (mask), DImode); + else + { + HOST_WIDE_INT c_lo = INTVAL (operands[1]); + HOST_WIDE_INT c_hi = (c_lo < 0 ? 
-1 : 0); + operands[1] = immed_double_const (c_lo & CONST_DOUBLE_LOW (mask), + c_hi & CONST_DOUBLE_HIGH (mask), + DImode); + } + emit_move_insn (operands[0], operands[1]); + DONE; +}) + +(define_split + [(set (match_operand:DI 0 "register_operand") + (and:DI (unspec:DI + [(match_operand:QI 2 "const_int_operand")] + UNSPEC_ZAP) + (match_operand:DI 1 "register_operand")))] + "" + [(set (match_dup 0) + (and:DI (match_dup 1) (match_dup 2)))] +{ + operands[2] = alpha_expand_zap_mask (INTVAL (operands[2])); + if (operands[2] == const0_rtx) + { + emit_move_insn (operands[0], const0_rtx); + DONE; + } + if (operands[2] == constm1_rtx) + { + emit_move_insn (operands[0], operands[1]); + DONE; + } +}) + +(define_expand "builtin_zapnot" + [(set (match_operand:DI 0 "register_operand") + (and:DI (unspec:DI + [(not:QI (match_operand:DI 2 "reg_or_cint_operand"))] + UNSPEC_ZAP) + (match_operand:DI 1 "reg_or_cint_operand")))] + "" +{ + if (CONST_INT_P (operands[2])) + { + rtx mask = alpha_expand_zap_mask (~ INTVAL (operands[2])); + + if (mask == const0_rtx) + { + emit_move_insn (operands[0], const0_rtx); + DONE; + } + if (mask == constm1_rtx) + { + emit_move_insn (operands[0], operands[1]); + DONE; + } + + operands[1] = force_reg (DImode, operands[1]); + emit_insn (gen_anddi3 (operands[0], operands[1], mask)); + DONE; + } + + operands[1] = force_reg (DImode, operands[1]); + operands[2] = gen_lowpart (QImode, operands[2]); +}) + +(define_insn "*builtin_zapnot_1" + [(set (match_operand:DI 0 "register_operand" "=r") + (and:DI (unspec:DI + [(not:QI (match_operand:QI 2 "register_operand" "r"))] + UNSPEC_ZAP) + (match_operand:DI 1 "reg_or_0_operand" "rJ")))] + "" + "zapnot %r1,%2,%0" + [(set_attr "type" "shift")]) + +(define_insn "builtin_amask" + [(set (match_operand:DI 0 "register_operand" "=r") + (unspec:DI [(match_operand:DI 1 "reg_or_8bit_operand" "rI")] + UNSPEC_AMASK))] + "" + "amask %1,%0" + [(set_attr "type" "ilog")]) + +(define_insn "builtin_implver" + [(set (match_operand:DI 0 "register_operand" "=r") + (unspec:DI [(const_int 0)] UNSPEC_IMPLVER))] + "" + "implver %0" + [(set_attr "type" "ilog")]) + +(define_insn "builtin_rpcc" + [(set (match_operand:DI 0 "register_operand" "=r") + (unspec_volatile:DI [(const_int 0)] UNSPECV_RPCC))] + "" + "rpcc %0" + [(set_attr "type" "ilog")]) + +(define_expand "builtin_minub8" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_0_operand")] + "TARGET_MAX" +{ + alpha_expand_builtin_vector_binop (gen_uminv8qi3, V8QImode, operands[0], + operands[1], operands[2]); + DONE; +}) + +(define_expand "builtin_minsb8" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_0_operand")] + "TARGET_MAX" +{ + alpha_expand_builtin_vector_binop (gen_sminv8qi3, V8QImode, operands[0], + operands[1], operands[2]); + DONE; +}) + +(define_expand "builtin_minuw4" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_0_operand")] + "TARGET_MAX" +{ + alpha_expand_builtin_vector_binop (gen_uminv4hi3, V4HImode, operands[0], + operands[1], operands[2]); + DONE; +}) + +(define_expand "builtin_minsw4" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_0_operand")] + "TARGET_MAX" +{ + alpha_expand_builtin_vector_binop (gen_sminv4hi3, V4HImode, operands[0], + operands[1], operands[2]); + DONE; +}) + +(define_expand "builtin_maxub8" + [(match_operand:DI 0 
"register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_0_operand")] + "TARGET_MAX" +{ + alpha_expand_builtin_vector_binop (gen_umaxv8qi3, V8QImode, operands[0], + operands[1], operands[2]); + DONE; +}) + +(define_expand "builtin_maxsb8" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_0_operand")] + "TARGET_MAX" +{ + alpha_expand_builtin_vector_binop (gen_smaxv8qi3, V8QImode, operands[0], + operands[1], operands[2]); + DONE; +}) + +(define_expand "builtin_maxuw4" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_0_operand")] + "TARGET_MAX" +{ + alpha_expand_builtin_vector_binop (gen_umaxv4hi3, V4HImode, operands[0], + operands[1], operands[2]); + DONE; +}) + +(define_expand "builtin_maxsw4" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "reg_or_0_operand") + (match_operand:DI 2 "reg_or_0_operand")] + "TARGET_MAX" +{ + alpha_expand_builtin_vector_binop (gen_smaxv4hi3, V4HImode, operands[0], + operands[1], operands[2]); + DONE; +}) + +(define_insn "builtin_perr" + [(set (match_operand:DI 0 "register_operand" "=r") + (unspec:DI [(match_operand:DI 1 "reg_or_0_operand" "%rJ") + (match_operand:DI 2 "reg_or_8bit_operand" "rJ")] + UNSPEC_PERR))] + "TARGET_MAX" + "perr %r1,%r2,%0" + [(set_attr "type" "mvi")]) + +(define_expand "builtin_pklb" + [(set (match_operand:DI 0 "register_operand") + (vec_concat:V8QI + (vec_concat:V4QI + (truncate:V2QI (match_operand:DI 1 "register_operand")) + (match_dup 2)) + (match_dup 3)))] + "TARGET_MAX" +{ + operands[0] = gen_lowpart (V8QImode, operands[0]); + operands[1] = gen_lowpart (V2SImode, operands[1]); + operands[2] = CONST0_RTX (V2QImode); + operands[3] = CONST0_RTX (V4QImode); +}) + +(define_insn "*pklb" + [(set (match_operand:V8QI 0 "register_operand" "=r") + (vec_concat:V8QI + (vec_concat:V4QI + (truncate:V2QI (match_operand:V2SI 1 "register_operand" "r")) + (match_operand:V2QI 2 "const0_operand")) + (match_operand:V4QI 3 "const0_operand")))] + "TARGET_MAX" + "pklb %r1,%0" + [(set_attr "type" "mvi")]) + +(define_expand "builtin_pkwb" + [(set (match_operand:DI 0 "register_operand") + (vec_concat:V8QI + (truncate:V4QI (match_operand:DI 1 "register_operand")) + (match_dup 2)))] + "TARGET_MAX" +{ + operands[0] = gen_lowpart (V8QImode, operands[0]); + operands[1] = gen_lowpart (V4HImode, operands[1]); + operands[2] = CONST0_RTX (V4QImode); +}) + +(define_insn "*pkwb" + [(set (match_operand:V8QI 0 "register_operand" "=r") + (vec_concat:V8QI + (truncate:V4QI (match_operand:V4HI 1 "register_operand" "r")) + (match_operand:V4QI 2 "const0_operand")))] + "TARGET_MAX" + "pkwb %r1,%0" + [(set_attr "type" "mvi")]) + +(define_expand "builtin_unpkbl" + [(set (match_operand:DI 0 "register_operand") + (zero_extend:V2SI + (vec_select:V2QI (match_operand:DI 1 "register_operand") + (parallel [(const_int 0) (const_int 1)]))))] + "TARGET_MAX" +{ + operands[0] = gen_lowpart (V2SImode, operands[0]); + operands[1] = gen_lowpart (V8QImode, operands[1]); +}) + +(define_insn "*unpkbl" + [(set (match_operand:V2SI 0 "register_operand" "=r") + (zero_extend:V2SI + (vec_select:V2QI (match_operand:V8QI 1 "reg_or_0_operand" "rW") + (parallel [(const_int 0) (const_int 1)]))))] + "TARGET_MAX" + "unpkbl %r1,%0" + [(set_attr "type" "mvi")]) + +(define_expand "builtin_unpkbw" + [(set (match_operand:DI 0 "register_operand") + (zero_extend:V4HI + (vec_select:V4QI (match_operand:DI 1 "register_operand") + (parallel 
[(const_int 0) + (const_int 1) + (const_int 2) + (const_int 3)]))))] + "TARGET_MAX" +{ + operands[0] = gen_lowpart (V4HImode, operands[0]); + operands[1] = gen_lowpart (V8QImode, operands[1]); +}) + +(define_insn "*unpkbw" + [(set (match_operand:V4HI 0 "register_operand" "=r") + (zero_extend:V4HI + (vec_select:V4QI (match_operand:V8QI 1 "reg_or_0_operand" "rW") + (parallel [(const_int 0) + (const_int 1) + (const_int 2) + (const_int 3)]))))] + "TARGET_MAX" + "unpkbw %r1,%0" + [(set_attr "type" "mvi")]) + +(include "sync.md") + +;; The call patterns are at the end of the file because their +;; wildcard operand0 interferes with nice recognition. + +(define_insn "*call_value_osf_1_er_noreturn" + [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) + (match_operand 2))) + (use (reg:DI 29)) + (clobber (reg:DI 26))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF + && find_reg_note (insn, REG_NORETURN, NULL_RTX)" + "@ + jsr $26,($27),0 + bsr $26,%1\t\t!samegp + ldq $27,%1($29)\t\t!literal!%#\;jsr $26,($27),%1\t\t!lituse_jsr!%#" + [(set_attr "type" "jsr") + (set_attr "length" "*,*,8")]) + +(define_insn "*call_value_osf_1_er" + [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) + (match_operand 2))) + (use (reg:DI 29)) + (clobber (reg:DI 26))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "@ + jsr $26,(%1),0\;ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%* + bsr $26,%1\t\t!samegp + ldq $27,%1($29)\t\t!literal!%#\;jsr $26,($27),0\t\t!lituse_jsr!%#\;ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%*" + [(set_attr "type" "jsr") + (set_attr "length" "12,*,16")]) + +;; We must use peep2 instead of a split because we need accurate life +;; information for $gp. Consider the case of { bar(); while (1); }. +(define_peephole2 + [(parallel [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "call_operand")) + (match_operand 2))) + (use (reg:DI 29)) + (clobber (reg:DI 26))])] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF && reload_completed + && ! samegp_function_operand (operands[1], Pmode) + && (peep2_regno_dead_p (1, 29) + || find_reg_note (insn, REG_NORETURN, NULL_RTX))" + [(parallel [(set (match_dup 0) + (call (mem:DI (match_dup 3)) + (match_dup 2))) + (use (reg:DI 29)) + (use (match_dup 1)) + (use (match_dup 4)) + (clobber (reg:DI 26))])] +{ + if (CONSTANT_P (operands[1])) + { + operands[3] = gen_rtx_REG (Pmode, 27); + operands[4] = GEN_INT (alpha_next_sequence_number++); + emit_insn (gen_movdi_er_high_g (operands[3], pic_offset_table_rtx, + operands[1], operands[4])); + } + else + { + operands[3] = operands[1]; + operands[1] = const0_rtx; + operands[4] = const0_rtx; + } +}) + +(define_peephole2 + [(parallel [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "call_operand")) + (match_operand 2))) + (use (reg:DI 29)) + (clobber (reg:DI 26))])] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF && reload_completed + && ! samegp_function_operand (operands[1], Pmode) + && ! 
(peep2_regno_dead_p (1, 29) + || find_reg_note (insn, REG_NORETURN, NULL_RTX))" + [(parallel [(set (match_dup 0) + (call (mem:DI (match_dup 3)) + (match_dup 2))) + (set (match_dup 6) + (unspec:DI [(match_dup 6) (match_dup 4)] UNSPEC_LDGP1)) + (use (match_dup 1)) + (use (match_dup 5)) + (clobber (reg:DI 26))]) + (set (match_dup 6) + (unspec:DI [(match_dup 6) (match_dup 4)] UNSPEC_LDGP2))] +{ + if (CONSTANT_P (operands[1])) + { + operands[3] = gen_rtx_REG (Pmode, 27); + operands[5] = GEN_INT (alpha_next_sequence_number++); + emit_insn (gen_movdi_er_high_g (operands[3], pic_offset_table_rtx, + operands[1], operands[5])); + } + else + { + operands[3] = operands[1]; + operands[1] = const0_rtx; + operands[5] = const0_rtx; + } + operands[4] = GEN_INT (alpha_next_sequence_number++); + operands[6] = pic_offset_table_rtx; +}) + +(define_insn "*call_value_osf_2_er_nogp" + [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "register_operand" "c")) + (match_operand 2))) + (use (reg:DI 29)) + (use (match_operand 3)) + (use (match_operand 4)) + (clobber (reg:DI 26))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "jsr $26,(%1),%3%J4" + [(set_attr "type" "jsr")]) + +(define_insn "*call_value_osf_2_er" + [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "register_operand" "c")) + (match_operand 2))) + (set (reg:DI 29) + (unspec:DI [(reg:DI 29) (match_operand 5 "const_int_operand")] + UNSPEC_LDGP1)) + (use (match_operand 3)) + (use (match_operand 4)) + (clobber (reg:DI 26))] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "jsr $26,(%1),%3%J4\;ldah $29,0($26)\t\t!gpdisp!%5" + [(set_attr "type" "jsr") + (set_attr "cannot_copy" "true") + (set_attr "length" "8")]) + +(define_insn "*call_value_osf_1_noreturn" + [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) + (match_operand 2))) + (use (reg:DI 29)) + (clobber (reg:DI 26))] + "! 
TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF + && find_reg_note (insn, REG_NORETURN, NULL_RTX)" + "@ + jsr $26,($27),0 + bsr $26,$%1..ng + jsr $26,%1" + [(set_attr "type" "jsr") + (set_attr "length" "*,*,8")]) + +(define_insn_and_split "call_value_osf_tlsgd" + [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "symbolic_operand")) + (const_int 0))) + (unspec [(match_operand:DI 2 "const_int_operand")] UNSPEC_TLSGD_CALL) + (use (reg:DI 29)) + (clobber (reg:DI 26))] + "HAVE_AS_TLS" + "#" + "&& reload_completed" + [(set (match_dup 3) + (unspec:DI [(match_dup 5) + (match_dup 1) + (match_dup 2)] UNSPEC_LITERAL)) + (parallel [(set (match_dup 0) + (call (mem:DI (match_dup 3)) + (const_int 0))) + (set (match_dup 5) + (unspec:DI [(match_dup 5) (match_dup 4)] UNSPEC_LDGP1)) + (use (match_dup 1)) + (use (unspec [(match_dup 2)] UNSPEC_TLSGD_CALL)) + (clobber (reg:DI 26))]) + (set (match_dup 5) + (unspec:DI [(match_dup 5) (match_dup 4)] UNSPEC_LDGP2))] +{ + operands[3] = gen_rtx_REG (Pmode, 27); + operands[4] = GEN_INT (alpha_next_sequence_number++); + operands[5] = pic_offset_table_rtx; +} + [(set_attr "type" "multi")]) + +(define_insn_and_split "call_value_osf_tlsldm" + [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "symbolic_operand")) + (const_int 0))) + (unspec [(match_operand:DI 2 "const_int_operand")] UNSPEC_TLSLDM_CALL) + (use (reg:DI 29)) + (clobber (reg:DI 26))] + "HAVE_AS_TLS" + "#" + "&& reload_completed" + [(set (match_dup 3) + (unspec:DI [(match_dup 5) + (match_dup 1) + (match_dup 2)] UNSPEC_LITERAL)) + (parallel [(set (match_dup 0) + (call (mem:DI (match_dup 3)) + (const_int 0))) + (set (match_dup 5) + (unspec:DI [(match_dup 5) (match_dup 4)] UNSPEC_LDGP1)) + (use (match_dup 1)) + (use (unspec [(match_dup 2)] UNSPEC_TLSLDM_CALL)) + (clobber (reg:DI 26))]) + (set (match_dup 5) + (unspec:DI [(match_dup 5) (match_dup 4)] UNSPEC_LDGP2))] +{ + operands[3] = gen_rtx_REG (Pmode, 27); + operands[4] = GEN_INT (alpha_next_sequence_number++); + operands[5] = pic_offset_table_rtx; +} + [(set_attr "type" "multi")]) + +(define_insn "*call_value_osf_1" + [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) + (match_operand 2))) + (use (reg:DI 29)) + (clobber (reg:DI 26))] + "! TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "@ + jsr $26,($27),0\;ldgp $29,0($26) + bsr $26,$%1..ng + jsr $26,%1\;ldgp $29,0($26)" + [(set_attr "type" "jsr") + (set_attr "length" "12,*,16")]) + +(define_insn "*sibcall_value_osf_1_er" + [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "symbolic_operand" "R,s")) + (match_operand 2))) + (unspec [(reg:DI 29)] UNSPEC_SIBCALL)] + "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "@ + br $31,%1\t\t!samegp + ldq $27,%1($29)\t\t!literal!%#\;jmp $31,($27),%1\t\t!lituse_jsr!%#" + [(set_attr "type" "jsr") + (set_attr "length" "*,8")]) + +(define_insn "*sibcall_value_osf_1" + [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "symbolic_operand" "R,s")) + (match_operand 2))) + (unspec [(reg:DI 29)] UNSPEC_SIBCALL)] + "! TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF" + "@ + br $31,$%1..ng + lda $27,%1\;jmp $31,($27),%1" + [(set_attr "type" "jsr") + (set_attr "length" "*,8")]) + +; GAS relies on the order and position of instructions output below in order +; to generate relocs for VMS link to potentially optimize the call. +; Please do not molest. 
+(define_insn "*call_value_vms_1" + [(set (match_operand 0) + (call (mem:DI (match_operand:DI 1 "call_operand" "r,s")) + (match_operand 2))) + (use (match_operand:DI 3 "nonmemory_operand" "r,n")) + (use (reg:DI 25)) + (use (reg:DI 26)) + (clobber (reg:DI 27))] + "TARGET_ABI_OPEN_VMS" +{ + switch (which_alternative) + { + case 0: + return "mov %3,$27\;jsr $26,0\;ldq $27,0($29)"; + case 1: + operands [3] = alpha_use_linkage (operands [1], true, false); + operands [4] = alpha_use_linkage (operands [1], false, false); + return "ldq $26,%4\;ldq $27,%3\;jsr $26,%1\;ldq $27,0($29)"; + default: + gcc_unreachable (); + } +} + [(set_attr "type" "jsr") + (set_attr "length" "12,16")]) diff --git a/gcc-4.9/gcc/config/alpha/alpha.opt b/gcc-4.9/gcc/config/alpha/alpha.opt new file mode 100644 index 000000000..dc937ac66 --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/alpha.opt @@ -0,0 +1,130 @@ +; Options for the DEC Alpha port of the compiler +; +; Copyright (C) 2005-2014 Free Software Foundation, Inc. +; +; This file is part of GCC. +; +; GCC is free software; you can redistribute it and/or modify it under +; the terms of the GNU General Public License as published by the Free +; Software Foundation; either version 3, or (at your option) any later +; version. +; +; GCC is distributed in the hope that it will be useful, but WITHOUT +; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +; License for more details. +; +; You should have received a copy of the GNU General Public License +; along with GCC; see the file COPYING3. If not see +; <http://www.gnu.org/licenses/>. + +msoft-float +Target Report Mask(SOFT_FP) +Do not use hardware fp + +mfp-regs +Target Report Mask(FPREGS) +Use fp registers + +mgas +Target Ignore +Does nothing. Preserved for backward compatibility. 
+
+mieee-conformant
+Target RejectNegative Mask(IEEE_CONFORMANT)
+Request IEEE-conformant math library routines (OSF/1)
+
+mieee
+Target Report RejectNegative Mask(IEEE)
+Emit IEEE-conformant code, without inexact exceptions
+
+mieee-with-inexact
+Target Report RejectNegative Mask(IEEE_WITH_INEXACT)
+Emit IEEE-conformant code, with inexact exceptions
+
+mbuild-constants
+Target Report Mask(BUILD_CONSTANTS)
+Do not emit complex integer constants to read-only memory
+
+mfloat-vax
+Target Report RejectNegative Mask(FLOAT_VAX)
+Use VAX fp
+
+mfloat-ieee
+Target Report RejectNegative InverseMask(FLOAT_VAX)
+Do not use VAX fp
+
+mbwx
+Target Report Mask(BWX)
+Emit code for the byte/word ISA extension
+
+mmax
+Target Report Mask(MAX)
+Emit code for the motion video ISA extension
+
+mfix
+Target Report Mask(FIX)
+Emit code for the fp move and sqrt ISA extension
+
+mcix
+Target Report Mask(CIX)
+Emit code for the counting ISA extension
+
+mexplicit-relocs
+Target Report Mask(EXPLICIT_RELOCS)
+Emit code using explicit relocation directives
+
+msmall-data
+Target Report RejectNegative Mask(SMALL_DATA)
+Emit 16-bit relocations to the small data areas
+
+mlarge-data
+Target Report RejectNegative InverseMask(SMALL_DATA)
+Emit 32-bit relocations to the small data areas
+
+msmall-text
+Target Report RejectNegative Mask(SMALL_TEXT)
+Emit direct branches to local functions
+
+mlarge-text
+Target Report RejectNegative InverseMask(SMALL_TEXT)
+Emit indirect branches to local functions
+
+mtls-kernel
+Target Report Mask(TLS_KERNEL)
+Emit rdval instead of rduniq for thread pointer
+
+mlong-double-128
+Target Report RejectNegative Mask(LONG_DOUBLE_128)
+Use 128-bit long double
+
+mlong-double-64
+Target Report RejectNegative InverseMask(LONG_DOUBLE_128)
+Use 64-bit long double
+
+mcpu=
+Target RejectNegative Joined Var(alpha_cpu_string)
+Use features of and schedule given CPU
+
+mtune=
+Target RejectNegative Joined Var(alpha_tune_string)
+Schedule given CPU
+
+mfp-rounding-mode=
+Target RejectNegative Joined Var(alpha_fprm_string)
+Control the generated fp rounding mode
+
+mfp-trap-mode=
+Target RejectNegative Joined Var(alpha_fptm_string)
+Control the IEEE trap mode
+
+mtrap-precision=
+Target RejectNegative Joined Var(alpha_tp_string)
+Control the precision given to fp exceptions
+
+mmemory-latency=
+Target RejectNegative Joined Var(alpha_mlat_string)
+Tune expected memory latency
+
+mtls-size=
+Target RejectNegative Joined UInteger Var(alpha_tls_size) Init(32)
+Specify bit size of immediate TLS offsets
diff --git a/gcc-4.9/gcc/config/alpha/constraints.md b/gcc-4.9/gcc/config/alpha/constraints.md
new file mode 100644
index 000000000..e67c9a9a0
--- /dev/null
+++ b/gcc-4.9/gcc/config/alpha/constraints.md
@@ -0,0 +1,120 @@
+;; Constraint definitions for DEC Alpha.
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
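+
+;; As a worked example of reading these definitions: the "L" constraint
+;; below accepts a const_int whose low 16 bits are zero and which
+;; sign-extends from 32 bits -- exactly what one LDAH instruction can
+;; materialize.  So 0x7fff0000 matches, 0x12345678 fails the low-16-bit
+;; test, and 0x100000000 fails the sign-extension test, since
+;; (ival >> 31) is then neither 0 nor -1.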
+
+;;; Unused letters:
+;;;    ABCDEF                V  YZ
+;;;       de ghijkl    pq  tu   xyz
+
+;; Integer register constraints.
+
+(define_register_constraint "a" "R24_REG"
+ "General register 24, input to division routine")
+
+(define_register_constraint "b" "R25_REG"
+ "General register 25, input to division routine")
+
+(define_register_constraint "c" "R27_REG"
+ "General register 27, function call address")
+
+(define_register_constraint "f" "TARGET_FPREGS ? FLOAT_REGS : NO_REGS"
+ "Any floating-point register")
+
+(define_register_constraint "v" "R0_REG"
+ "General register 0, function return value")
+
+(define_memory_constraint "w"
+ "A memory whose address is only a register"
+ (match_operand 0 "mem_noofs_operand"))
+
+;; Integer constant constraints.
+(define_constraint "I"
+  "An unsigned 8 bit constant"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (ival, 0, 255)")))
+
+(define_constraint "J"
+  "The constant zero"
+  (and (match_code "const_int")
+       (match_test "ival == 0")))
+
+(define_constraint "K"
+  "Signed 16-bit integer constant"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (ival, -32768, 32767)")))
+
+(define_constraint "L"
+  "A shifted signed 16-bit constant appropriate for LDAH"
+  (and (match_code "const_int")
+       (match_test "(ival & 0xffff) == 0
+		    && (ival >> 31 == -1 || ival >> 31 == 0)")))
+
+(define_constraint "M"
+  "A valid operand of a ZAP insn"
+  (and (match_code "const_int")
+       (match_test "zap_mask (ival) != 0")))
+
+(define_constraint "N"
+  "A complemented unsigned 8-bit constant"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (~ival, 0, 255)")))
+
+(define_constraint "O"
+  "A negated unsigned 8-bit constant"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (-ival, 0, 255)")))
+
+(define_constraint "P"
+  "The constant 1, 2 or 3"
+  (and (match_code "const_int")
+       (match_test "ival == 1 || ival == 2 || ival == 3")))
+
+(define_constraint "H"
+  "A valid operand of a ZAP insn, when building with 32-bit HOST_WIDE_INT"
+  (and (match_code "const_double")
+       (match_test "mode == VOIDmode && zap_mask (hval) && zap_mask (lval)")))
+
+;; Floating-point constant constraints.
+(define_constraint "G"
+  "The floating point zero constant"
+  (and (match_code "const_double")
+       (match_test "GET_MODE_CLASS (mode) == MODE_FLOAT
+		    && op == CONST0_RTX (mode)")))
+
+;; "Extra" constraints.
+(define_constraint "Q"
+  "@internal A normal_memory_operand"
+  (match_operand 0 "normal_memory_operand"))
+
+(define_constraint "R"
+  "@internal A direct_call_operand"
+  (match_operand:DI 0 "direct_call_operand"))
+
+(define_constraint "S"
+  "An unsigned 6-bit constant"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (ival, 0, 63)")))
+
+(define_constraint "T"
+  "@internal A high-part symbol"
+  (match_code "high"))
+
+(define_constraint "W"
+  "A vector zero constant"
+  (and (match_code "const_vector")
+       (match_test "op == CONST0_RTX (mode)")))
diff --git a/gcc-4.9/gcc/config/alpha/driver-alpha.c b/gcc-4.9/gcc/config/alpha/driver-alpha.c
new file mode 100644
index 000000000..1981d0e6d
--- /dev/null
+++ b/gcc-4.9/gcc/config/alpha/driver-alpha.c
@@ -0,0 +1,99 @@
+/* Subroutines for the gcc driver.
+   Copyright (C) 2009-2014 Free Software Foundation, Inc.
+   Contributed by Arthur Loiret <aloiret@debian.org>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+ +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" + +/* Chip family type IDs, returned by implver instruction. */ +#define IMPLVER_EV4_FAMILY 0 /* LCA/EV4/EV45 */ +#define IMPLVER_EV5_FAMILY 1 /* EV5/EV56/PCA56 */ +#define IMPLVER_EV6_FAMILY 2 /* EV6 */ +#define IMPLVER_EV7_FAMILY 3 /* EV7 */ + +/* Bit defines for amask instruction. */ +#define AMASK_BWX 0x1 /* byte/word extension. */ +#define AMASK_FIX 0x2 /* sqrt and f <-> i conversions + extension. */ +#define AMASK_CIX 0x4 /* count extension. */ +#define AMASK_MVI 0x100 /* multimedia extension. */ +#define AMASK_PRECISE 0x200 /* Precise arithmetic traps. */ +#define AMASK_LOCKPFTCHOK 0x1000 /* Safe to prefetch lock cache + block. */ + +/* This will be called by the spec parser in gcc.c when it sees + a %:local_cpu_detect(args) construct. Currently it will be called + with either "cpu" or "tune" as argument depending on if -mcpu=native + or -mtune=native is to be substituted. + + It returns a string containing new command line parameters to be + put at the place of the above two options, depending on what CPU + this is executed. E.g. "-mcpu=ev6" on an Alpha 21264 for + -mcpu=native. If the routine can't detect a known processor, + the -mcpu or -mtune option is discarded. + + ARGC and ARGV are set depending on the actual arguments given + in the spec. */ +const char * +host_detect_local_cpu (int argc, const char **argv) +{ + static const struct cpu_types { + long implver; + long amask; + const char *const cpu; + } cpu_types[] = { + { IMPLVER_EV7_FAMILY, AMASK_BWX|AMASK_MVI|AMASK_FIX|AMASK_CIX, "ev67" }, + { IMPLVER_EV6_FAMILY, AMASK_BWX|AMASK_MVI|AMASK_FIX|AMASK_CIX, "ev67" }, + { IMPLVER_EV6_FAMILY, AMASK_BWX|AMASK_MVI|AMASK_FIX, "ev6" }, + { IMPLVER_EV5_FAMILY, AMASK_BWX|AMASK_MVI, "pca56" }, + { IMPLVER_EV5_FAMILY, AMASK_BWX, "ev56" }, + { IMPLVER_EV5_FAMILY, 0, "ev5" }, + { IMPLVER_EV4_FAMILY, 0, "ev4" }, + { 0, 0, NULL } + }; + long implver; + long amask; + const char *cpu; + int i; + + if (argc < 1) + return NULL; + + if (strcmp (argv[0], "cpu") && strcmp (argv[0], "tune")) + return NULL; + + implver = __builtin_alpha_implver (); + amask = __builtin_alpha_amask (~0L); + cpu = NULL; + + for (i = 0; cpu_types[i].cpu != NULL; i++) + if (implver == cpu_types[i].implver + && (~amask & cpu_types[i].amask) == cpu_types[i].amask) + { + cpu = cpu_types[i].cpu; + break; + } + + if (cpu == NULL) + return NULL; + + return concat ("-m", argv[0], "=", cpu, NULL); +} diff --git a/gcc-4.9/gcc/config/alpha/elf.h b/gcc-4.9/gcc/config/alpha/elf.h new file mode 100644 index 000000000..5a6803aba --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/elf.h @@ -0,0 +1,168 @@ +/* Definitions of target machine for GNU compiler, for DEC Alpha w/ELF. + Copyright (C) 1996-2014 Free Software Foundation, Inc. + Contributed by Richard Henderson (rth@tamu.edu). + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. 
+ +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#undef CC1_SPEC +#define CC1_SPEC "%{G*}" + +#undef ASM_SPEC +#define ASM_SPEC "%{G*} %{relax:-relax} %{!gstabs*:-no-mdebug}%{gstabs*:-mdebug}" + +/* Do not output a .file directive at the beginning of the input file. */ + +#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE +#define TARGET_ASM_FILE_START_FILE_DIRECTIVE false + +/* This is how to output an assembler line + that says to advance the location counter + to a multiple of 2**LOG bytes. */ + +#define ASM_OUTPUT_ALIGN(FILE,LOG) \ + if ((LOG) != 0) \ + fprintf (FILE, "\t.align %d\n", LOG); + +/* This says how to output assembler code to declare an + uninitialized internal linkage data object. Under SVR4, + the linker seems to want the alignment of data objects + to depend on their types. We do exactly that here. */ + +#undef ASM_OUTPUT_ALIGNED_LOCAL +#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \ +do { \ + if ((SIZE) <= (unsigned HOST_WIDE_INT) g_switch_value) \ + switch_to_section (sbss_section); \ + else \ + switch_to_section (bss_section); \ + ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \ + if (!flag_inhibit_size_directive) \ + ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, SIZE); \ + ASM_OUTPUT_ALIGN ((FILE), exact_log2((ALIGN) / BITS_PER_UNIT)); \ + ASM_OUTPUT_LABEL(FILE, NAME); \ + ASM_OUTPUT_SKIP((FILE), (SIZE) ? (SIZE) : 1); \ +} while (0) + +/* This says how to output assembler code to declare an + uninitialized external linkage data object. */ + +#undef ASM_OUTPUT_ALIGNED_BSS +#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \ +do { \ + ASM_OUTPUT_ALIGNED_LOCAL (FILE, NAME, SIZE, ALIGN); \ +} while (0) + +#undef BSS_SECTION_ASM_OP +#define BSS_SECTION_ASM_OP "\t.section\t.bss" +#undef SBSS_SECTION_ASM_OP +#define SBSS_SECTION_ASM_OP "\t.section\t.sbss,\"aw\"" +#undef SDATA_SECTION_ASM_OP +#define SDATA_SECTION_ASM_OP "\t.section\t.sdata,\"aw\"" + +/* This is how we tell the assembler that two symbols have the same value. */ + +#undef ASM_OUTPUT_DEF +#define ASM_OUTPUT_DEF(FILE, ALIAS, NAME) \ + do { \ + assemble_name(FILE, ALIAS); \ + fputs(" = ", FILE); \ + assemble_name(FILE, NAME); \ + fputc('\n', FILE); \ + } while (0) + +#undef ASM_OUTPUT_DEF_FROM_DECLS +#define ASM_OUTPUT_DEF_FROM_DECLS(FILE, DECL, TARGET) \ + do { \ + const char *alias = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \ + const char *name = IDENTIFIER_POINTER (TARGET); \ + if (TREE_CODE (DECL) == FUNCTION_DECL) \ + { \ + fputc ('$', FILE); \ + assemble_name (FILE, alias); \ + fputs ("..ng = $", FILE); \ + assemble_name (FILE, name); \ + fputs ("..ng\n", FILE); \ + } \ + ASM_OUTPUT_DEF (FILE, alias, name); \ + } while (0) + +/* Provide a STARTFILE_SPEC appropriate for ELF. Here we add the + (even more) magical crtbegin.o file which provides part of the + support for getting C++ file-scope static object constructed + before entering `main'. 
*/ + +#undef STARTFILE_SPEC +#ifdef HAVE_LD_PIE +#define STARTFILE_SPEC \ + "%{!shared: %{pg|p:gcrt1.o%s;pie:Scrt1.o%s;:crt1.o%s}}\ + crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}" +#else +#define STARTFILE_SPEC \ + "%{!shared: %{pg|p:gcrt1.o%s;:crt1.o%s}}\ + crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}" +#endif + +/* Provide a ENDFILE_SPEC appropriate for ELF. Here we tack on the + magical crtend.o file which provides part of the support for + getting C++ file-scope static object constructed before entering + `main', followed by a normal ELF "finalizer" file, `crtn.o'. */ + +#undef ENDFILE_SPEC +#define ENDFILE_SPEC \ + "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \ + %{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s" + +/* Select a format to encode pointers in exception handling data. CODE + is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is + true if the symbol may be affected by dynamic relocations. + + Since application size is already constrained to <2GB by the form of + the ldgp relocation, we can use a 32-bit pc-relative relocation to + static data. Dynamic data is accessed indirectly to allow for read + only EH sections. */ +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \ + (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4) + +/* If defined, a C statement to be executed just prior to the output of + assembler code for INSN. */ +#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \ + (alpha_this_literal_sequence_number = 0, \ + alpha_this_gpdisp_sequence_number = 0) +extern int alpha_this_literal_sequence_number; +extern int alpha_this_gpdisp_sequence_number; + +/* Since the bits of the _init and _fini function is spread across + many object files, each potentially with its own GP, we must assume + we need to load our GP. Further, the .init/.fini section can + easily be more than 4MB away from the function to call so we can't + use bsr. */ +#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ + asm (SECTION_OP "\n" \ +" br $29,1f\n" \ +"1: ldgp $29,0($29)\n" \ +" unop\n" \ +" jsr $26," USER_LABEL_PREFIX #FUNC "\n" \ +" .align 3\n" \ +" .previous"); + +/* If we have the capability create headers for efficient EH lookup. + As of Jan 2002, only glibc 2.2.4 can actually make use of this, but + I imagine that other systems will catch up. In the meantime, it + doesn't harm to make sure that the data exists to be used later. */ +#if defined(HAVE_LD_EH_FRAME_HDR) +#define LINK_EH_SPEC "%{!static:--eh-frame-hdr} " +#endif diff --git a/gcc-4.9/gcc/config/alpha/elf.opt b/gcc-4.9/gcc/config/alpha/elf.opt new file mode 100644 index 000000000..680379033 --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/elf.opt @@ -0,0 +1,29 @@ +; Alpha ELF options. + +; Copyright (C) 2011-2014 Free Software Foundation, Inc. +; +; This file is part of GCC. +; +; GCC is free software; you can redistribute it and/or modify it under +; the terms of the GNU General Public License as published by the Free +; Software Foundation; either version 3, or (at your option) any later +; version. +; +; GCC is distributed in the hope that it will be useful, but WITHOUT ANY +; WARRANTY; without even the implied warranty of MERCHANTABILITY or +; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +; for more details. +; +; You should have received a copy of the GNU General Public License +; along with GCC; see the file COPYING3. If not see +; <http://www.gnu.org/licenses/>. 
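+
+; The single option defined below, -relax, is a pure Driver option: the
+; compiler proper never sees it.  elf.h's ASM_SPEC (earlier in this
+; diff) forwards it to the assembler via %{relax:-relax}, roughly:
+;
+;   gcc -relax foo.c   ->   as ... -relax ... foo.s
+;
+; (the assembler command line shown is a sketch, not verbatim output).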
+
+; See the GCC internals manual (options.texi) for a description of
+; this file's format.
+
+; Please try to keep this file in ASCII collating order.
+
+relax
+Driver
+
+; This comment is to ensure we retain the blank line above.
diff --git a/gcc-4.9/gcc/config/alpha/ev4.md b/gcc-4.9/gcc/config/alpha/ev4.md
new file mode 100644
index 000000000..89d6c98e3
--- /dev/null
+++ b/gcc-4.9/gcc/config/alpha/ev4.md
@@ -0,0 +1,161 @@
+;; Scheduling description for Alpha EV4.
+;; Copyright (C) 2002-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+; On EV4 there are two classes of resources to consider: resources needed
+; to issue, and resources needed to execute.  IBUS[01] are in the first
+; category.  ABOX, BBOX, EBOX, FBOX, IMUL & FDIV make up the second.
+; (There are a few other register-like resources, but ...)
+
+(define_automaton "ev4_0,ev4_1,ev4_2")
+(define_cpu_unit "ev4_ib0,ev4_ib1,ev4_abox,ev4_bbox" "ev4_0")
+(define_cpu_unit "ev4_ebox,ev4_imul" "ev4_1")
+(define_cpu_unit "ev4_fbox,ev4_fdiv" "ev4_2")
+(define_reservation "ev4_ib01" "ev4_ib0|ev4_ib1")
+
+; Assume type "multi" single issues.
+(define_insn_reservation "ev4_multi" 1
+  (and (eq_attr "tune" "ev4")
+       (eq_attr "type" "multi"))
+  "ev4_ib0+ev4_ib1")
+
+; Loads from L0 complete in three cycles.  adjust_cost still factors
+; in user-specified memory latency, so return 1 here.
+(define_insn_reservation "ev4_ld" 1
+  (and (eq_attr "tune" "ev4")
+       (eq_attr "type" "ild,fld,ldsym,ld_l"))
+  "ev4_ib01+ev4_abox")
+
+; Stores can issue before the data (but not address) is ready.
+(define_insn_reservation "ev4_ist" 1
+  (and (eq_attr "tune" "ev4")
+       (eq_attr "type" "ist"))
+  "ev4_ib1+ev4_abox")
+
+; ??? Separate from ev4_ist because store_data_bypass_p can't handle
+; the patterns with multiple sets, like store-conditional.
+(define_insn_reservation "ev4_ist_c" 1
+  (and (eq_attr "tune" "ev4")
+       (eq_attr "type" "st_c"))
+  "ev4_ib1+ev4_abox")
+
+(define_insn_reservation "ev4_fst" 1
+  (and (eq_attr "tune" "ev4")
+       (eq_attr "type" "fst"))
+  "ev4_ib0+ev4_abox")
+
+; Memory barrier blocks ABOX insns until it's acknowledged by the external
+; memory bus.  This may be *quite* slow.  Setting this to 4 cycles gets
+; about all the benefit without making the DFA too large.
+(define_insn_reservation "ev4_mb" 4
+  (and (eq_attr "tune" "ev4")
+       (eq_attr "type" "mb"))
+  "ev4_ib1+ev4_abox,ev4_abox*3")
+
+; Branches have no delay cost, but do tie up the unit for two cycles.
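+; In the reservation that follows, "ev4_ib1+ev4_bbox,ev4_bbox" reads:
+; on the issue cycle the branch occupies issue slot IB1 together with
+; the BBOX, and on the next cycle the BBOX alone -- "+" combines units
+; within a cycle, "," advances to the next cycle.  Hence a second
+; branch cannot issue until two cycles later.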
+(define_insn_reservation "ev4_ibr" 2 + (and (eq_attr "tune" "ev4") + (eq_attr "type" "ibr,jsr")) + "ev4_ib1+ev4_bbox,ev4_bbox") + +(define_insn_reservation "ev4_callpal" 2 + (and (eq_attr "tune" "ev4") + (eq_attr "type" "callpal")) + "ev4_ib1+ev4_bbox,ev4_bbox") + +(define_insn_reservation "ev4_fbr" 2 + (and (eq_attr "tune" "ev4") + (eq_attr "type" "fbr")) + "ev4_ib0+ev4_bbox,ev4_bbox") + +; Arithmetic insns are normally have their results available after +; two cycles. There are a number of exceptions. + +(define_insn_reservation "ev4_iaddlog" 2 + (and (eq_attr "tune" "ev4") + (eq_attr "type" "iadd,ilog")) + "ev4_ib0+ev4_ebox") + +(define_bypass 1 + "ev4_iaddlog" + "ev4_ibr,ev4_iaddlog,ev4_shiftcm,ev4_icmp,ev4_imulsi,ev4_imuldi") + +(define_insn_reservation "ev4_shiftcm" 2 + (and (eq_attr "tune" "ev4") + (eq_attr "type" "shift,icmov")) + "ev4_ib0+ev4_ebox") + +(define_insn_reservation "ev4_icmp" 2 + (and (eq_attr "tune" "ev4") + (eq_attr "type" "icmp")) + "ev4_ib0+ev4_ebox") + +(define_bypass 1 "ev4_icmp" "ev4_ibr") + +(define_bypass 0 + "ev4_iaddlog,ev4_shiftcm,ev4_icmp" + "ev4_ist" + "store_data_bypass_p") + +; Multiplies use a non-pipelined imul unit. Also, "no [ebox] insn can +; be issued exactly three cycles before an integer multiply completes". + +(define_insn_reservation "ev4_imulsi" 21 + (and (eq_attr "tune" "ev4") + (and (eq_attr "type" "imul") + (eq_attr "opsize" "si"))) + "ev4_ib0+ev4_imul,ev4_imul*18,ev4_ebox") + +(define_bypass 20 "ev4_imulsi" "ev4_ist" "store_data_bypass_p") + +(define_insn_reservation "ev4_imuldi" 23 + (and (eq_attr "tune" "ev4") + (and (eq_attr "type" "imul") + (eq_attr "opsize" "!si"))) + "ev4_ib0+ev4_imul,ev4_imul*20,ev4_ebox") + +(define_bypass 22 "ev4_imuldi" "ev4_ist" "store_data_bypass_p") + +; Most FP insns have a 6 cycle latency, but with a 4 cycle bypass back in. +(define_insn_reservation "ev4_fpop" 6 + (and (eq_attr "tune" "ev4") + (eq_attr "type" "fadd,fmul,fcpys,fcmov")) + "ev4_ib1+ev4_fbox") + +(define_bypass 4 "ev4_fpop" "ev4_fpop") + +; The floating point divider is not pipelined. Also, "no FPOP insn can be +; issued exactly five or exactly six cycles before an fdiv insn completes". + +(define_insn_reservation "ev4_fdivsf" 34 + (and (eq_attr "tune" "ev4") + (and (eq_attr "type" "fdiv") + (eq_attr "opsize" "si"))) + "ev4_ib1+ev4_fdiv,ev4_fdiv*28,ev4_fdiv+ev4_fbox,ev4_fbox") + +(define_insn_reservation "ev4_fdivdf" 63 + (and (eq_attr "tune" "ev4") + (and (eq_attr "type" "fdiv") + (eq_attr "opsize" "di"))) + "ev4_ib1+ev4_fdiv,ev4_fdiv*57,ev4_fdiv+ev4_fbox,ev4_fbox") + +; Traps don't consume or produce data. +(define_insn_reservation "ev4_misc" 1 + (and (eq_attr "tune" "ev4") + (eq_attr "type" "misc")) + "ev4_ib1") diff --git a/gcc-4.9/gcc/config/alpha/ev5.md b/gcc-4.9/gcc/config/alpha/ev5.md new file mode 100644 index 000000000..9d1871ea9 --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/ev5.md @@ -0,0 +1,194 @@ +;; Scheduling description for Alpha EV5. +;; Copyright (C) 2002-2014 Free Software Foundation, Inc. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. 
+;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; <http://www.gnu.org/licenses/>. + +;; EV5 has two asymmetric integer units, E0 and E1, plus separate +;; FP add and multiply units. + +(define_automaton "ev5_0,ev5_1") +(define_cpu_unit "ev5_e0,ev5_e1,ev5_fa,ev5_fm" "ev5_0") +(define_reservation "ev5_e01" "ev5_e0|ev5_e1") +(define_reservation "ev5_fam" "ev5_fa|ev5_fm") +(define_cpu_unit "ev5_imul" "ev5_0") +(define_cpu_unit "ev5_fdiv" "ev5_1") + +; Assume type "multi" single issues. +(define_insn_reservation "ev5_multi" 1 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "multi")) + "ev5_e0+ev5_e1+ev5_fa+ev5_fm") + +; Stores can only issue to E0, and may not issue with loads. +; Model this with some fake units. + +(define_cpu_unit "ev5_l0,ev5_l1,ev5_st" "ev5_0") +(define_reservation "ev5_ld" "ev5_l0|ev5_l1") +(exclusion_set "ev5_l0,ev5_l1" "ev5_st") + +(define_insn_reservation "ev5_st" 1 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "ist,fst,st_c,mb")) + "ev5_e0+ev5_st") + +; Loads from L0 complete in two cycles. adjust_cost still factors +; in user-specified memory latency, so return 1 here. +(define_insn_reservation "ev5_ld" 1 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "ild,fld,ldsym")) + "ev5_e01+ev5_ld") + +(define_insn_reservation "ev5_ld_l" 1 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "ld_l")) + "ev5_e0+ev5_ld") + +; Integer branches slot only to E1. +(define_insn_reservation "ev5_ibr" 1 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "ibr")) + "ev5_e1") + +(define_insn_reservation "ev5_callpal" 1 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "callpal")) + "ev5_e1") + +(define_insn_reservation "ev5_jsr" 1 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "jsr")) + "ev5_e1") + +(define_insn_reservation "ev5_shift" 1 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "shift")) + "ev5_e0") + +(define_insn_reservation "ev5_mvi" 2 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "mvi")) + "ev5_e0") + +(define_insn_reservation "ev5_cmov" 2 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "icmov")) + "ev5_e01") + +(define_insn_reservation "ev5_iadd" 1 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "iadd")) + "ev5_e01") + +(define_insn_reservation "ev5_ilogcmp" 1 + (and (eq_attr "tune" "ev5") + (eq_attr "type" "ilog,icmp")) + "ev5_e01") + +; Conditional move and branch can issue the same cycle as the test. +(define_bypass 0 "ev5_ilogcmp" "ev5_ibr,ev5_cmov" "if_test_bypass_p") + +; Multiplies use a non-pipelined imul unit. Also, "no insn can be issued +; to E0 exactly two cycles before an integer multiply completes". + +(define_insn_reservation "ev5_imull" 8 + (and (eq_attr "tune" "ev5") + (and (eq_attr "type" "imul") + (eq_attr "opsize" "si"))) + "ev5_e0+ev5_imul,ev5_imul*3,nothing,ev5_e0") + +(define_insn_reservation "ev5_imulq" 12 + (and (eq_attr "tune" "ev5") + (and (eq_attr "type" "imul") + (eq_attr "opsize" "di"))) + "ev5_e0+ev5_imul,ev5_imul*7,nothing,ev5_e0") + +(define_insn_reservation "ev5_imulh" 14 + (and (eq_attr "tune" "ev5") + (and (eq_attr "type" "imul") + (eq_attr "opsize" "udi"))) + "ev5_e0+ev5_imul,ev5_imul*7,nothing*3,ev5_e0") + +; The multiplier is unable to receive data from Ebox bypass paths. The +; instruction issues at the expected time, but its latency is increased +; by the time it takes for the input data to become available to the +; multiplier. 
For example, an IMULL instruction issued one cycle later
+; than an ADDL instruction, which produced one of its operands, has a
+; latency of 10 (8 + 2). If the IMULL instruction is issued two cycles
+; later than the ADDL instruction, the latency is 9 (8 + 1).
+;
+; Model this instead with increased latency on the input instruction.
+
+(define_bypass 3
+  "ev5_ld,ev5_ld_l,ev5_shift,ev5_mvi,ev5_cmov,ev5_iadd,ev5_ilogcmp"
+  "ev5_imull,ev5_imulq,ev5_imulh")
+
+(define_bypass 9 "ev5_imull" "ev5_imull,ev5_imulq,ev5_imulh")
+(define_bypass 13 "ev5_imulq" "ev5_imull,ev5_imulq,ev5_imulh")
+(define_bypass 15 "ev5_imulh" "ev5_imull,ev5_imulq,ev5_imulh")
+
+; Similarly for the FPU we have two asymmetric units.
+
+(define_insn_reservation "ev5_fadd" 4
+  (and (eq_attr "tune" "ev5")
+       (eq_attr "type" "fadd,fcmov"))
+  "ev5_fa")
+
+(define_insn_reservation "ev5_fbr" 1
+  (and (eq_attr "tune" "ev5")
+       (eq_attr "type" "fbr"))
+  "ev5_fa")
+
+(define_insn_reservation "ev5_fcpys" 4
+  (and (eq_attr "tune" "ev5")
+       (eq_attr "type" "fcpys"))
+  "ev5_fam")
+
+(define_insn_reservation "ev5_fmul" 4
+  (and (eq_attr "tune" "ev5")
+       (eq_attr "type" "fmul"))
+  "ev5_fm")
+
+; The floating point divider is not pipelined. Also, "no insn can be issued
+; to FA exactly five cycles before an fdiv insn completes".
+;
+; ??? Do not model this late reservation due to the enormously increased
+; size of the resulting DFA.
+;
+; ??? Putting ev5_fa and ev5_fdiv alone into the same automata produces
+; a DFA of acceptable size, but putting ev5_fm and ev5_fa into separate
+; automata produces incorrect results for insns that can choose one or
+; the other, i.e. ev5_fcpys.
+
+(define_insn_reservation "ev5_fdivsf" 15
+  (and (eq_attr "tune" "ev5")
+       (and (eq_attr "type" "fdiv")
+            (eq_attr "opsize" "si")))
+  ; "ev5_fa+ev5_fdiv,ev5_fdiv*9,ev5_fa+ev5_fdiv,ev5_fdiv*4"
+  "ev5_fa+ev5_fdiv,ev5_fdiv*14")
+
+(define_insn_reservation "ev5_fdivdf" 22
+  (and (eq_attr "tune" "ev5")
+       (and (eq_attr "type" "fdiv")
+            (eq_attr "opsize" "di")))
+  ; "ev5_fa+ev5_fdiv,ev5_fdiv*17,ev5_fa+ev5_fdiv,ev5_fdiv*4"
+  "ev5_fa+ev5_fdiv,ev5_fdiv*21")
+
+; Traps don't consume or produce data; rpcc is latency 2 if we ever add it.
+(define_insn_reservation "ev5_misc" 2
+  (and (eq_attr "tune" "ev5")
+       (eq_attr "type" "misc"))
+  "ev5_e0")
diff --git a/gcc-4.9/gcc/config/alpha/ev6.md b/gcc-4.9/gcc/config/alpha/ev6.md new file mode 100644 index 000000000..e0612a411 --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/ev6.md @@ -0,0 +1,181 @@
+;; Scheduling description for Alpha EV6.
+;; Copyright (C) 2002-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+; EV6 can issue 4 insns per clock. It's out-of-order, so this isn't
+; expected to help over-much, but a precise description can be important
+; for software pipelining.
+; +; EV6 has two symmetric pairs ("clusters") of two asymmetric integer +; units ("upper" and "lower"), yielding pipe names U0, U1, L0, L1. +; +; ??? The clusters have independent register files that are re-synced +; every cycle. Thus there is one additional cycle of latency between +; insns issued on different clusters. Possibly model that by duplicating +; all EBOX insn_reservations that can issue to either cluster, increasing +; all latencies by one, and adding bypasses within the cluster. +; +; ??? In addition, instruction order affects cluster issue. + +(define_automaton "ev6_0,ev6_1") +(define_cpu_unit "ev6_u0,ev6_u1,ev6_l0,ev6_l1" "ev6_0") +(define_reservation "ev6_u" "ev6_u0|ev6_u1") +(define_reservation "ev6_l" "ev6_l0|ev6_l1") +(define_reservation "ev6_ebox" "ev6_u|ev6_l") + +(define_cpu_unit "ev6_fa" "ev6_1") +(define_cpu_unit "ev6_fm,ev6_fst0,ev6_fst1" "ev6_0") +(define_reservation "ev6_fst" "ev6_fst0|ev6_fst1") + +; Assume type "multi" single issues. +(define_insn_reservation "ev6_multi" 1 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "multi")) + "ev6_u0+ev6_u1+ev6_l0+ev6_l1+ev6_fa+ev6_fm+ev6_fst0+ev6_fst1") + +; Integer loads take at least 3 clocks, and only issue to lower units. +; adjust_cost still factors in user-specified memory latency, so return 1 here. +(define_insn_reservation "ev6_ild" 1 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "ild,ldsym,ld_l")) + "ev6_l") + +(define_insn_reservation "ev6_ist" 1 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "ist,st_c")) + "ev6_l") + +(define_insn_reservation "ev6_mb" 1 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "mb")) + "ev6_l1") + +; FP loads take at least 4 clocks. adjust_cost still factors +; in user-specified memory latency, so return 2 here. +(define_insn_reservation "ev6_fld" 2 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "fld")) + "ev6_l") + +; The FPU communicates with memory and the integer register file +; via two fp store units. We need a slot in the fst immediately, and +; a slot in LOW after the operand data is ready. At which point the +; data may be moved either to the store queue or the integer register +; file and the insn retired. + +(define_insn_reservation "ev6_fst" 3 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "fst")) + "ev6_fst,nothing,ev6_l") + +; Arithmetic goes anywhere. +(define_insn_reservation "ev6_arith" 1 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "iadd,ilog,icmp")) + "ev6_ebox") + +; Motion video insns also issue only to U0, and take three ticks. +(define_insn_reservation "ev6_mvi" 3 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "mvi")) + "ev6_u0") + +; Shifts issue to upper units. +(define_insn_reservation "ev6_shift" 1 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "shift")) + "ev6_u") + +; Multiplies issue only to U1, and all take 7 ticks. +(define_insn_reservation "ev6_imul" 7 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "imul")) + "ev6_u1") + +; Conditional moves decompose into two independent primitives, each taking +; one cycle. Since ev6 is out-of-order, we can't see anything but two cycles. +(define_insn_reservation "ev6_icmov" 2 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "icmov")) + "ev6_ebox,ev6_ebox") + +; Integer branches issue to upper units +(define_insn_reservation "ev6_ibr" 1 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "ibr,callpal")) + "ev6_u") + +; Calls only issue to L0. +(define_insn_reservation "ev6_jsr" 1 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "jsr")) + "ev6_l0") + +; Ftoi/itof only issue to lower pipes. 
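+; These are the FIX extension transfers between the integer and FP
+; register files, so the int<->fp moves need no bounce through memory.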
+(define_insn_reservation "ev6_itof" 3 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "itof")) + "ev6_l") + +(define_insn_reservation "ev6_ftoi" 3 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "ftoi")) + "ev6_fst,nothing,ev6_l") + +(define_insn_reservation "ev6_fmul" 4 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "fmul")) + "ev6_fm") + +(define_insn_reservation "ev6_fadd" 4 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "fadd,fcpys,fbr")) + "ev6_fa") + +(define_bypass 6 "ev6_fmul,ev6_fadd" "ev6_fst,ev6_ftoi") + +(define_insn_reservation "ev6_fcmov" 8 + (and (eq_attr "tune" "ev6") + (eq_attr "type" "fcmov")) + "ev6_fa,nothing*3,ev6_fa") + +(define_bypass 10 "ev6_fcmov" "ev6_fst,ev6_ftoi") + +(define_insn_reservation "ev6_fdivsf" 12 + (and (eq_attr "tune" "ev6") + (and (eq_attr "type" "fdiv") + (eq_attr "opsize" "si"))) + "ev6_fa*9") + +(define_insn_reservation "ev6_fdivdf" 15 + (and (eq_attr "tune" "ev6") + (and (eq_attr "type" "fdiv") + (eq_attr "opsize" "di"))) + "ev6_fa*12") + +(define_insn_reservation "ev6_sqrtsf" 18 + (and (eq_attr "tune" "ev6") + (and (eq_attr "type" "fsqrt") + (eq_attr "opsize" "si"))) + "ev6_fa*15") + +(define_insn_reservation "ev6_sqrtdf" 33 + (and (eq_attr "tune" "ev6") + (and (eq_attr "type" "fsqrt") + (eq_attr "opsize" "di"))) + "ev6_fa*30") diff --git a/gcc-4.9/gcc/config/alpha/freebsd.h b/gcc-4.9/gcc/config/alpha/freebsd.h new file mode 100644 index 000000000..9e52d33e4 --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/freebsd.h @@ -0,0 +1,68 @@ +/* Definitions for DEC Alpha/AXP running FreeBSD using the ELF format + Copyright (C) 2000-2014 Free Software Foundation, Inc. + Contributed by David E. O'Brien <obrien@FreeBSD.org> and BSDi. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + + +#undef EXTRA_SPECS +#define EXTRA_SPECS \ + { "fbsd_dynamic_linker", FBSD_DYNAMIC_LINKER } + +/* Provide a CPP_SPEC appropriate for FreeBSD/alpha -- dealing with + the GCC option `-posix'. */ + +#undef CPP_SPEC +#define CPP_SPEC "%{posix:-D_POSIX_SOURCE}" + +#define LINK_SPEC "%{G*} %{relax:-relax} \ + %{p:%nconsider using '-pg' instead of '-p' with gprof(1)} \ + %{assert*} %{R*} %{rpath*} %{defsym*} \ + %{shared:-Bshareable %{h*} %{soname*}} \ + %{!shared: \ + %{!static: \ + %{rdynamic:-export-dynamic} \ + -dynamic-linker %(fbsd_dynamic_linker) } \ + %{static:-Bstatic}} \ + %{symbolic:-Bsymbolic}" + + +/************************[ Target stuff ]***********************************/ + +/* Define the actual types of some ANSI-mandated types. + Needs to agree with <machine/ansi.h>. GCC defaults come from c-decl.c, + c-common.c, and config/<arch>/<arch>.h. */ + +/* alpha.h gets this wrong for FreeBSD. We use the GCC defaults instead. */ +#undef WCHAR_TYPE + +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE 32 + +#define TARGET_ELF 1 + +#undef HAS_INIT_SECTION + +/* Show that we need a GP when profiling. 
*/ +#undef TARGET_PROFILING_NEEDS_GP +#define TARGET_PROFILING_NEEDS_GP 1 + +/* Don't default to pcc-struct-return, we want to retain compatibility with + older FreeBSD releases AND pcc-struct-return may not be reentrant. */ + +#undef DEFAULT_PCC_STRUCT_RETURN +#define DEFAULT_PCC_STRUCT_RETURN 0 diff --git a/gcc-4.9/gcc/config/alpha/linux-elf.h b/gcc-4.9/gcc/config/alpha/linux-elf.h new file mode 100644 index 000000000..bdefe237f --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/linux-elf.h @@ -0,0 +1,53 @@ +/* Definitions of target machine for GNU compiler + for Alpha Linux-based GNU systems using ELF. + Copyright (C) 1996-2014 Free Software Foundation, Inc. + Contributed by Richard Henderson. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#undef EXTRA_SPECS +#define EXTRA_SPECS \ +{ "elf_dynamic_linker", ELF_DYNAMIC_LINKER }, + +#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" +#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0" +#if DEFAULT_LIBC == LIBC_UCLIBC +#define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:" G ";:" U "}" +#elif DEFAULT_LIBC == LIBC_GLIBC +#define CHOOSE_DYNAMIC_LINKER(G, U) "%{muclibc:" U ";:" G "}" +#else +#error "Unsupported DEFAULT_LIBC" +#endif +#define GNU_USER_DYNAMIC_LINKER \ + CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER, UCLIBC_DYNAMIC_LINKER) + +#define ELF_DYNAMIC_LINKER GNU_USER_DYNAMIC_LINKER + +#define LINK_SPEC "-m elf64alpha %{G*} %{relax:-relax} \ + %{O*:-O3} %{!O*:-O1} \ + %{shared:-shared} \ + %{!shared: \ + %{!static: \ + %{rdynamic:-export-dynamic} \ + -dynamic-linker %(elf_dynamic_linker)} \ + %{static:-static}}" + +#undef LIB_SPEC +#define LIB_SPEC \ +"%{pthread:-lpthread} %{shared:-lc}%{!shared:%{profile:-lc_p}%{!profile:-lc}} " + +#define TARGET_ASM_FILE_END file_end_indicate_exec_stack diff --git a/gcc-4.9/gcc/config/alpha/linux.h b/gcc-4.9/gcc/config/alpha/linux.h new file mode 100644 index 000000000..966e9b2d0 --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/linux.h @@ -0,0 +1,102 @@ +/* Definitions of target machine for GNU compiler, + for Alpha Linux-based GNU systems. + Copyright (C) 1996-2014 Free Software Foundation, Inc. + Contributed by Richard Henderson. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. 
*/ + +#define TARGET_OS_CPP_BUILTINS() \ + do { \ + builtin_define ("__gnu_linux__"); \ + builtin_define ("_LONGLONG"); \ + builtin_define_std ("linux"); \ + builtin_define_std ("unix"); \ + builtin_assert ("system=linux"); \ + builtin_assert ("system=unix"); \ + builtin_assert ("system=posix"); \ + /* The GNU C++ standard library requires this. */ \ + if (c_dialect_cxx ()) \ + builtin_define ("_GNU_SOURCE"); \ + } while (0) + +#undef LIB_SPEC +#define LIB_SPEC \ + "%{pthread:-lpthread} \ + %{shared:-lc} \ + %{!shared: %{profile:-lc_p}%{!profile:-lc}}" + +#undef CPP_SPEC +#define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}" + +/* Show that we need a GP when profiling. */ +#undef TARGET_PROFILING_NEEDS_GP +#define TARGET_PROFILING_NEEDS_GP 1 + +/* Don't care about faults in the prologue. */ +#undef TARGET_CAN_FAULT_IN_PROLOGUE +#define TARGET_CAN_FAULT_IN_PROLOGUE 1 + +/* OS fixes up EV5 data fault on prefetch. */ +#undef TARGET_FIXUP_EV5_PREFETCH +#define TARGET_FIXUP_EV5_PREFETCH 1 + +#undef WCHAR_TYPE +#define WCHAR_TYPE "int" + +#ifdef SINGLE_LIBC +#define OPTION_GLIBC (DEFAULT_LIBC == LIBC_GLIBC) +#define OPTION_UCLIBC (DEFAULT_LIBC == LIBC_UCLIBC) +#define OPTION_BIONIC (DEFAULT_LIBC == LIBC_BIONIC) +#else +#define OPTION_GLIBC (linux_libc == LIBC_GLIBC) +#define OPTION_UCLIBC (linux_libc == LIBC_UCLIBC) +#define OPTION_BIONIC (linux_libc == LIBC_BIONIC) +#endif + +/* Determine what functions are present at the runtime; + this includes full c99 runtime and sincos. */ +#undef TARGET_LIBC_HAS_FUNCTION +#define TARGET_LIBC_HAS_FUNCTION linux_libc_has_function + +#define TARGET_POSIX_IO + +#define LINK_GCC_C_SEQUENCE_SPEC \ + "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}" + +/* Use --as-needed -lgcc_s for eh support. */ +#ifdef HAVE_LD_AS_NEEDED +#define USE_LD_AS_NEEDED 1 +#endif + +/* Define if long doubles should be mangled as 'g'. */ +#define TARGET_ALTERNATE_LONG_DOUBLE_MANGLING + +/* -mcpu=native handling only makes sense with compiler running on + an Alpha chip. */ +#if defined(__alpha__) || defined(__alpha) +extern const char *host_detect_local_cpu (int argc, const char **argv); +# define EXTRA_SPEC_FUNCTIONS \ + { "local_cpu_detect", host_detect_local_cpu }, + +# define MCPU_MTUNE_NATIVE_SPECS \ + " %{mcpu=native:%<mcpu=native %:local_cpu_detect(cpu)}" \ + " %{mtune=native:%<mtune=native %:local_cpu_detect(tune)}" +#else +# define MCPU_MTUNE_NATIVE_SPECS "" +#endif + +#define DRIVER_SELF_SPECS MCPU_MTUNE_NATIVE_SPECS diff --git a/gcc-4.9/gcc/config/alpha/netbsd.h b/gcc-4.9/gcc/config/alpha/netbsd.h new file mode 100644 index 000000000..7c3ace6dd --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/netbsd.h @@ -0,0 +1,72 @@ +/* Definitions of target machine for GNU compiler, + for Alpha NetBSD systems. + Copyright (C) 1998-2014 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. 
*/ + +#define TARGET_OS_CPP_BUILTINS() \ + do { \ + NETBSD_OS_CPP_BUILTINS_ELF(); \ + } while (0) + + +/* NetBSD doesn't use the LANGUAGE* built-ins. */ +#undef SUBTARGET_LANGUAGE_CPP_BUILTINS +#define SUBTARGET_LANGUAGE_CPP_BUILTINS() /* nothing */ + + +/* Show that we need a GP when profiling. */ +#undef TARGET_PROFILING_NEEDS_GP +#define TARGET_PROFILING_NEEDS_GP 1 + + +/* Provide a CPP_SPEC appropriate for NetBSD/alpha. We use + this to pull in CPP specs that all NetBSD configurations need. */ + +#undef CPP_SPEC +#define CPP_SPEC NETBSD_CPP_SPEC + +#undef EXTRA_SPECS +#define EXTRA_SPECS \ + { "netbsd_link_spec", NETBSD_LINK_SPEC_ELF }, \ + { "netbsd_entry_point", NETBSD_ENTRY_POINT }, \ + { "netbsd_endfile_spec", NETBSD_ENDFILE_SPEC }, + + +/* Provide a LINK_SPEC appropriate for a NetBSD/alpha ELF target. */ + +#undef LINK_SPEC +#define LINK_SPEC \ + "%{G*} %{relax:-relax} \ + %{O*:-O3} %{!O*:-O1} \ + %(netbsd_link_spec)" + +#define NETBSD_ENTRY_POINT "__start" + + +/* Provide an ENDFILE_SPEC appropriate for NetBSD/alpha ELF. Here we + add crtend.o, which provides part of the support for getting + C++ file-scope static objects deconstructed after exiting "main". + + We also need to handle the GCC option `-ffast-math'. */ + +#undef ENDFILE_SPEC +#define ENDFILE_SPEC \ + "%{Ofast|ffast-math|funsafe-math-optimizations:crtfm%O%s} \ + %(netbsd_endfile_spec)" + +#define HAVE_ENABLE_EXECUTE_STACK diff --git a/gcc-4.9/gcc/config/alpha/openbsd.h b/gcc-4.9/gcc/config/alpha/openbsd.h new file mode 100644 index 000000000..74f16e134 --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/openbsd.h @@ -0,0 +1,45 @@ +/* Configuration file for an alpha OpenBSD target. + Copyright (C) 1999-2014 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +/* Controlling the compilation driver. */ + +/* run-time target specifications */ +#define TARGET_OS_CPP_BUILTINS() \ + do { \ + OPENBSD_OS_CPP_BUILTINS_ELF(); \ + OPENBSD_OS_CPP_BUILTINS_LP64(); \ + } while (0) + +/* Layout of source language data types. */ + +/* This must agree with <machine/ansi.h> */ +#undef SIZE_TYPE +#define SIZE_TYPE "long unsigned int" + +#undef PTRDIFF_TYPE +#define PTRDIFF_TYPE "long int" + +#undef WCHAR_TYPE +#define WCHAR_TYPE "int" + +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE 32 + + +#define LOCAL_LABEL_PREFIX "." diff --git a/gcc-4.9/gcc/config/alpha/predicates.md b/gcc-4.9/gcc/config/alpha/predicates.md new file mode 100644 index 000000000..c68e83a70 --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/predicates.md @@ -0,0 +1,653 @@ +;; Predicate definitions for DEC Alpha. +;; Copyright (C) 2004-2014 Free Software Foundation, Inc. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. 
+;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; <http://www.gnu.org/licenses/>. + +;; Return 1 if OP is the zero constant for MODE. +(define_predicate "const0_operand" + (and (match_code "const_int,const_double,const_vector") + (match_test "op == CONST0_RTX (mode)"))) + +;; Returns true if OP is either the constant zero or a register. +(define_predicate "reg_or_0_operand" + (ior (match_operand 0 "register_operand") + (match_operand 0 "const0_operand"))) + +;; Return 1 if OP is a constant in the range of 0-63 (for a shift) or +;; any register. +(define_predicate "reg_or_6bit_operand" + (if_then_else (match_code "const_int") + (match_test "INTVAL (op) >= 0 && INTVAL (op) < 64") + (match_operand 0 "register_operand"))) + +;; Return 1 if OP is an 8-bit constant. +(define_predicate "cint8_operand" + (and (match_code "const_int") + (match_test "INTVAL (op) >= 0 && INTVAL (op) < 256"))) + +;; Return 1 if OP is an 8-bit constant or any register. +(define_predicate "reg_or_8bit_operand" + (if_then_else (match_code "const_int") + (match_test "INTVAL (op) >= 0 && INTVAL (op) < 256") + (match_operand 0 "register_operand"))) + +;; Return 1 if OP is a constant or any register. +(define_predicate "reg_or_cint_operand" + (ior (match_operand 0 "register_operand") + (match_operand 0 "const_int_operand"))) + +;; Return 1 if the operand is a valid second operand to an add insn. +(define_predicate "add_operand" + (if_then_else (match_code "const_int") + (match_test "satisfies_constraint_K (op) || satisfies_constraint_L (op)") + (match_operand 0 "register_operand"))) + +;; Return 1 if the operand is a valid second operand to a +;; sign-extending add insn. +(define_predicate "sext_add_operand" + (if_then_else (match_code "const_int") + (match_test "satisfies_constraint_I (op) || satisfies_constraint_O (op)") + (match_operand 0 "register_operand"))) + +;; Return 1 if the operand is a non-symbolic constant operand that +;; does not satisfy add_operand. +(define_predicate "non_add_const_operand" + (and (match_code "const_int,const_double,const_vector") + (not (match_operand 0 "add_operand")))) + +;; Return 1 if the operand is a non-symbolic, nonzero constant operand. +(define_predicate "non_zero_const_operand" + (and (match_code "const_int,const_double,const_vector") + (match_test "op != CONST0_RTX (mode)"))) + +;; Return 1 if OP is the constant 4 or 8. +(define_predicate "const48_operand" + (and (match_code "const_int") + (match_test "INTVAL (op) == 4 || INTVAL (op) == 8"))) + +;; Return 1 if OP is a valid first operand to an AND insn. +(define_predicate "and_operand" + (if_then_else (match_code "const_int") + (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 0x100 + || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100 + || zap_mask (INTVAL (op))") + (if_then_else (match_code "const_double") + (match_test "GET_MODE (op) == VOIDmode + && zap_mask (CONST_DOUBLE_LOW (op)) + && zap_mask (CONST_DOUBLE_HIGH (op))") + (match_operand 0 "register_operand")))) + +;; Return 1 if OP is a valid first operand to an IOR or XOR insn. 
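+;; Only 8-bit literals and their complements qualify; the complemented
+;; forms are usable because Alpha provides ORNOT and EQV alongside
+;; BIS and XOR.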
+(define_predicate "or_operand" + (if_then_else (match_code "const_int") + (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 0x100 + || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100") + (match_operand 0 "register_operand"))) + +;; Return 1 if OP is a constant that is the width, in bits, of an integral +;; mode not larger than DImode. +(define_predicate "mode_width_operand" + (match_code "const_int") +{ + HOST_WIDE_INT i = INTVAL (op); + return i == 8 || i == 16 || i == 32 || i == 64; +}) + +;; Return 1 if OP is a constant that is a mask of ones of width of an +;; integral machine mode not larger than DImode. +(define_predicate "mode_mask_operand" + (match_code "const_int,const_double") +{ + if (CONST_INT_P (op)) + { + HOST_WIDE_INT value = INTVAL (op); + + if (value == 0xff) + return 1; + if (value == 0xffff) + return 1; + if (value == 0xffffffff) + return 1; + if (value == -1) + return 1; + } + else if (HOST_BITS_PER_WIDE_INT == 32 && GET_CODE (op) == CONST_DOUBLE) + { + if (CONST_DOUBLE_LOW (op) == 0xffffffff && CONST_DOUBLE_HIGH (op) == 0) + return 1; + } + return 0; +}) + +;; Return 1 if OP is a multiple of 8 less than 64. +(define_predicate "mul8_operand" + (match_code "const_int") +{ + unsigned HOST_WIDE_INT i = INTVAL (op); + return i < 64 && i % 8 == 0; +}) + +;; Return 1 if OP is a hard floating-point register. +(define_predicate "hard_fp_register_operand" + (match_operand 0 "register_operand") +{ + if (GET_CODE (op) == SUBREG) + op = SUBREG_REG (op); + return REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS; +}) + +;; Return 1 if OP is a hard general register. +(define_predicate "hard_int_register_operand" + (match_operand 0 "register_operand") +{ + if (GET_CODE (op) == SUBREG) + op = SUBREG_REG (op); + return REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS; +}) + +;; Return 1 if OP is something that can be reloaded into a register; +;; if it is a MEM, it need not be valid. +(define_predicate "some_operand" + (ior (match_code "reg,mem,const_int,const_double,const_vector, + label_ref,symbol_ref,const,high") + (and (match_code "subreg") + (match_test "some_operand (SUBREG_REG (op), VOIDmode)")))) + +;; Likewise, but don't accept constants. +(define_predicate "some_ni_operand" + (ior (match_code "reg,mem") + (and (match_code "subreg") + (match_test "some_ni_operand (SUBREG_REG (op), VOIDmode)")))) + +;; Return 1 if OP is a valid operand for the source of a move insn. +(define_predicate "input_operand" + (match_code "label_ref,symbol_ref,const,high,reg,subreg,mem, + const_double,const_vector,const_int") +{ + switch (GET_CODE (op)) + { + case LABEL_REF: + case SYMBOL_REF: + case CONST: + if (TARGET_EXPLICIT_RELOCS) + { + /* We don't split symbolic operands into something unintelligable + until after reload, but we do not wish non-small, non-global + symbolic operands to be reconstructed from their high/lo_sum + form. */ + return (small_symbolic_operand (op, mode) + || global_symbolic_operand (op, mode) + || gotdtp_symbolic_operand (op, mode) + || gottp_symbolic_operand (op, mode)); + } + /* VMS still has a 32-bit mode. */ + return mode == ptr_mode || mode == Pmode; + + case HIGH: + return (TARGET_EXPLICIT_RELOCS + && local_symbolic_operand (XEXP (op, 0), mode)); + + case REG: + return 1; + + case SUBREG: + if (register_operand (op, mode)) + return 1; + /* ... fall through ... 
*/ + case MEM: + return ((TARGET_BWX || (mode != HImode && mode != QImode)) + && general_operand (op, mode)); + + case CONST_DOUBLE: + return op == CONST0_RTX (mode); + + case CONST_VECTOR: + if (reload_in_progress || reload_completed) + return alpha_legitimate_constant_p (mode, op); + return op == CONST0_RTX (mode); + + case CONST_INT: + if (mode == QImode || mode == HImode) + return true; + if (reload_in_progress || reload_completed) + return alpha_legitimate_constant_p (mode, op); + return add_operand (op, mode); + + default: + gcc_unreachable (); + } + return 0; +}) + +;; Return 1 if OP is a SYMBOL_REF for a function known to be in this +;; file, and in the same section as the current function. + +(define_predicate "samegp_function_operand" + (match_code "symbol_ref") +{ + /* Easy test for recursion. */ + if (op == XEXP (DECL_RTL (current_function_decl), 0)) + return true; + + /* Functions that are not local can be overridden, and thus may + not share the same gp. */ + if (! SYMBOL_REF_LOCAL_P (op)) + return false; + + /* If -msmall-data is in effect, assume that there is only one GP + for the module, and so any local symbol has this property. We + need explicit relocations to be able to enforce this for symbols + not defined in this unit of translation, however. */ + if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA) + return true; + + /* Functions that are not external are defined in this UoT, + and thus must share the same gp. */ + return ! SYMBOL_REF_EXTERNAL_P (op); +}) + +;; Return 1 if OP is a SYMBOL_REF for which we can make a call via bsr. +(define_predicate "direct_call_operand" + (match_operand 0 "samegp_function_operand") +{ + /* If profiling is implemented via linker tricks, we can't jump + to the nogp alternate entry point. Note that crtl->profile + would not be correct, since that doesn't indicate if the target + function uses profiling. */ + /* ??? TARGET_PROFILING_NEEDS_GP isn't really the right test, + but is approximately correct for the OSF ABIs. Don't know + what to do for VMS, NT, or UMK. */ + if (!TARGET_PROFILING_NEEDS_GP && profile_flag) + return false; + + /* Must be a function. In some cases folks create thunks in static + data structures and then make calls to them. If we allow the + direct call, we'll get an error from the linker about !samegp reloc + against a symbol without a .prologue directive. */ + if (!SYMBOL_REF_FUNCTION_P (op)) + return false; + + /* Must be "near" so that the branch is assumed to reach. With + -msmall-text, this is assumed true of all local symbols. Since + we've already checked samegp, locality is already assured. */ + if (TARGET_SMALL_TEXT) + return true; + + return false; +}) + +;; Return 1 if OP is a valid operand for the MEM of a CALL insn. +;; +;; For TARGET_ABI_OSF, we want to restrict to R27 or a pseudo. + +(define_predicate "call_operand" + (ior (match_code "symbol_ref") + (and (match_code "reg") + (ior (match_test "!TARGET_ABI_OSF") + (match_test "!HARD_REGISTER_P (op)") + (match_test "REGNO (op) == R27_REG"))))) + +;; Return true if OP is a LABEL_REF, or SYMBOL_REF or CONST referencing +;; a (non-tls) variable known to be defined in this file. 
+(define_predicate "local_symbolic_operand" + (match_code "label_ref,const,symbol_ref") +{ + if (GET_CODE (op) == CONST + && GET_CODE (XEXP (op, 0)) == PLUS + && CONST_INT_P (XEXP (XEXP (op, 0), 1))) + op = XEXP (XEXP (op, 0), 0); + + if (GET_CODE (op) == LABEL_REF) + return 1; + + if (GET_CODE (op) != SYMBOL_REF) + return 0; + + return (SYMBOL_REF_LOCAL_P (op) + && !SYMBOL_REF_WEAK (op) + && !SYMBOL_REF_TLS_MODEL (op)); +}) + +;; Return true if OP is a SYMBOL_REF or CONST referencing a variable +;; known to be defined in this file in the small data area. +(define_predicate "small_symbolic_operand" + (match_code "const,symbol_ref") +{ + HOST_WIDE_INT ofs = 0, max_ofs = 0; + + if (! TARGET_SMALL_DATA) + return false; + + if (GET_CODE (op) == CONST + && GET_CODE (XEXP (op, 0)) == PLUS + && CONST_INT_P (XEXP (XEXP (op, 0), 1))) + { + ofs = INTVAL (XEXP (XEXP (op, 0), 1)); + op = XEXP (XEXP (op, 0), 0); + } + + if (GET_CODE (op) != SYMBOL_REF) + return false; + + /* ??? There's no encode_section_info equivalent for the rtl + constant pool, so SYMBOL_FLAG_SMALL never gets set. */ + if (CONSTANT_POOL_ADDRESS_P (op)) + { + max_ofs = GET_MODE_SIZE (get_pool_mode (op)); + if (max_ofs > g_switch_value) + return false; + } + else if (SYMBOL_REF_LOCAL_P (op) + && SYMBOL_REF_SMALL_P (op) + && !SYMBOL_REF_WEAK (op) + && !SYMBOL_REF_TLS_MODEL (op)) + { + if (SYMBOL_REF_DECL (op)) + max_ofs = tree_to_uhwi (DECL_SIZE_UNIT (SYMBOL_REF_DECL (op))); + } + else + return false; + + /* Given that we know that the GP is always 8 byte aligned, we can + always adjust by 7 without overflowing. */ + if (max_ofs < 8) + max_ofs = 8; + + /* Since we know this is an object in a small data section, we know the + entire section is addressable via GP. We don't know where the section + boundaries are, but we know the entire object is within. */ + return IN_RANGE (ofs, 0, max_ofs - 1); +}) + +;; Return true if OP is a SYMBOL_REF or CONST referencing a variable +;; not known (or known not) to be defined in this file. +(define_predicate "global_symbolic_operand" + (match_code "const,symbol_ref") +{ + if (GET_CODE (op) == CONST + && GET_CODE (XEXP (op, 0)) == PLUS + && CONST_INT_P (XEXP (XEXP (op, 0), 1))) + op = XEXP (XEXP (op, 0), 0); + + if (GET_CODE (op) != SYMBOL_REF) + return 0; + + return ((!SYMBOL_REF_LOCAL_P (op) || SYMBOL_REF_WEAK (op)) + && !SYMBOL_REF_TLS_MODEL (op)); +}) + +;; Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref, +;; possibly with an offset. +(define_predicate "symbolic_operand" + (ior (match_code "symbol_ref,label_ref") + (and (match_code "const") + (match_test "GET_CODE (XEXP (op,0)) == PLUS + && (GET_CODE (XEXP (XEXP (op,0), 0)) == SYMBOL_REF + || GET_CODE (XEXP (XEXP (op,0), 0)) == LABEL_REF) + && CONST_INT_P (XEXP (XEXP (op,0), 1))")))) + +;; Return true if OP is valid for 16-bit DTP relative relocations. +(define_predicate "dtp16_symbolic_operand" + (and (match_code "const") + (match_test "tls_symbolic_operand_1 (op, 16, UNSPEC_DTPREL)"))) + +;; Return true if OP is valid for 32-bit DTP relative relocations. +(define_predicate "dtp32_symbolic_operand" + (and (match_code "const") + (match_test "tls_symbolic_operand_1 (op, 32, UNSPEC_DTPREL)"))) + +;; Return true if OP is valid for 64-bit DTP relative relocations. +(define_predicate "gotdtp_symbolic_operand" + (and (match_code "const") + (match_test "tls_symbolic_operand_1 (op, 64, UNSPEC_DTPREL)"))) + +;; Return true if OP is valid for 16-bit TP relative relocations. 
+(define_predicate "tp16_symbolic_operand" + (and (match_code "const") + (match_test "tls_symbolic_operand_1 (op, 16, UNSPEC_TPREL)"))) + +;; Return true if OP is valid for 32-bit TP relative relocations. +(define_predicate "tp32_symbolic_operand" + (and (match_code "const") + (match_test "tls_symbolic_operand_1 (op, 32, UNSPEC_TPREL)"))) + +;; Return true if OP is valid for 64-bit TP relative relocations. +(define_predicate "gottp_symbolic_operand" + (and (match_code "const") + (match_test "tls_symbolic_operand_1 (op, 64, UNSPEC_TPREL)"))) + +;; Return 1 if this memory address is a known aligned register plus +;; a constant. It must be a valid address. This means that we can do +;; this as an aligned reference plus some offset. +;; +;; Take into account what reload will do. Oh god this is awful. +;; The horrible comma-operator construct below is to prevent genrecog +;; from thinking that this predicate accepts REG and SUBREG. We don't +;; use recog during reload, so pretending these codes are accepted +;; pessimizes things a tad. + +(define_special_predicate "aligned_memory_operand" + (ior (match_test "op = resolve_reload_operand (op), 0") + (match_code "mem")) +{ + rtx base; + int offset; + + if (MEM_ALIGN (op) >= 32) + return 1; + + op = XEXP (op, 0); + + /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo) + sorts of constructs. Dig for the real base register. */ + if (reload_in_progress + && GET_CODE (op) == PLUS + && GET_CODE (XEXP (op, 0)) == PLUS) + { + base = XEXP (XEXP (op, 0), 0); + offset = INTVAL (XEXP (op, 1)); + } + else + { + if (! memory_address_p (mode, op)) + return 0; + if (GET_CODE (op) == PLUS) + { + base = XEXP (op, 0); + offset = INTVAL (XEXP (op, 1)); + } + else + { + base = op; + offset = 0; + } + } + + if (offset % GET_MODE_SIZE (mode)) + return 0; + + return (REG_P (base) && REGNO_POINTER_ALIGN (REGNO (base)) >= 32); +}) + +;; Similar, but return 1 if OP is a MEM which is not alignable. + +(define_special_predicate "unaligned_memory_operand" + (ior (match_test "op = resolve_reload_operand (op), 0") + (match_code "mem")) +{ + rtx base; + int offset; + + if (MEM_ALIGN (op) >= 32) + return 0; + + op = XEXP (op, 0); + + /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo) + sorts of constructs. Dig for the real base register. */ + if (reload_in_progress + && GET_CODE (op) == PLUS + && GET_CODE (XEXP (op, 0)) == PLUS) + { + base = XEXP (XEXP (op, 0), 0); + offset = INTVAL (XEXP (op, 1)); + } + else + { + if (! memory_address_p (mode, op)) + return 0; + if (GET_CODE (op) == PLUS) + { + base = XEXP (op, 0); + offset = INTVAL (XEXP (op, 1)); + } + else + { + base = op; + offset = 0; + } + } + + if (offset % GET_MODE_SIZE (mode)) + return 1; + + return (REG_P (base) && REGNO_POINTER_ALIGN (REGNO (base)) < 32); +}) + +;; Return 1 if OP is any memory location. During reload a pseudo matches. +(define_special_predicate "any_memory_operand" + (match_code "mem,reg,subreg") +{ + if (GET_CODE (op) == SUBREG) + op = SUBREG_REG (op); + + if (MEM_P (op)) + return true; + if (reload_in_progress && REG_P (op)) + { + unsigned regno = REGNO (op); + if (HARD_REGISTER_NUM_P (regno)) + return false; + else + return reg_renumber[regno] < 0; + } + + return false; +}) + +;; Return 1 is OP is a memory location that is not a reference +;; (using an AND) to an unaligned location. Take into account +;; what reload will do. 
+(define_special_predicate "normal_memory_operand" + (ior (match_test "op = resolve_reload_operand (op), 0") + (and (match_code "mem") + (match_test "GET_CODE (XEXP (op, 0)) != AND")))) + +;; Returns 1 if OP is not an eliminable register. +;; +;; This exists to cure a pathological failure in the s8addq (et al) patterns, +;; +;; long foo () { long t; bar(); return (long) &t * 26107; } +;; +;; which run afoul of a hack in reload to cure a (presumably) similar +;; problem with lea-type instructions on other targets. But there is +;; one of us and many of them, so work around the problem by selectively +;; preventing combine from making the optimization. + +(define_predicate "reg_not_elim_operand" + (match_operand 0 "register_operand") +{ + if (GET_CODE (op) == SUBREG) + op = SUBREG_REG (op); + return op != frame_pointer_rtx && op != arg_pointer_rtx; +}) + +;; Accept a register, but not a subreg of any kind. This allows us to +;; avoid pathological cases in reload wrt data movement common in +;; int->fp conversion. */ +(define_predicate "reg_no_subreg_operand" + (and (match_code "reg") + (match_operand 0 "register_operand"))) + +;; Return 1 if OP is a valid Alpha comparison operator for "cbranch" +;; instructions. +(define_predicate "alpha_cbranch_operator" + (ior (match_operand 0 "ordered_comparison_operator") + (match_code "ordered,unordered"))) + +;; Return 1 if OP is a valid Alpha comparison operator for "cmp" style +;; instructions. +(define_predicate "alpha_comparison_operator" + (match_code "eq,le,lt,leu,ltu")) + +;; Similarly, but with swapped operands. +(define_predicate "alpha_swapped_comparison_operator" + (match_code "eq,ge,gt,gtu")) + +;; Return 1 if OP is a valid Alpha comparison operator against zero +;; for "bcc" style instructions. +(define_predicate "alpha_zero_comparison_operator" + (match_code "eq,ne,le,lt,leu,ltu")) + +;; Return 1 if OP is a signed comparison operation. +(define_predicate "signed_comparison_operator" + (match_code "eq,ne,le,lt,ge,gt")) + +;; Return 1 if OP is a valid Alpha floating point comparison operator. +(define_predicate "alpha_fp_comparison_operator" + (match_code "eq,le,lt,unordered")) + +;; Return 1 if this is a divide or modulus operator. +(define_predicate "divmod_operator" + (match_code "div,mod,udiv,umod")) + +;; Return 1 if this is a float->int conversion operator. +(define_predicate "fix_operator" + (match_code "fix,unsigned_fix")) + +;; Recognize an addition operation that includes a constant. Used to +;; convince reload to canonize (plus (plus reg c1) c2) during register +;; elimination. + +(define_predicate "addition_operation" + (and (match_code "plus") + (match_test "register_operand (XEXP (op, 0), mode) + && satisfies_constraint_K (XEXP (op, 1))"))) + +;; For TARGET_EXPLICIT_RELOCS, we don't obfuscate a SYMBOL_REF to a +;; small symbolic operand until after reload. At which point we need +;; to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref)) +;; so that sched2 has the proper dependency information. */ +(define_predicate "some_small_symbolic_operand" + (match_code "set,parallel,prefetch,unspec,unspec_volatile") +{ + /* Avoid search unless necessary. */ + if (!TARGET_EXPLICIT_RELOCS || !reload_completed) + return false; + return for_each_rtx (&op, some_small_symbolic_operand_int, NULL); +}) + +;; Accept a register, or a memory if BWX is enabled. 
+(define_predicate "reg_or_bwx_memory_operand" + (ior (match_operand 0 "register_operand") + (and (match_test "TARGET_BWX") + (match_operand 0 "memory_operand")))) + +;; Accept a memory whose address is only a register. +(define_predicate "mem_noofs_operand" + (and (match_code "mem") + (match_code "reg" "0"))) diff --git a/gcc-4.9/gcc/config/alpha/sync.md b/gcc-4.9/gcc/config/alpha/sync.md new file mode 100644 index 000000000..2145fdf2b --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/sync.md @@ -0,0 +1,343 @@ +;; GCC machine description for Alpha synchronization instructions. +;; Copyright (C) 2005-2014 Free Software Foundation, Inc. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; <http://www.gnu.org/licenses/>. + +(define_code_iterator FETCHOP [plus minus ior xor and]) +(define_code_attr fetchop_name + [(plus "add") (minus "sub") (ior "or") (xor "xor") (and "and")]) +(define_code_attr fetchop_pred + [(plus "add_operand") (minus "reg_or_8bit_operand") + (ior "or_operand") (xor "or_operand") (and "and_operand")]) +(define_code_attr fetchop_constr + [(plus "rKL") (minus "rI") (ior "rIN") (xor "rIN") (and "riNHM")]) + + +(define_expand "memory_barrier" + [(set (match_dup 0) + (unspec:BLK [(match_dup 0)] UNSPEC_MB))] + "" +{ + operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode)); + MEM_VOLATILE_P (operands[0]) = 1; +}) + +(define_insn "*memory_barrier" + [(set (match_operand:BLK 0) + (unspec:BLK [(match_dup 0)] UNSPEC_MB))] + "" + "mb" + [(set_attr "type" "mb")]) + +(define_insn "load_locked_<mode>" + [(set (match_operand:I48MODE 0 "register_operand" "=r") + (unspec_volatile:I48MODE + [(match_operand:I48MODE 1 "memory_operand" "m")] + UNSPECV_LL))] + "" + "ld<modesuffix>_l %0,%1" + [(set_attr "type" "ld_l")]) + +(define_insn "store_conditional_<mode>" + [(set (match_operand:DI 0 "register_operand" "=r") + (unspec_volatile:DI [(const_int 0)] UNSPECV_SC)) + (set (match_operand:I48MODE 1 "memory_operand" "=m") + (match_operand:I48MODE 2 "reg_or_0_operand" "0"))] + "" + "st<modesuffix>_c %0,%1" + [(set_attr "type" "st_c")]) + +;; The Alpha Architecture Handbook says that it is UNPREDICTABLE whether +;; the lock is cleared by a normal load or store. This means we cannot +;; expand a ll/sc sequence before reload, lest a register spill is +;; inserted inside the sequence. It is also UNPREDICTABLE whether the +;; lock is cleared by a TAKEN branch. This means that we can not expand +;; a ll/sc sequence containing a branch (i.e. compare-and-swap) until after +;; the final basic-block reordering pass. 
+ +(define_expand "atomic_compare_and_swap<mode>" + [(parallel + [(set (match_operand:DI 0 "register_operand") ;; bool out + (unspec_volatile:DI [(const_int 0)] UNSPECV_CMPXCHG)) + (set (match_operand:I48MODE 1 "register_operand") ;; val out + (unspec_volatile:I48MODE [(const_int 0)] UNSPECV_CMPXCHG)) + (set (match_operand:I48MODE 2 "memory_operand") ;; memory + (unspec_volatile:I48MODE + [(match_dup 2) + (match_operand:I48MODE 3 "reg_or_8bit_operand") ;; expected + (match_operand:I48MODE 4 "add_operand") ;; desired + (match_operand:SI 5 "const_int_operand") ;; is_weak + (match_operand:SI 6 "const_int_operand") ;; succ model + (match_operand:SI 7 "const_int_operand")] ;; fail model + UNSPECV_CMPXCHG))])] + "" +{ + if (<MODE>mode == SImode) + { + operands[3] = convert_modes (DImode, SImode, operands[3], 0); + operands[4] = convert_modes (DImode, SImode, operands[4], 0); + } +}) + +(define_insn_and_split "*atomic_compare_and_swap<mode>" + [(set (match_operand:DI 0 "register_operand" "=&r") ;; bool out + (unspec_volatile:DI [(const_int 0)] UNSPECV_CMPXCHG)) + (set (match_operand:I48MODE 1 "register_operand" "=&r") ;; val out + (unspec_volatile:I48MODE [(const_int 0)] UNSPECV_CMPXCHG)) + (set (match_operand:I48MODE 2 "memory_operand" "+m") ;; memory + (unspec_volatile:I48MODE + [(match_dup 2) + (match_operand:DI 3 "reg_or_8bit_operand" "rI") ;; expected + (match_operand:DI 4 "add_operand" "rKL") ;; desired + (match_operand:SI 5 "const_int_operand") ;; is_weak + (match_operand:SI 6 "const_int_operand") ;; succ model + (match_operand:SI 7 "const_int_operand")] ;; fail model + UNSPECV_CMPXCHG))] + "" + "#" + "epilogue_completed" + [(const_int 0)] +{ + alpha_split_compare_and_swap (operands); + DONE; +} + [(set_attr "type" "multi")]) + +(define_expand "atomic_compare_and_swap<mode>" + [(match_operand:DI 0 "register_operand") ;; bool out + (match_operand:I12MODE 1 "register_operand") ;; val out + (match_operand:I12MODE 2 "mem_noofs_operand") ;; memory + (match_operand:I12MODE 3 "register_operand") ;; expected + (match_operand:I12MODE 4 "add_operand") ;; desired + (match_operand:SI 5 "const_int_operand") ;; is_weak + (match_operand:SI 6 "const_int_operand") ;; succ model + (match_operand:SI 7 "const_int_operand")] ;; fail model + "" +{ + alpha_expand_compare_and_swap_12 (operands); + DONE; +}) + +(define_insn_and_split "atomic_compare_and_swap<mode>_1" + [(set (match_operand:DI 0 "register_operand" "=&r") ;; bool out + (unspec_volatile:DI [(const_int 0)] UNSPECV_CMPXCHG)) + (set (match_operand:DI 1 "register_operand" "=&r") ;; val out + (zero_extend:DI + (unspec_volatile:I12MODE [(const_int 0)] UNSPECV_CMPXCHG))) + (set (match_operand:I12MODE 2 "mem_noofs_operand" "+w") ;; memory + (unspec_volatile:I12MODE + [(match_dup 2) + (match_operand:DI 3 "reg_or_8bit_operand" "rI") ;; expected + (match_operand:DI 4 "reg_or_0_operand" "rJ") ;; desired + (match_operand:DI 5 "register_operand" "r") ;; align + (match_operand:SI 6 "const_int_operand") ;; is_weak + (match_operand:SI 7 "const_int_operand") ;; succ model + (match_operand:SI 8 "const_int_operand")] ;; fail model + UNSPECV_CMPXCHG)) + (clobber (match_scratch:DI 9 "=&r"))] + "" + "#" + "epilogue_completed" + [(const_int 0)] +{ + alpha_split_compare_and_swap_12 (operands); + DONE; +} + [(set_attr "type" "multi")]) + +(define_insn_and_split "atomic_exchange<mode>" + [(set (match_operand:I48MODE 0 "register_operand" "=&r") ;; output + (match_operand:I48MODE 1 "memory_operand" "+m")) ;; memory + (set (match_dup 1) + (unspec:I48MODE + [(match_operand:I48MODE 
2 "add_operand" "rKL") ;; input + (match_operand:SI 3 "const_int_operand")] ;; model + UNSPEC_XCHG)) + (clobber (match_scratch:I48MODE 4 "=&r"))] + "" + "#" + "epilogue_completed" + [(const_int 0)] +{ + alpha_split_atomic_exchange (operands); + DONE; +} + [(set_attr "type" "multi")]) + +(define_expand "atomic_exchange<mode>" + [(match_operand:I12MODE 0 "register_operand") ;; output + (match_operand:I12MODE 1 "mem_noofs_operand") ;; memory + (match_operand:I12MODE 2 "reg_or_0_operand") ;; input + (match_operand:SI 3 "const_int_operand")] ;; model + "" +{ + alpha_expand_atomic_exchange_12 (operands); + DONE; +}) + +(define_insn_and_split "atomic_exchange<mode>_1" + [(set (match_operand:DI 0 "register_operand" "=&r") ;; output + (zero_extend:DI + (match_operand:I12MODE 1 "mem_noofs_operand" "+w"))) ;; memory + (set (match_dup 1) + (unspec:I12MODE + [(match_operand:DI 2 "reg_or_8bit_operand" "rI") ;; input + (match_operand:DI 3 "register_operand" "r") ;; align + (match_operand:SI 4 "const_int_operand")] ;; model + UNSPEC_XCHG)) + (clobber (match_scratch:DI 5 "=&r"))] + "" + "#" + "epilogue_completed" + [(const_int 0)] +{ + alpha_split_atomic_exchange_12 (operands); + DONE; +} + [(set_attr "type" "multi")]) + +(define_insn_and_split "atomic_<fetchop_name><mode>" + [(set (match_operand:I48MODE 0 "memory_operand" "+m") + (unspec:I48MODE + [(FETCHOP:I48MODE (match_dup 0) + (match_operand:I48MODE 1 "<fetchop_pred>" "<fetchop_constr>")) + (match_operand:SI 2 "const_int_operand")] + UNSPEC_ATOMIC)) + (clobber (match_scratch:I48MODE 3 "=&r"))] + "" + "#" + "epilogue_completed" + [(const_int 0)] +{ + alpha_split_atomic_op (<CODE>, operands[0], operands[1], + NULL, NULL, operands[3], + (enum memmodel) INTVAL (operands[2])); + DONE; +} + [(set_attr "type" "multi")]) + +(define_insn_and_split "atomic_nand<mode>" + [(set (match_operand:I48MODE 0 "memory_operand" "+m") + (unspec:I48MODE + [(not:I48MODE + (and:I48MODE (match_dup 0) + (match_operand:I48MODE 1 "register_operand" "r"))) + (match_operand:SI 2 "const_int_operand")] + UNSPEC_ATOMIC)) + (clobber (match_scratch:I48MODE 3 "=&r"))] + "" + "#" + "epilogue_completed" + [(const_int 0)] +{ + alpha_split_atomic_op (NOT, operands[0], operands[1], + NULL, NULL, operands[3], + (enum memmodel) INTVAL (operands[2])); + DONE; +} + [(set_attr "type" "multi")]) + +(define_insn_and_split "atomic_fetch_<fetchop_name><mode>" + [(set (match_operand:I48MODE 0 "register_operand" "=&r") + (match_operand:I48MODE 1 "memory_operand" "+m")) + (set (match_dup 1) + (unspec:I48MODE + [(FETCHOP:I48MODE (match_dup 1) + (match_operand:I48MODE 2 "<fetchop_pred>" "<fetchop_constr>")) + (match_operand:SI 3 "const_int_operand")] + UNSPEC_ATOMIC)) + (clobber (match_scratch:I48MODE 4 "=&r"))] + "" + "#" + "epilogue_completed" + [(const_int 0)] +{ + alpha_split_atomic_op (<CODE>, operands[1], operands[2], + operands[0], NULL, operands[4], + (enum memmodel) INTVAL (operands[3])); + DONE; +} + [(set_attr "type" "multi")]) + +(define_insn_and_split "atomic_fetch_nand<mode>" + [(set (match_operand:I48MODE 0 "register_operand" "=&r") + (match_operand:I48MODE 1 "memory_operand" "+m")) + (set (match_dup 1) + (unspec:I48MODE + [(not:I48MODE + (and:I48MODE (match_dup 1) + (match_operand:I48MODE 2 "register_operand" "r"))) + (match_operand:SI 3 "const_int_operand")] + UNSPEC_ATOMIC)) + (clobber (match_scratch:I48MODE 4 "=&r"))] + "" + "#" + "epilogue_completed" + [(const_int 0)] +{ + alpha_split_atomic_op (NOT, operands[1], operands[2], + operands[0], NULL, operands[4], + (enum memmodel) INTVAL 
(operands[3])); + DONE; +} + [(set_attr "type" "multi")]) + +(define_insn_and_split "atomic_<fetchop_name>_fetch<mode>" + [(set (match_operand:I48MODE 0 "register_operand" "=&r") + (FETCHOP:I48MODE + (match_operand:I48MODE 1 "memory_operand" "+m") + (match_operand:I48MODE 2 "<fetchop_pred>" "<fetchop_constr>"))) + (set (match_dup 1) + (unspec:I48MODE + [(FETCHOP:I48MODE (match_dup 1) (match_dup 2)) + (match_operand:SI 3 "const_int_operand")] + UNSPEC_ATOMIC)) + (clobber (match_scratch:I48MODE 4 "=&r"))] + "" + "#" + "epilogue_completed" + [(const_int 0)] +{ + alpha_split_atomic_op (<CODE>, operands[1], operands[2], + NULL, operands[0], operands[4], + (enum memmodel) INTVAL (operands[3])); + DONE; +} + [(set_attr "type" "multi")]) + +(define_insn_and_split "atomic_nand_fetch<mode>" + [(set (match_operand:I48MODE 0 "register_operand" "=&r") + (not:I48MODE + (and:I48MODE (match_operand:I48MODE 1 "memory_operand" "+m") + (match_operand:I48MODE 2 "register_operand" "r")))) + (set (match_dup 1) + (unspec:I48MODE + [(not:I48MODE (and:I48MODE (match_dup 1) (match_dup 2))) + (match_operand:SI 3 "const_int_operand")] + UNSPEC_ATOMIC)) + (clobber (match_scratch:I48MODE 4 "=&r"))] + "" + "#" + "epilogue_completed" + [(const_int 0)] +{ + alpha_split_atomic_op (NOT, operands[1], operands[2], + NULL, operands[0], operands[4], + (enum memmodel) INTVAL (operands[3])); + DONE; +} + [(set_attr "type" "multi")]) diff --git a/gcc-4.9/gcc/config/alpha/t-linux b/gcc-4.9/gcc/config/alpha/t-linux new file mode 100644 index 000000000..1b4a26f74 --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/t-linux @@ -0,0 +1 @@ +MULTIARCH_DIRNAME = $(call if_multiarch,alpha-linux-gnu) diff --git a/gcc-4.9/gcc/config/alpha/t-vms b/gcc-4.9/gcc/config/alpha/t-vms new file mode 100644 index 000000000..12a702125 --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/t-vms @@ -0,0 +1,21 @@ +# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# +# This file is part of GCC. +# +# GCC is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GCC is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# <http://www.gnu.org/licenses/>. + +MULTILIB_OPTIONS = mcpu=ev6 +MULTILIB_DIRNAMES = ev6 +MULTILIB_OSDIRNAMES = ev6 diff --git a/gcc-4.9/gcc/config/alpha/vms.h b/gcc-4.9/gcc/config/alpha/vms.h new file mode 100644 index 000000000..b2977784b --- /dev/null +++ b/gcc-4.9/gcc/config/alpha/vms.h @@ -0,0 +1,306 @@ +/* Output variables, constants and external declarations, for GNU compiler. + Copyright (C) 1996-2014 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +/* Alpha/VMS object format is not really Elf, but this makes compiling + crtstuff.c and dealing with shared library initialization much easier. */ +#define OBJECT_FORMAT_ELF + +/* Do not use TM clone registry as it currently doesn't work. Alpha/VMS + object is too far from ELF for supporting TM out of the box. */ +#define USE_TM_CLONE_REGISTRY 0 + +/* This enables certain macros in alpha.h, which will make an indirect + reference to an external symbol an invalid address. This needs to be + defined before we include alpha.h, since it determines which macros + are used for GO_IF_*. */ + +#define NO_EXTERNAL_INDIRECT_ADDRESS + +#define SUBTARGET_OS_CPP_BUILTINS() \ + do { \ + builtin_define ("__ALPHA"); \ + if (TARGET_FLOAT_VAX) \ + builtin_define ("__G_FLOAT"); \ + else \ + builtin_define ("__IEEE_FLOAT"); \ + } while (0) + +#undef PCC_STATIC_STRUCT_RETURN + +#define MAX_OFILE_ALIGNMENT 524288 /* 8 x 2^16 by DEC Ada Test CD40VRA */ + +/* The maximum alignment 'malloc' honors. */ +#undef MALLOC_ABI_ALIGNMENT +#define MALLOC_ABI_ALIGNMENT \ + ((flag_vms_malloc64 && flag_vms_pointer_size != VMS_POINTER_SIZE_NONE \ + ? 16 : 8) * BITS_PER_UNIT) + +#undef FIXED_REGISTERS +#define FIXED_REGISTERS \ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, \ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } + +#undef CALL_USED_REGISTERS +#define CALL_USED_REGISTERS \ + {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ + 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, \ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 } + +/* List the order in which to allocate registers. Each register must be + listed once, even those in FIXED_REGISTERS. + + We allocate in the following order: + $f1 (nonsaved floating-point register) + $f10-$f15 (likewise) + $f22-$f30 (likewise) + $f21-$f16 (likewise, but input args) + $f0 (nonsaved, but return value) + $f2-$f9 (saved floating-point registers) + $1 (nonsaved integer registers) + $22-$25 (likewise) + $28 (likewise) + $0 (likewise, but return value) + $21-$16 (likewise, but input args) + $27 (procedure value in OSF, nonsaved in NT) + $2-$8 (saved integer registers) + $9-$14 (saved integer registers) + $26 (return PC) + $15 (frame pointer) + $29 (global pointer) + $30, $31, $f31 (stack pointer and always zero/ap & fp) */ + +#undef REG_ALLOC_ORDER +#define REG_ALLOC_ORDER \ + {33, \ + 42, 43, 44, 45, 46, 47, \ + 54, 55, 56, 57, 58, 59, 60, 61, 62, \ + 53, 52, 51, 50, 49, 48, \ + 32, \ + 34, 35, 36, 37, 38, 39, 40, 41, \ + 1, \ + 22, 23, 24, 25, \ + 28, \ + 0, \ + 21, 20, 19, 18, 17, 16, \ + 27, \ + 2, 3, 4, 5, 6, 7, 8, \ + 9, 10, 11, 12, 13, 14, \ + 26, \ + 15, \ + 29, \ + 30, 31, 63 } + +#undef HARD_FRAME_POINTER_REGNUM +#define HARD_FRAME_POINTER_REGNUM 29 + +/* Define registers used by the epilogue and return instruction. */ +#undef EPILOGUE_USES +#define EPILOGUE_USES(REGNO) ((REGNO) == 26 || (REGNO) == 29) + +#undef INITIAL_ELIMINATION_OFFSET +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ + ((OFFSET) = alpha_vms_initial_elimination_offset(FROM, TO)) + + +/* Define a data type for recording info about an argument list + during the scan of that argument list. 
+
+
+/* Define a data type for recording info about an argument list
+   during the scan of that argument list.  This data type should
+   hold all necessary information about the function itself
+   and about the args processed so far, enough to enable macros
+   such as FUNCTION_ARG to determine where the next arg should go.
+
+   On Alpha/VMS, this is a structure that contains the number of
+   arguments and, for each argument, the data type of that argument.
+
+   The number of arguments is the number of words of arguments scanned
+   so far.  Thus 6 or more means all following args should go on the
+   stack.  */
+
+enum avms_arg_type {I64, FF, FD, FG, FS, FT};
+typedef struct {int num_args; enum avms_arg_type atypes[6];} avms_arg_info;
+
+#undef CUMULATIVE_ARGS
+#define CUMULATIVE_ARGS avms_arg_info
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+   for a call to a function whose data type is FNTYPE.
+   For a library call, FNTYPE is 0.  */
+
+#undef INIT_CUMULATIVE_ARGS
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+  (CUM).num_args = 0;                                          \
+  (CUM).atypes[0] = (CUM).atypes[1] = (CUM).atypes[2] = I64;   \
+  (CUM).atypes[3] = (CUM).atypes[4] = (CUM).atypes[5] = I64;
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Even though pointers are 64 bits, only 32 bits ever remain significant
+   in code addresses.  */
+#define MASK_RETURN_ADDR \
+  (flag_vms_pointer_size == VMS_POINTER_SIZE_NONE \
+   ? constm1_rtx \
+   : GEN_INT (0xffffffff))
+
+#undef ASM_WEAKEN_LABEL
+#define ASM_WEAKEN_LABEL(FILE, NAME)                           \
+  do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME);  \
+       fputc ('\n', FILE); } while (0)
+
+#define READONLY_DATA_SECTION_ASM_OP "\t.rdata"
+#define CTORS_SECTION_ASM_OP "\t.ctors"
+#define DTORS_SECTION_ASM_OP "\t.dtors"
+#define SDATA_SECTION_ASM_OP "\t.sdata"
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+   asm (SECTION_OP "\n\t.long " #FUNC"\n");
+
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+
+#undef ASM_OUTPUT_ADDR_VEC_ELT
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+  fprintf (FILE, "\t.quad $L%d\n", (VALUE))
+
+#undef CASE_VECTOR_MODE
+#define CASE_VECTOR_MODE DImode
+#undef CASE_VECTOR_PC_RELATIVE
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,TABLEINSN) \
+{ ASM_OUTPUT_ALIGN (FILE, 3); (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); }
+
+/* This says how to output assembler code to declare an
+   uninitialized external linkage data object.  */
+
+#define COMMON_ASM_OP "\t.comm\t"
+
+#undef ASM_OUTPUT_ALIGNED_DECL_COMMON
+#define ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN) \
+  vms_output_aligned_decl_common (FILE, DECL, NAME, SIZE, ALIGN)
+
+/* Control how constructors and destructors are emitted.  */
+#define TARGET_ASM_CONSTRUCTOR vms_asm_out_constructor
+#define TARGET_ASM_DESTRUCTOR vms_asm_out_destructor
+
+#define DWARF2_DEBUGGING_INFO 1
+#define VMS_DEBUGGING_INFO 1
+
+#define DWARF2_UNWIND_INFO 1
+
+#undef EH_RETURN_HANDLER_RTX
+#define EH_RETURN_HANDLER_RTX \
+  gen_rtx_MEM (Pmode, plus_constant (Pmode, stack_pointer_rtx, 8))
+
+#define LINK_EH_SPEC "vms-dwarf2eh.o%s "
+#define LINK_GCC_C_SEQUENCE_SPEC "%G"
+
+/* This is how to output an assembler line
+   that says to advance the location counter
+   to a multiple of 2**LOG bytes.  */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+  fprintf (FILE, "\t.align %d\n", LOG);
+
+/* This is how to advance the location counter by SIZE bytes.  */
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+  fprintf (FILE, "\t.space "HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE))
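To make the cumulative-args comment above INIT_CUMULATIVE_ARGS concrete: only the first six argument words get a typed slot in atypes, and once num_args reaches 6 every further argument goes on the stack. The self-contained sketch below mimics that bookkeeping; advance_arg is a hypothetical stand-in for the target's argument-advance hook, not the code in alpha.c:

/* Sketch, not the real alpha.c code: how an avms_arg_info record
   might be filled in while scanning a call's arguments.  */
#include <stdio.h>

enum avms_arg_type { I64, FF, FD, FG, FS, FT };
typedef struct { int num_args; enum avms_arg_type atypes[6]; } avms_arg_info;

/* Hypothetical helper: record one scanned argument word.  Only the
   first six words are typed; later ones go on the stack.  */
static void
advance_arg (avms_arg_info *cum, enum avms_arg_type type)
{
  if (cum->num_args < 6)
    cum->atypes[cum->num_args] = type;
  cum->num_args++;              /* words scanned so far */
}

int main (void)
{
  /* Same state INIT_CUMULATIVE_ARGS establishes: zero args, all I64.  */
  avms_arg_info cum = { 0, { I64, I64, I64, I64, I64, I64 } };

  advance_arg (&cum, I64);      /* e.g. a long          */
  advance_arg (&cum, FT);       /* e.g. an IEEE double  */
  advance_arg (&cum, I64);      /* e.g. a pointer       */

  printf ("%d words scanned; %s\n", cum.num_args,
          cum.num_args >= 6 ? "rest go on the stack"
                            : "more fit in registers");
  return 0;
}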
+
+/* This says how to output an assembler line
+   to define a local common symbol.  */
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE,ROUNDED) \
+( fputs ("\t.lcomm ", (FILE)),                     \
+  assemble_name ((FILE), (NAME)),                  \
+  fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE)))
+
+/* Switch into a generic section.  */
+#define TARGET_ASM_NAMED_SECTION vms_asm_named_section
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2)  \
+  do                                        \
+    {                                       \
+      fprintf ((FILE), "\t");               \
+      assemble_name (FILE, LABEL1);         \
+      fprintf (FILE, " = ");                \
+      assemble_name (FILE, LABEL2);         \
+      fprintf (FILE, "\n");                 \
+    }                                       \
+  while (0)
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE VMS_AND_DWARF2_DEBUG
+
+#define ASM_PN_FORMAT "%s___%lu"
+
+/* ??? VMS uses different linkage.  */
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+
+#undef ASM_SPEC
+#undef ASM_FINAL_SPEC
+
+/* The VMS convention is to always provide minimal debug info
+   for a traceback unless specifically overridden.
+
+   Because ASM_OUTPUT_ADDR_DIFF_ELT is not defined for alpha-vms,
+   jump tables cannot be emitted for PIC code: an absolute address
+   cannot be placed in a read-only section, and putting the table in
+   a writable section is a security hole.  Therefore, we clear
+   flag_jump_tables, forcing switch statements to be expanded using
+   decision trees.  There are probably other ways to address this
+   issue, but using a decision tree is clearly safe.  */
+
+#undef SUBTARGET_OVERRIDE_OPTIONS
+#define SUBTARGET_OVERRIDE_OPTIONS                \
+do {                                              \
+  if (write_symbols == NO_DEBUG                   \
+      && debug_info_level == DINFO_LEVEL_NONE)    \
+    {                                             \
+      write_symbols = VMS_DEBUG;                  \
+      debug_info_level = DINFO_LEVEL_TERSE;       \
+    }                                             \
+  if (flag_pic)                                   \
+    flag_jump_tables = 0;                         \
+} while (0)
+
+#undef LINK_SPEC
+#if HAVE_GNU_LD
+/* GNU ld's built-in linker script already handles the DWARF2 debug
+   sections.  */
+#define LINK_SPEC "%{shared} %{v}"
+#else
+/* Link with vms-dwarf2.o if -g (except -g0).  This causes the
+   VMS link to pull all the DWARF2 debug sections together.  */
+#define LINK_SPEC "%{g0} %{g*:-g vms-dwarf2.o%s} %{shared} %{v} %{map}"
+#endif
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:crt0.o%s crtbegin.o%s} \
+  %{!static:%{shared:crtbeginS.o%s}}"
+
+#define ENDFILE_SPEC "%{!shared:crtend.o%s} %{!static:%{shared:crtendS.o%s}}"
+
+#define INIT_SECTION_ASM_OP "\t.section LIB$INITIALIZE,GBL,NOWRT"
+
+#define LONGLONG_STANDALONE 1
+
+#undef TARGET_VALID_POINTER_MODE
+#define TARGET_VALID_POINTER_MODE vms_valid_pointer_mode
+
+/* Default values for _CRTL_VER and _VMS_VER.  */
+#define VMS_DEFAULT_CRTL_VER 70320000
+#define VMS_DEFAULT_VMS_VER 70320000
diff --git a/gcc-4.9/gcc/config/alpha/x-alpha b/gcc-4.9/gcc/config/alpha/x-alpha
new file mode 100644
index 000000000..2b22e5e32
--- /dev/null
+++ b/gcc-4.9/gcc/config/alpha/x-alpha
@@ -0,0 +1,3 @@
+driver-alpha.o: $(srcdir)/config/alpha/driver-alpha.c
+	$(COMPILE) $<
+	$(POSTCOMPILE)
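On the decision-tree fallback mentioned in SUBTARGET_OVERRIDE_OPTIONS above: with flag_jump_tables cleared, a switch cannot become an indirect jump through a table of absolute code addresses, so it is expanded as compare-and-branch code instead. The pair of functions below is a hand-written illustration of that shape, not actual GCC output:

/* Illustration only: a dense switch, and the balanced decision tree
   a compiler can expand it into when jump tables are disabled.  */
int with_switch (int x)
{
  switch (x)
    {
    case 0: return 10;
    case 1: return 11;
    case 2: return 12;
    case 3: return 13;
    default: return -1;
    }
}

/* Hand-written equivalent using only compares and branches: no
   absolute code address ever lands in a data section, which is the
   security concern the comment above raises.  */
int as_decision_tree (int x)
{
  if (x < 2)
    {
      if (x == 0) return 10;
      if (x == 1) return 11;
    }
  else
    {
      if (x == 2) return 12;
      if (x == 3) return 13;
    }
  return -1;
}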