author    Ben Cheng <bccheng@google.com>  2013-03-28 11:14:20 -0700
committer Ben Cheng <bccheng@google.com>  2013-03-28 12:40:33 -0700
commit    af0c51ac87ab2a87caa03fa108f0d164987a2764
tree      4b8b470f7c5b69642fdab8d0aa1fbc148d02196b /gcc-4.8/gcc/config/vax
parent    d87cae247d39ebf4f5a6bf25c932a14d2fdb9384
[GCC 4.8] Initial check-in of GCC 4.8.0
Change-Id: I0719d8a6d0f69b367a6ab6f10eb75622dbf12771
Diffstat (limited to 'gcc-4.8/gcc/config/vax')
-rw-r--r--  gcc-4.8/gcc/config/vax/builtins.md    |  192
-rw-r--r--  gcc-4.8/gcc/config/vax/constraints.md |  118
-rw-r--r--  gcc-4.8/gcc/config/vax/elf.h          |  112
-rw-r--r--  gcc-4.8/gcc/config/vax/elf.opt        |   29
-rw-r--r--  gcc-4.8/gcc/config/vax/linux.h        |   51
-rw-r--r--  gcc-4.8/gcc/config/vax/netbsd-elf.h   |   68
-rw-r--r--  gcc-4.8/gcc/config/vax/openbsd.h      |   50
-rw-r--r--  gcc-4.8/gcc/config/vax/openbsd1.h     |   22
-rw-r--r--  gcc-4.8/gcc/config/vax/predicates.md  |  111
-rw-r--r--  gcc-4.8/gcc/config/vax/vax-modes.def  |   22
-rw-r--r--  gcc-4.8/gcc/config/vax/vax-protos.h   |   39
-rw-r--r--  gcc-4.8/gcc/config/vax/vax.c          | 2175
-rw-r--r--  gcc-4.8/gcc/config/vax/vax.h          |  708
-rw-r--r--  gcc-4.8/gcc/config/vax/vax.md         | 1662
-rw-r--r--  gcc-4.8/gcc/config/vax/vax.opt        |   51
15 files changed, 5410 insertions(+), 0 deletions(-)
diff --git a/gcc-4.8/gcc/config/vax/builtins.md b/gcc-4.8/gcc/config/vax/builtins.md
new file mode 100644
index 000000000..3212d69e0
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/builtins.md
@@ -0,0 +1,192 @@
+;; Builtin definitions for DEC VAX.
+;; Copyright (C) 2007-2013 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it under
+;; the terms of the GNU General Public License as published by the Free
+;; Software Foundation; either version 3, or (at your option) any later
+;; version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+;; for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_constants
+ [
+ (VUNSPEC_LOCK 100) ; sync lock and test
+ (VUNSPEC_UNLOCK 101) ; sync lock release
+ ]
+)
+
+(define_expand "ffssi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (ffs:SI (match_operand:SI 1 "general_operand" "")))]
+ ""
+ "
+{
+ rtx label = gen_label_rtx ();
+ emit_insn (gen_ffssi2_internal (operands[0], operands[1]));
+ emit_jump_insn (gen_bne (label));
+ emit_insn (gen_negsi2 (operands[0], const1_rtx));
+ emit_label (label);
+ emit_insn (gen_addsi3 (operands[0], operands[0], const1_rtx));
+ DONE;
+}")
+
+(define_insn "ffssi2_internal"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rQ")
+ (ffs:SI (match_operand:SI 1 "general_operand" "nrmT")))
+ (set (cc0) (match_dup 0))]
+ ""
+ "ffs $0,$32,%1,%0")
+
+(define_expand "sync_lock_test_and_set<mode>"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=&g")
+ (unspec:VAXint [(match_operand:VAXint 1 "memory_operand" "+m")
+ (match_operand:VAXint 2 "const_int_operand" "n")
+ ] VUNSPEC_LOCK))]
+ ""
+ "
+{
+ rtx label;
+
+ if (operands[2] != const1_rtx)
+ FAIL;
+
+ label = gen_label_rtx ();
+ emit_move_insn (operands[0], const1_rtx);
+ emit_jump_insn (gen_jbbssi<mode> (operands[1], const0_rtx, label, operands[1]));
+ emit_move_insn (operands[0], const0_rtx);
+ emit_label (label);
+ DONE;
+}")
+
+(define_insn "jbbssiqi"
+ [(parallel
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:SI (match_operand:QI 0 "memory_operand" "g")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "nrm"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (zero_extract:SI (match_operand:QI 3 "memory_operand" "+0")
+ (const_int 1)
+ (match_dup 1))
+ (const_int 1))])]
+ ""
+ "jbssi %1,%0,%l2")
+
+(define_insn "jbbssihi"
+ [(parallel
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:SI (match_operand:HI 0 "memory_operand" "Q")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "nrm"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (zero_extract:SI (match_operand:HI 3 "memory_operand" "+0")
+ (const_int 1)
+ (match_dup 1))
+ (const_int 1))])]
+ ""
+ "jbssi %1,%0,%l2")
+
+(define_insn "jbbssisi"
+ [(parallel
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:SI (match_operand:SI 0 "memory_operand" "Q")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "nrm"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (zero_extract:SI (match_operand:SI 3 "memory_operand" "+0")
+ (const_int 1)
+ (match_dup 1))
+ (const_int 1))])]
+ ""
+ "jbssi %1,%0,%l2")
+
+
+(define_expand "sync_lock_release<mode>"
+ [(set (match_operand:VAXint 0 "memory_operand" "+m")
+ (unspec:VAXint [(match_operand:VAXint 1 "const_int_operand" "n")
+ ] VUNSPEC_UNLOCK))]
+ ""
+ "
+{
+ rtx label;
+ if (operands[1] != const0_rtx)
+ FAIL;
+#if 1
+ label = gen_label_rtx ();
+ emit_jump_insn (gen_jbbcci<mode> (operands[0], const0_rtx, label, operands[0]));
+ emit_label (label);
+#else
+ emit_move_insn (operands[0], const0_rtx);
+#endif
+ DONE;
+}")
+
+(define_insn "jbbcciqi"
+ [(parallel
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:SI (match_operand:QI 0 "memory_operand" "g")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "nrm"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (zero_extract:SI (match_operand:QI 3 "memory_operand" "+0")
+ (const_int 1)
+ (match_dup 1))
+ (const_int 0))])]
+ ""
+ "jbcci %1,%0,%l2")
+
+(define_insn "jbbccihi"
+ [(parallel
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:SI (match_operand:HI 0 "memory_operand" "Q")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "nrm"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (zero_extract:SI (match_operand:HI 3 "memory_operand" "+0")
+ (const_int 1)
+ (match_dup 1))
+ (const_int 0))])]
+ ""
+ "jbcci %1,%0,%l2")
+
+(define_insn "jbbccisi"
+ [(parallel
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:SI (match_operand:SI 0 "memory_operand" "Q")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "nrm"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (zero_extract:SI (match_operand:SI 3 "memory_operand" "+0")
+ (const_int 1)
+ (match_dup 1))
+ (const_int 0))])]
+ ""
+ "jbcci %1,%0,%l2")
+
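A quick illustration of the semantics the ffssi2 expander above open-codes: GCC's __builtin_ffs(x) returns the 1-based index of the least significant set bit, or 0 when x is zero, while the VAX FFS instruction produces a 0-based position and signals "no bit found" through the condition codes. The expander therefore branches on the not-found case, substitutes -1, and adds 1. A minimal host-side sketch of that contract (any GCC-compatible compiler; nothing VAX-specific assumed):

    /* Sketch of the __builtin_ffs contract that ffssi2 implements:
       0-based hardware result + 1, with 0 meaning "no bit set".  */
    #include <stdio.h>

    int main (void)
    {
      printf ("%d\n", __builtin_ffs (0));     /* 0: no bit set       */
      printf ("%d\n", __builtin_ffs (1));     /* 1: bit 0, 1-based   */
      printf ("%d\n", __builtin_ffs (0x50));  /* 5: bit 4, 1-based   */
      return 0;
    }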
diff --git a/gcc-4.8/gcc/config/vax/constraints.md b/gcc-4.8/gcc/config/vax/constraints.md
new file mode 100644
index 000000000..a4774d4d5
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/constraints.md
@@ -0,0 +1,118 @@
+;; Constraints for the DEC VAX port.
+;; Copyright (C) 2007-2013 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it under
+;; the terms of the GNU General Public License as published by the Free
+;; Software Foundation; either version 3, or (at your option) any later
+;; version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+;; for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_constraint "Z0"
+ "Match a CONST_INT of 0"
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "U06"
+ "unsigned 6 bit value (0..63)"
+ (and (match_code "const_int")
+ (match_test "0 <= ival && ival < 64")))
+
+(define_constraint "U08"
+ "Unsigned 8 bit value"
+ (and (match_code "const_int")
+ (match_test "0 <= ival && ival < 256")))
+
+(define_constraint "U16"
+ "Unsigned 16 bit value"
+ (and (match_code "const_int")
+ (match_test "0 <= ival && ival < 65536")))
+
+(define_constraint "CN6"
+ "negative 6 bit value (-63..-1)"
+ (and (match_code "const_int")
+ (match_test "-63 <= ival && ival < 0")))
+
+(define_constraint "S08"
+ "signed 8 bit value [old]"
+ (and (match_code "const_int")
+ (match_test "-128 <= ival && ival < 128")))
+
+(define_constraint "S16"
+ "signed 16 bit value [old]"
+ (and (match_code "const_int")
+ (match_test "-32768 <= ival && ival < 32768")))
+
+(define_constraint "I"
+ "Match a CONST_INT of 0 [old]"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_Z0 (GEN_INT (ival))")))
+
+(define_constraint "J"
+ "unsigned 6 bit value [old]"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_U06 (GEN_INT (ival))")))
+
+(define_constraint "K"
+ "signed 8 bit value [old]"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_S08 (GEN_INT (ival))")))
+
+(define_constraint "L"
+ "signed 16 bit value [old]"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_S16 (GEN_INT (ival))")))
+
+(define_constraint "M"
+ "Unsigned 8 bit value [old]"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_U08 (GEN_INT (ival))")))
+
+(define_constraint "N"
+ "Unsigned 16 bit value [old]"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_U16 (GEN_INT (ival))")))
+
+(define_constraint "O"
+ "Negative short literals (-63..-1) [old]"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_CN6 (GEN_INT (ival))")))
+
+/* Similar, but for floating constants, and defining letters G and H. */
+
+(define_constraint "G"
+ "Match a floating-point zero"
+ (and (match_code "const_double")
+ (match_test "op == CONST0_RTX (DFmode) || op == CONST0_RTX (SFmode)")))
+
+/* Optional extra constraints for this machine. */
+
+(define_memory_constraint "Q"
+ "operand is a MEM that does not have a mode-dependent address."
+ (and (match_code "mem")
+ (match_test "!mode_dependent_address_p (XEXP (op, 0),
+ MEM_ADDR_SPACE (op))")))
+
+(define_memory_constraint "B"
+ ""
+ (and (match_operand:BLK 0 "memory_operand")
+ (not (match_operand:BLK 0 "illegal_blk_memory_operand" ""))))
+
+(define_memory_constraint "R"
+ ""
+ (and (match_operand:DI 0 "memory_operand")
+ (not (match_operand:DI 0 "illegal_addsub_di_memory_operand" ""))))
+
+(define_constraint "T"
+ "@internal satisfies CONSTANT_P and, if pic is enabled, is not a SYMBOL_REF, LABEL_REF, or CONST."
+ (ior (not (match_code "const,symbol_ref,label_ref"))
+ (match_test "!flag_pic")))
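Each define_constraint above is compiled by genpreds into a satisfies_constraint_* predicate over const_int rtxes. As a hedged, standalone sketch of the underlying range tests (plain C with hypothetical names, not the generated tm-constrs.h code):

    /* Range tests behind the U06 (6-bit unsigned literal) and CN6
       (negative short literal) constraints; names are illustrative.  */
    #include <stdio.h>

    static int u06_ok (long ival) { return 0 <= ival && ival < 64; }
    static int cn6_ok (long ival) { return -63 <= ival && ival < 0; }

    int main (void)
    {
      printf ("63 as U06: %d\n", u06_ok (63));    /* 1: fits short literal */
      printf ("64 as U06: %d\n", u06_ok (64));    /* 0: needs wider form   */
      printf ("-1 as CN6: %d\n", cn6_ok (-1));    /* 1                     */
      printf ("-64 as CN6: %d\n", cn6_ok (-64));  /* 0: below -63          */
      return 0;
    }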
diff --git a/gcc-4.8/gcc/config/vax/elf.h b/gcc-4.8/gcc/config/vax/elf.h
new file mode 100644
index 000000000..e7fc9ae1e
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/elf.h
@@ -0,0 +1,112 @@
+/* Target definitions for GNU compiler for VAX using ELF
+ Copyright (C) 2002-2013 Free Software Foundation, Inc.
+ Contributed by Matt Thomas <matt@3am-software.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef TARGET_ELF
+#define TARGET_ELF 1
+
+#undef REGISTER_PREFIX
+#undef REGISTER_NAMES
+#define REGISTER_PREFIX "%"
+#define REGISTER_NAMES \
+ { "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", \
+ "%r8", "%r9", "%r10", "%r11", "%ap", "%fp", "%sp", "%pc", }
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+/* Profiling routine. */
+#undef VAX_FUNCTION_PROFILER_NAME
+#define VAX_FUNCTION_PROFILER_NAME "__mcount"
+
+/* Let's be re-entrant. */
+#undef PCC_STATIC_STRUCT_RETURN
+
+/* Before the prologue, the top of the frame is below the argument
+ count pushed by the CALLS instruction and before the start of the saved registers. */
+#define INCOMING_FRAME_SP_OFFSET 0
+
+/* Offset from the frame pointer register value to the top of the stack. */
+#define FRAME_POINTER_CFA_OFFSET(FNDECL) 0
+
+/* We use R2-R5 (call-clobbered) registers for exceptions. */
+#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 2 : INVALID_REGNUM)
+
+/* Place the top of the stack for the DWARF2 EH stackadj value. */
+#define EH_RETURN_STACKADJ_RTX \
+ gen_rtx_MEM (SImode, \
+ plus_constant (Pmode, \
+ gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM),\
+ -4))
+
+/* Simply store the return handler into the call frame. */
+#define EH_RETURN_HANDLER_RTX \
+ gen_rtx_MEM (Pmode, \
+ plus_constant (Pmode, \
+ gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM),\
+ 16))
+
+
+/* Reserve the top of the stack for exception handler stackadj value. */
+#undef STARTING_FRAME_OFFSET
+#define STARTING_FRAME_OFFSET -4
+
+/* The VAX wants no space between the case instruction and the jump table. */
+#undef ASM_OUTPUT_BEFORE_CASE_LABEL
+#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE)
+
+#undef SUBTARGET_OVERRIDE_OPTIONS
+#define SUBTARGET_OVERRIDE_OPTIONS \
+ do \
+ { \
+ /* Turn off function CSE if we're doing PIC. */ \
+ if (flag_pic) \
+ flag_no_function_cse = 1; \
+ } \
+ while (0)
+
+/* Don't allow *foo where foo is non-local. */
+#define NO_EXTERNAL_INDIRECT_ADDRESS
+
+#undef VAX_CC1_AND_CC1PLUS_SPEC
+#define VAX_CC1_AND_CC1PLUS_SPEC \
+ "%{!fno-pic: \
+ %{!fpic: \
+ %{!fPIC:-fPIC}}}"
+
+/* VAX ELF is always gas; override the generic VAX ASM_SPEC. */
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{!fno-pic: %{!mno-asm-pic:-k}}"
+
+/* We want PCREL dwarf output. */
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
+ ((GLOBAL ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
+
+/* Emit a PC-relative relocation. */
+#define ASM_OUTPUT_DWARF_PCREL(FILE, SIZE, LABEL) \
+ do { \
+ fputs (integer_asm_op (SIZE, FALSE), FILE); \
+ fprintf (FILE, "%%pcrel%d(", SIZE * 8); \
+ assemble_name (FILE, LABEL); \
+ fputc (')', FILE); \
+ } while (0)
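The EH_RETURN_DATA_REGNO definition above maps exception-data slots 0..3 onto the call-clobbered registers r2..r5. A standalone check of that mapping (INVALID_REGNUM stubbed locally for illustration):

    /* Demo of the EH_RETURN_DATA_REGNO mapping: slots 0..3 -> r2..r5,
       everything else invalid.  INVALID_REGNUM is a local stand-in.  */
    #include <stdio.h>

    #define INVALID_REGNUM (-1)
    #define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 2 : INVALID_REGNUM)

    int main (void)
    {
      for (int n = 0; n <= 4; n++)
        {
          int r = EH_RETURN_DATA_REGNO (n);
          if (r == INVALID_REGNUM)
            printf ("EH data slot %d -> invalid\n", n);
          else
            printf ("EH data slot %d -> r%d\n", n, r);
        }
      return 0;
    }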
diff --git a/gcc-4.8/gcc/config/vax/elf.opt b/gcc-4.8/gcc/config/vax/elf.opt
new file mode 100644
index 000000000..97daf43fb
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/elf.opt
@@ -0,0 +1,29 @@
+; VAX ELF options.
+
+; Copyright (C) 2011-2013 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+; See the GCC internals manual (options.texi) for a description of
+; this file's format.
+
+; Please try to keep this file in ASCII collating order.
+
+mno-asm-pic
+Target RejectNegative
+
+; This comment is to ensure we retain the blank line above.
diff --git a/gcc-4.8/gcc/config/vax/linux.h b/gcc-4.8/gcc/config/vax/linux.h
new file mode 100644
index 000000000..2eff1c307
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/linux.h
@@ -0,0 +1,51 @@
+/* Definitions for VAX running Linux-based GNU systems with ELF format.
+ Copyright (C) 2007-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#define TARGET_OS_CPP_BUILTINS() GNU_USER_TARGET_OS_CPP_BUILTINS()
+
+/* We use GAS, G-float double and want new DI patterns. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_QMATH | MASK_G_FLOAT)
+
+/* Use standard names for udiv and umod libgcc calls. */
+#undef TARGET_BSD_DIVMOD
+#define TARGET_BSD_DIVMOD 0
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{fpic|fPIC:-k}"
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%(endian_spec) \
+ %{shared:-shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker /lib/ld.so.1} \
+ %{static:-static}}"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
diff --git a/gcc-4.8/gcc/config/vax/netbsd-elf.h b/gcc-4.8/gcc/config/vax/netbsd-elf.h
new file mode 100644
index 000000000..ffc904695
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/netbsd-elf.h
@@ -0,0 +1,68 @@
+/* Definitions of target machine for GNU compiler,
+ for NetBSD/vax ELF systems.
+ Copyright (C) 2002-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Names to predefine in the preprocessor for this target OS. */
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ NETBSD_OS_CPP_BUILTINS_ELF(); \
+ } \
+ while (0)
+
+#undef CPP_SPEC
+#define CPP_SPEC NETBSD_CPP_SPEC
+
+#ifndef NETBSD_CC1_AND_CC1PLUS_SPEC
+#define NETBSD_CC1_AND_CC1PLUS_SPEC ""
+#endif
+
+#undef CC1_SPEC
+#define CC1_SPEC NETBSD_CC1_AND_CC1PLUS_SPEC VAX_CC1_AND_CC1PLUS_SPEC
+
+#undef CC1PLUS_SPEC
+#define CC1PLUS_SPEC NETBSD_CC1_AND_CC1PLUS_SPEC VAX_CC1_AND_CC1PLUS_SPEC
+
+#define NETBSD_ENTRY_POINT "__start"
+
+#undef LINK_SPEC
+#if 0
+/* FIXME: We must link all executables statically until PIC support
+ is added to the compiler. */
+#define LINK_SPEC \
+ "%{assert*} %{R*} %{rpath*} \
+ %{shared:%ethe -shared option is not currently supported for VAX ELF} \
+ %{!shared: \
+ -dc -dp \
+ %{!nostdlib: \
+ %{!r: \
+ %{!e*:-e %(netbsd_entry_point)}}} \
+ %{!static:-static} \
+ %{static:-static}}"
+#else
+#define LINK_SPEC NETBSD_LINK_SPEC_ELF
+#endif
+
+#define EXTRA_SPECS \
+ { "netbsd_entry_point", NETBSD_ENTRY_POINT },
+
+/* We use gas, not the UNIX assembler. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT MASK_QMATH
diff --git a/gcc-4.8/gcc/config/vax/openbsd.h b/gcc-4.8/gcc/config/vax/openbsd.h
new file mode 100644
index 000000000..121dd57fe
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/openbsd.h
@@ -0,0 +1,50 @@
+/* Configuration fragment for a VAX OpenBSD target.
+ Copyright (C) 2000-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Amend common OpenBSD definitions for VAX target. */
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__unix__"); \
+ builtin_define ("__OpenBSD__"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=OpenBSD"); \
+ } \
+ while (0)
+
+/* Layout of source language data types. */
+
+/* This must agree with <machine/ansi.h> */
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#undef WINT_TYPE
+#define WINT_TYPE "int"
+
+#define TARGET_HAVE_NAMED_SECTIONS false
diff --git a/gcc-4.8/gcc/config/vax/openbsd1.h b/gcc-4.8/gcc/config/vax/openbsd1.h
new file mode 100644
index 000000000..7c5cdb68e
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/openbsd1.h
@@ -0,0 +1,22 @@
+/* Configuration fragment for a VAX OpenBSD target.
+ Copyright (C) 2000-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Set up definitions before picking up the common openbsd.h file. */
+#define OBSD_OLD_GAS
+#define OBSD_NO_DYNAMIC_LIBRARIES
diff --git a/gcc-4.8/gcc/config/vax/predicates.md b/gcc-4.8/gcc/config/vax/predicates.md
new file mode 100644
index 000000000..73b1a9055
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/predicates.md
@@ -0,0 +1,111 @@
+;; Predicate definitions for DEC VAX.
+;; Copyright (C) 2007-2013 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it under
+;; the terms of the GNU General Public License as published by the Free
+;; Software Foundation; either version 3, or (at your option) any later
+;; version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+;; for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Special case of a symbolic operand that's used as an
+;; operand.
+
+(define_predicate "symbolic_operand"
+ (match_code "const,symbol_ref,label_ref"))
+
+(define_predicate "local_symbolic_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ if (GET_CODE (op) == LABEL_REF)
+ return 1;
+ if (GET_CODE (op) == SYMBOL_REF)
+ return !flag_pic || SYMBOL_REF_LOCAL_P (op);
+ if (GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF)
+ return 1;
+ return !flag_pic || SYMBOL_REF_LOCAL_P (XEXP (XEXP (op, 0), 0));
+})
+
+(define_predicate "external_symbolic_operand"
+ (and (match_code "symbol_ref")
+ (not (match_operand 0 "local_symbolic_operand" ""))))
+
+(define_predicate "external_const_operand"
+ (and (match_code "const")
+ (match_test "GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+ && !SYMBOL_REF_LOCAL_P (XEXP (XEXP (op, 0), 0))")))
+
+(define_predicate "nonsymbolic_operand"
+ (and (ior (match_test "!flag_pic")
+ (not (match_operand 0 "symbolic_operand")))
+ (match_operand 0 "general_operand" "")))
+
+(define_predicate "external_memory_operand"
+ (match_code "mem")
+{
+ rtx addr = XEXP (op, 0);
+ if (MEM_P (addr))
+ addr = XEXP (addr, 0);
+ if (GET_CODE (addr) == PLUS)
+ addr = XEXP (addr, 1);
+ if (MEM_P (addr))
+ addr = XEXP (addr, 0);
+ if (GET_CODE (addr) == PLUS)
+ addr = XEXP (addr, 1);
+ return external_symbolic_operand (addr, SImode)
+ || external_const_operand (addr, SImode);
+})
+
+(define_predicate "indirect_memory_operand"
+ (match_code "mem")
+{
+ op = XEXP (op, 0);
+ if (MEM_P (op))
+ return 1;
+ if (GET_CODE (op) == PLUS)
+ op = XEXP (op, 1);
+ return MEM_P (op);
+})
+
+(define_predicate "indexed_memory_operand"
+ (match_code "mem")
+{
+ rtx addr = XEXP (op, 0);
+ return GET_CODE (addr) != PRE_DEC && GET_CODE (addr) != POST_INC
+ && mode_dependent_address_p (addr, MEM_ADDR_SPACE (op));
+})
+
+(define_predicate "illegal_blk_memory_operand"
+ (and (match_code "mem")
+ (ior (and (match_test "flag_pic")
+ (match_operand 0 "external_memory_operand" ""))
+ (ior (match_operand 0 "indexed_memory_operand" "")
+ (ior (match_operand 0 "indirect_memory_operand" "")
+ (match_test "GET_CODE (XEXP (op, 0)) == PRE_DEC"))))))
+
+(define_predicate "illegal_addsub_di_memory_operand"
+ (and (match_code "mem")
+ (ior (and (match_test "flag_pic")
+ (match_operand 0 "external_memory_operand" ""))
+ (ior (match_operand 0 "indexed_memory_operand" "")
+ (ior (match_operand 0 "indirect_memory_operand" "")
+ (match_test "GET_CODE (XEXP (op, 0)) == PRE_DEC"))))))
+
+(define_predicate "nonimmediate_addsub_di_operand"
+ (and (match_code "subreg,reg,mem")
+ (and (match_operand:DI 0 "nonimmediate_operand" "")
+ (not (match_operand:DI 0 "illegal_addsub_di_memory_operand")))))
+
+(define_predicate "general_addsub_di_operand"
+ (and (match_code "const_int,const_double,subreg,reg,mem")
+ (and (match_operand:DI 0 "general_operand" "")
+ (not (match_operand:DI 0 "illegal_addsub_di_memory_operand")))))
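local_symbolic_operand and external_const_operand above reach two levels into a CONST with XEXP (XEXP (op, 0), 0) because a symbol-plus-offset is wrapped as (const (plus (symbol_ref ...) (const_int ...))). A toy model of just that shape (plain C, not GCC's real rtx type):

    /* Miniature stand-in for GCC's rtx, only rich enough to show why
       the predicates above use XEXP (XEXP (op, 0), 0).  */
    #include <stdio.h>

    enum code { SYMBOL_REF, CONST_INT, PLUS, CONST };

    struct rtx
    {
      enum code code;
      struct rtx *op0, *op1;   /* XEXP (x, 0) / XEXP (x, 1) */
      const char *name;        /* for SYMBOL_REF            */
    };

    static const char *
    inner_symbol (struct rtx *op)
    {
      if (op->code == SYMBOL_REF)
        return op->name;
      if (op->code == CONST && op->op0->code == PLUS
          && op->op0->op0->code == SYMBOL_REF)
        return op->op0->op0->name;   /* XEXP (XEXP (op, 0), 0) */
      return NULL;
    }

    int main (void)
    {
      struct rtx sym  = { SYMBOL_REF, 0, 0, "foo" };
      struct rtx four = { CONST_INT, 0, 0, 0 };
      struct rtx sum  = { PLUS, &sym, &four, 0 };
      struct rtx cst  = { CONST, &sum, 0, 0 };
      printf ("%s\n", inner_symbol (&cst));   /* prints "foo" */
      return 0;
    }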
diff --git a/gcc-4.8/gcc/config/vax/vax-modes.def b/gcc-4.8/gcc/config/vax/vax-modes.def
new file mode 100644
index 000000000..a998762e2
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/vax-modes.def
@@ -0,0 +1,22 @@
+/* VAX extra machine modes.
+ Copyright (C) 2003-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* We just need to reset the floating point formats. */
+RESET_FLOAT_FORMAT (SF, vax_f_format);
+RESET_FLOAT_FORMAT (DF, vax_d_format);
diff --git a/gcc-4.8/gcc/config/vax/vax-protos.h b/gcc-4.8/gcc/config/vax/vax-protos.h
new file mode 100644
index 000000000..5a4adc1cd
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/vax-protos.h
@@ -0,0 +1,39 @@
+/* Definitions of target machine for GNU compiler. VAX version.
+ Copyright (C) 2000-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+extern bool legitimate_constant_address_p (rtx);
+extern void vax_expand_prologue (void);
+
+#ifdef RTX_CODE
+extern const char *cond_name (rtx);
+extern bool adjacent_operands_p (rtx, rtx, enum machine_mode);
+extern const char *rev_cond_name (rtx);
+extern void print_operand_address (FILE *, rtx);
+extern void print_operand (FILE *, rtx, int);
+extern void vax_notice_update_cc (rtx, rtx);
+extern void vax_expand_addsub_di_operands (rtx *, enum rtx_code);
+extern const char * vax_output_int_move (rtx, rtx *, enum machine_mode);
+extern const char * vax_output_int_add (rtx, rtx *, enum machine_mode);
+extern const char * vax_output_int_subtract (rtx, rtx *, enum machine_mode);
+extern const char * vax_output_movmemsi (rtx, rtx *);
+#endif /* RTX_CODE */
+
+#ifdef REAL_VALUE_TYPE
+extern int check_float_value (enum machine_mode, REAL_VALUE_TYPE *, int);
+#endif /* REAL_VALUE_TYPE */
diff --git a/gcc-4.8/gcc/config/vax/vax.c b/gcc-4.8/gcc/config/vax/vax.c
new file mode 100644
index 000000000..53189a7e7
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/vax.c
@@ -0,0 +1,2175 @@
+/* Subroutines for insn-output.c for VAX.
+ Copyright (C) 1987-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "df.h"
+#include "tree.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "function.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "recog.h"
+#include "expr.h"
+#include "optabs.h"
+#include "flags.h"
+#include "debug.h"
+#include "diagnostic-core.h"
+#include "reload.h"
+#include "tm-preds.h"
+#include "tm-constrs.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+
+static void vax_option_override (void);
+static bool vax_legitimate_address_p (enum machine_mode, rtx, bool);
+static void vax_file_start (void);
+static void vax_init_libfuncs (void);
+static void vax_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
+ HOST_WIDE_INT, tree);
+static int vax_address_cost_1 (rtx);
+static int vax_address_cost (rtx, enum machine_mode, addr_space_t, bool);
+static bool vax_rtx_costs (rtx, int, int, int, int *, bool);
+static rtx vax_function_arg (cumulative_args_t, enum machine_mode,
+ const_tree, bool);
+static void vax_function_arg_advance (cumulative_args_t, enum machine_mode,
+ const_tree, bool);
+static rtx vax_struct_value_rtx (tree, int);
+static rtx vax_builtin_setjmp_frame_value (void);
+static void vax_asm_trampoline_template (FILE *);
+static void vax_trampoline_init (rtx, tree, rtx);
+static int vax_return_pops_args (tree, tree, int);
+static bool vax_mode_dependent_address_p (const_rtx, addr_space_t);
+
+/* Initialize the GCC target structure. */
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START vax_file_start
+#undef TARGET_ASM_FILE_START_APP_OFF
+#define TARGET_ASM_FILE_START_APP_OFF true
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS vax_init_libfuncs
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK vax_output_mi_thunk
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS vax_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST vax_address_cost
+
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
+
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG vax_function_arg
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE vax_function_arg_advance
+
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX vax_struct_value_rtx
+
+#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
+#define TARGET_BUILTIN_SETJMP_FRAME_VALUE vax_builtin_setjmp_frame_value
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P vax_legitimate_address_p
+#undef TARGET_MODE_DEPENDENT_ADDRESS_P
+#define TARGET_MODE_DEPENDENT_ADDRESS_P vax_mode_dependent_address_p
+
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
+
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define TARGET_ASM_TRAMPOLINE_TEMPLATE vax_asm_trampoline_template
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT vax_trampoline_init
+#undef TARGET_RETURN_POPS_ARGS
+#define TARGET_RETURN_POPS_ARGS vax_return_pops_args
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE vax_option_override
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+/* Set global variables as needed for the options enabled. */
+
+static void
+vax_option_override (void)
+{
+ /* We're VAX floating point, not IEEE floating point. */
+ if (TARGET_G_FLOAT)
+ REAL_MODE_FORMAT (DFmode) = &vax_g_format;
+
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
+ SUBTARGET_OVERRIDE_OPTIONS;
+#endif
+}
+
+static void
+vax_add_reg_cfa_offset (rtx insn, int offset, rtx src)
+{
+ rtx x;
+
+ x = plus_constant (Pmode, frame_pointer_rtx, offset);
+ x = gen_rtx_MEM (SImode, x);
+ x = gen_rtx_SET (VOIDmode, x, src);
+ add_reg_note (insn, REG_CFA_OFFSET, x);
+}
+
+/* Generate the assembly code for function entry. FILE is a stdio
+ stream to output the code to. SIZE is an int: how many units of
+ temporary storage to allocate.
+
+ Refer to the array `regs_ever_live' to determine which registers to
+ save; `regs_ever_live[I]' is nonzero if register number I is ever
+ used in the function. This function is responsible for knowing
+ which registers should not be saved even if used. */
+
+void
+vax_expand_prologue (void)
+{
+ int regno, offset;
+ int mask = 0;
+ HOST_WIDE_INT size;
+ rtx insn;
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
+ mask |= 1 << regno;
+
+ insn = emit_insn (gen_procedure_entry_mask (GEN_INT (mask)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* The layout of the CALLG/S stack frame is as follows:
+
+ <- CFA, AP
+ r11
+ r10
+ ... Registers saved as specified by MASK
+ r3
+ r2
+ return-addr
+ old fp
+ old ap
+ old psw
+ zero
+ <- FP, SP
+
+ The rest of the prologue will adjust the SP for the local frame. */
+
+ vax_add_reg_cfa_offset (insn, 4, arg_pointer_rtx);
+ vax_add_reg_cfa_offset (insn, 8, frame_pointer_rtx);
+ vax_add_reg_cfa_offset (insn, 12, pc_rtx);
+
+ offset = 16;
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (mask & (1 << regno))
+ {
+ vax_add_reg_cfa_offset (insn, offset, gen_rtx_REG (SImode, regno));
+ offset += 4;
+ }
+
+ /* Because add_reg_note pushes the notes, adding this last means that
+ it will be processed first. This is required to allow the other
+ notes to be interpreted properly. */
+ add_reg_note (insn, REG_CFA_DEF_CFA,
+ plus_constant (Pmode, frame_pointer_rtx, offset));
+
+ /* Allocate the local stack frame. */
+ size = get_frame_size ();
+ size -= STARTING_FRAME_OFFSET;
+ emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx, GEN_INT (-size)));
+
+ /* Do not allow instructions referencing local stack memory to be
+ scheduled before the frame is allocated. This is more pedantic
+ than anything else, given that VAX does not currently have a
+ scheduling description. */
+ emit_insn (gen_blockage ());
+}
+
+/* When debugging with stabs, we want to output an extra dummy label
+ so that gas can distinguish between D_float and G_float prior to
+ processing the .stabs directive identifying type double. */
+static void
+vax_file_start (void)
+{
+ default_file_start ();
+
+ if (write_symbols == DBX_DEBUG)
+ fprintf (asm_out_file, "___vax_%c_doubles:\n", ASM_DOUBLE_CHAR);
+}
+
+/* We can use the BSD C library routines for the libgcc calls that are
+ still generated, since that's what they boil down to anyway. For
+ ELF, avoid the user's namespace. */
+
+static void
+vax_init_libfuncs (void)
+{
+ if (TARGET_BSD_DIVMOD)
+ {
+ set_optab_libfunc (udiv_optab, SImode, TARGET_ELF ? "*__udiv" : "*udiv");
+ set_optab_libfunc (umod_optab, SImode, TARGET_ELF ? "*__urem" : "*urem");
+ }
+}
+
+/* Split each DImode operand in OPERANDS into a pair of SImode words,
+ returning the low words in LOW and leaving the high words in OPERANDS,
+ with special handling for autoincrement and autodecrement memory
+ operands so the two word accesses land on the right halves. */
+
+static void
+split_quadword_operands (rtx insn, enum rtx_code code, rtx * operands,
+ rtx * low, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ low[i] = 0;
+
+ for (i = 0; i < n; i++)
+ {
+ if (MEM_P (operands[i])
+ && (GET_CODE (XEXP (operands[i], 0)) == PRE_DEC
+ || GET_CODE (XEXP (operands[i], 0)) == POST_INC))
+ {
+ rtx addr = XEXP (operands[i], 0);
+ operands[i] = low[i] = gen_rtx_MEM (SImode, addr);
+ }
+ else if (optimize_size && MEM_P (operands[i])
+ && REG_P (XEXP (operands[i], 0))
+ && (code != MINUS || operands[1] != const0_rtx)
+ && find_regno_note (insn, REG_DEAD,
+ REGNO (XEXP (operands[i], 0))))
+ {
+ low[i] = gen_rtx_MEM (SImode,
+ gen_rtx_POST_INC (Pmode,
+ XEXP (operands[i], 0)));
+ operands[i] = gen_rtx_MEM (SImode, XEXP (operands[i], 0));
+ }
+ else
+ {
+ low[i] = operand_subword (operands[i], 0, 0, DImode);
+ operands[i] = operand_subword (operands[i], 1, 0, DImode);
+ }
+ }
+}
+
+void
+print_operand_address (FILE * file, rtx addr)
+{
+ rtx orig = addr;
+ rtx reg1, breg, ireg;
+ rtx offset;
+
+ retry:
+ switch (GET_CODE (addr))
+ {
+ case MEM:
+ fprintf (file, "*");
+ addr = XEXP (addr, 0);
+ goto retry;
+
+ case REG:
+ fprintf (file, "(%s)", reg_names[REGNO (addr)]);
+ break;
+
+ case PRE_DEC:
+ fprintf (file, "-(%s)", reg_names[REGNO (XEXP (addr, 0))]);
+ break;
+
+ case POST_INC:
+ fprintf (file, "(%s)+", reg_names[REGNO (XEXP (addr, 0))]);
+ break;
+
+ case PLUS:
+ /* There can be either two or three things added here. One must be a
+ REG. One can be either a REG or a MULT of a REG and an appropriate
+ constant, and the third can only be a constant or a MEM.
+
+ We get these two or three things and put the constant or MEM in
+ OFFSET, the MULT or REG in IREG, and the REG in BREG. If we have
+ a register and can't tell yet if it is a base or index register,
+ put it into REG1. */
+
+ reg1 = 0; ireg = 0; breg = 0; offset = 0;
+
+ if (CONSTANT_ADDRESS_P (XEXP (addr, 0))
+ || MEM_P (XEXP (addr, 0)))
+ {
+ offset = XEXP (addr, 0);
+ addr = XEXP (addr, 1);
+ }
+ else if (CONSTANT_ADDRESS_P (XEXP (addr, 1))
+ || MEM_P (XEXP (addr, 1)))
+ {
+ offset = XEXP (addr, 1);
+ addr = XEXP (addr, 0);
+ }
+ else if (GET_CODE (XEXP (addr, 1)) == MULT)
+ {
+ ireg = XEXP (addr, 1);
+ addr = XEXP (addr, 0);
+ }
+ else if (GET_CODE (XEXP (addr, 0)) == MULT)
+ {
+ ireg = XEXP (addr, 0);
+ addr = XEXP (addr, 1);
+ }
+ else if (REG_P (XEXP (addr, 1)))
+ {
+ reg1 = XEXP (addr, 1);
+ addr = XEXP (addr, 0);
+ }
+ else if (REG_P (XEXP (addr, 0)))
+ {
+ reg1 = XEXP (addr, 0);
+ addr = XEXP (addr, 1);
+ }
+ else
+ gcc_unreachable ();
+
+ if (REG_P (addr))
+ {
+ if (reg1)
+ ireg = addr;
+ else
+ reg1 = addr;
+ }
+ else if (GET_CODE (addr) == MULT)
+ ireg = addr;
+ else
+ {
+ gcc_assert (GET_CODE (addr) == PLUS);
+ if (CONSTANT_ADDRESS_P (XEXP (addr, 0))
+ || MEM_P (XEXP (addr, 0)))
+ {
+ if (offset)
+ {
+ if (CONST_INT_P (offset))
+ offset = plus_constant (Pmode, XEXP (addr, 0),
+ INTVAL (offset));
+ else
+ {
+ gcc_assert (CONST_INT_P (XEXP (addr, 0)));
+ offset = plus_constant (Pmode, offset,
+ INTVAL (XEXP (addr, 0)));
+ }
+ }
+ offset = XEXP (addr, 0);
+ }
+ else if (REG_P (XEXP (addr, 0)))
+ {
+ if (reg1)
+ ireg = reg1, breg = XEXP (addr, 0), reg1 = 0;
+ else
+ reg1 = XEXP (addr, 0);
+ }
+ else
+ {
+ gcc_assert (GET_CODE (XEXP (addr, 0)) == MULT);
+ gcc_assert (!ireg);
+ ireg = XEXP (addr, 0);
+ }
+
+ if (CONSTANT_ADDRESS_P (XEXP (addr, 1))
+ || MEM_P (XEXP (addr, 1)))
+ {
+ if (offset)
+ {
+ if (CONST_INT_P (offset))
+ offset = plus_constant (Pmode, XEXP (addr, 1),
+ INTVAL (offset));
+ else
+ {
+ gcc_assert (CONST_INT_P (XEXP (addr, 1)));
+ offset = plus_constant (Pmode, offset,
+ INTVAL (XEXP (addr, 1)));
+ }
+ }
+ offset = XEXP (addr, 1);
+ }
+ else if (REG_P (XEXP (addr, 1)))
+ {
+ if (reg1)
+ ireg = reg1, breg = XEXP (addr, 1), reg1 = 0;
+ else
+ reg1 = XEXP (addr, 1);
+ }
+ else
+ {
+ gcc_assert (GET_CODE (XEXP (addr, 1)) == MULT);
+ gcc_assert (!ireg);
+ ireg = XEXP (addr, 1);
+ }
+ }
+
+ /* If REG1 is nonzero, figure out if it is a base or index register. */
+ if (reg1)
+ {
+ if (breg
+ || (flag_pic && GET_CODE (addr) == SYMBOL_REF)
+ || (offset
+ && (MEM_P (offset)
+ || (flag_pic && symbolic_operand (offset, SImode)))))
+ {
+ gcc_assert (!ireg);
+ ireg = reg1;
+ }
+ else
+ breg = reg1;
+ }
+
+ if (offset != 0)
+ {
+ if (flag_pic && symbolic_operand (offset, SImode))
+ {
+ if (breg && ireg)
+ {
+ debug_rtx (orig);
+ output_operand_lossage ("symbol used with both base and indexed registers");
+ }
+
+#ifdef NO_EXTERNAL_INDIRECT_ADDRESS
+ if (flag_pic > 1 && GET_CODE (offset) == CONST
+ && GET_CODE (XEXP (XEXP (offset, 0), 0)) == SYMBOL_REF
+ && !SYMBOL_REF_LOCAL_P (XEXP (XEXP (offset, 0), 0)))
+ {
+ debug_rtx (orig);
+ output_operand_lossage ("symbol with offset used in PIC mode");
+ }
+#endif
+
+ /* symbol(reg) isn't PIC, but symbol[reg] is. */
+ if (breg)
+ {
+ ireg = breg;
+ breg = 0;
+ }
+
+ }
+
+ output_address (offset);
+ }
+
+ if (breg != 0)
+ fprintf (file, "(%s)", reg_names[REGNO (breg)]);
+
+ if (ireg != 0)
+ {
+ if (GET_CODE (ireg) == MULT)
+ ireg = XEXP (ireg, 0);
+ gcc_assert (REG_P (ireg));
+ fprintf (file, "[%s]", reg_names[REGNO (ireg)]);
+ }
+ break;
+
+ default:
+ output_addr_const (file, addr);
+ }
+}
+
+void
+print_operand (FILE *file, rtx x, int code)
+{
+ if (code == '#')
+ fputc (ASM_DOUBLE_CHAR, file);
+ else if (code == '|')
+ fputs (REGISTER_PREFIX, file);
+ else if (code == 'c')
+ fputs (cond_name (x), file);
+ else if (code == 'C')
+ fputs (rev_cond_name (x), file);
+ else if (code == 'D' && CONST_INT_P (x) && INTVAL (x) < 0)
+ fprintf (file, "$" NEG_HWI_PRINT_HEX16, INTVAL (x));
+ else if (code == 'P' && CONST_INT_P (x))
+ fprintf (file, "$" HOST_WIDE_INT_PRINT_DEC, INTVAL (x) + 1);
+ else if (code == 'N' && CONST_INT_P (x))
+ fprintf (file, "$" HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
+ /* The rotl instruction cannot deal with negative arguments. */
+ else if (code == 'R' && CONST_INT_P (x))
+ fprintf (file, "$" HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
+ else if (code == 'H' && CONST_INT_P (x))
+ fprintf (file, "$%d", (int) (0xffff & ~ INTVAL (x)));
+ else if (code == 'h' && CONST_INT_P (x))
+ fprintf (file, "$%d", (short) - INTVAL (x));
+ else if (code == 'B' && CONST_INT_P (x))
+ fprintf (file, "$%d", (int) (0xff & ~ INTVAL (x)));
+ else if (code == 'b' && CONST_INT_P (x))
+ fprintf (file, "$%d", (int) (0xff & - INTVAL (x)));
+ else if (code == 'M' && CONST_INT_P (x))
+ fprintf (file, "$%d", ~((1 << INTVAL (x)) - 1));
+ else if (code == 'x' && CONST_INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
+ else if (REG_P (x))
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ else if (MEM_P (x))
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
+ {
+ char dstr[30];
+ real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x),
+ sizeof (dstr), 0, 1);
+ fprintf (file, "$0f%s", dstr);
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
+ {
+ char dstr[30];
+ real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x),
+ sizeof (dstr), 0, 1);
+ fprintf (file, "$0%c%s", ASM_DOUBLE_CHAR, dstr);
+ }
+ else
+ {
+ if (flag_pic > 1 && symbolic_operand (x, SImode))
+ {
+ debug_rtx (x);
+ output_operand_lossage ("symbol used as immediate operand");
+ }
+ putc ('$', file);
+ output_addr_const (file, x);
+ }
+}
+
+const char *
+cond_name (rtx op)
+{
+ switch (GET_CODE (op))
+ {
+ case NE:
+ return "neq";
+ case EQ:
+ return "eql";
+ case GE:
+ return "geq";
+ case GT:
+ return "gtr";
+ case LE:
+ return "leq";
+ case LT:
+ return "lss";
+ case GEU:
+ return "gequ";
+ case GTU:
+ return "gtru";
+ case LEU:
+ return "lequ";
+ case LTU:
+ return "lssu";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+const char *
+rev_cond_name (rtx op)
+{
+ switch (GET_CODE (op))
+ {
+ case EQ:
+ return "neq";
+ case NE:
+ return "eql";
+ case LT:
+ return "geq";
+ case LE:
+ return "gtr";
+ case GT:
+ return "leq";
+ case GE:
+ return "lss";
+ case LTU:
+ return "gequ";
+ case LEU:
+ return "gtru";
+ case GTU:
+ return "lequ";
+ case GEU:
+ return "lssu";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+static bool
+vax_float_literal (rtx c)
+{
+ enum machine_mode mode;
+ REAL_VALUE_TYPE r, s;
+ int i;
+
+ if (GET_CODE (c) != CONST_DOUBLE)
+ return false;
+
+ mode = GET_MODE (c);
+
+ if (c == const_tiny_rtx[(int) mode][0]
+ || c == const_tiny_rtx[(int) mode][1]
+ || c == const_tiny_rtx[(int) mode][2])
+ return true;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, c);
+
+ for (i = 0; i < 7; i++)
+ {
+ int x = 1 << i;
+ bool ok;
+ REAL_VALUE_FROM_INT (s, x, 0, mode);
+
+ if (REAL_VALUES_EQUAL (r, s))
+ return true;
+ ok = exact_real_inverse (mode, &s);
+ gcc_assert (ok);
+ if (REAL_VALUES_EQUAL (r, s))
+ return true;
+ }
+ return false;
+}
+
+
+/* Return the cost in cycles of a memory address, relative to register
+ indirect.
+
+ Each of the following adds the indicated number of cycles:
+
+ 1 - symbolic address
+ 1 - pre-decrement
+ 1 - indexing and/or offset(register)
+ 2 - indirect */
+
+
+static int
+vax_address_cost_1 (rtx addr)
+{
+ int reg = 0, indexed = 0, indir = 0, offset = 0, predec = 0;
+ rtx plus_op0 = 0, plus_op1 = 0;
+ restart:
+ switch (GET_CODE (addr))
+ {
+ case PRE_DEC:
+ predec = 1;
+ case REG:
+ case SUBREG:
+ case POST_INC:
+ reg = 1;
+ break;
+ case MULT:
+ indexed = 1; /* 2 on VAX 2 */
+ break;
+ case CONST_INT:
+ /* byte offsets cost nothing (on a VAX 2, they cost 1 cycle) */
+ if (offset == 0)
+ offset = (unsigned HOST_WIDE_INT)(INTVAL(addr)+128) > 256;
+ break;
+ case CONST:
+ case SYMBOL_REF:
+ offset = 1; /* 2 on VAX 2 */
+ break;
+ case LABEL_REF: /* this is probably a byte offset from the pc */
+ if (offset == 0)
+ offset = 1;
+ break;
+ case PLUS:
+ if (plus_op0)
+ plus_op1 = XEXP (addr, 0);
+ else
+ plus_op0 = XEXP (addr, 0);
+ addr = XEXP (addr, 1);
+ goto restart;
+ case MEM:
+ indir = 2; /* 3 on VAX 2 */
+ addr = XEXP (addr, 0);
+ goto restart;
+ default:
+ break;
+ }
+
+ /* Up to 3 things can be added in an address. They are stored in
+ plus_op0, plus_op1, and addr. */
+
+ if (plus_op0)
+ {
+ addr = plus_op0;
+ plus_op0 = 0;
+ goto restart;
+ }
+ if (plus_op1)
+ {
+ addr = plus_op1;
+ plus_op1 = 0;
+ goto restart;
+ }
+ /* Indexing and register+offset can both be used (except on a VAX 2)
+ without increasing execution time over either one alone. */
+ if (reg && indexed && offset)
+ return reg + indir + offset + predec;
+ return reg + indexed + indir + offset + predec;
+}
+
+static int
+vax_address_cost (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
+ addr_space_t as ATTRIBUTE_UNUSED,
+ bool speed ATTRIBUTE_UNUSED)
+{
+ return (1 + (REG_P (x) ? 0 : vax_address_cost_1 (x)));
+}
+
+/* Cost of an expression on a VAX. This version has costs tuned for the
+ CVAX chip (found in the VAX 3 series) with comments for variations on
+ other models.
+
+ FIXME: The costs need review, particularly for TRUNCATE, FLOAT_EXTEND
+ and FLOAT_TRUNCATE. We need a -mcpu option to allow provision of
+ costs on a per cpu basis. */
+
+static bool
+vax_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
+ int *total, bool speed ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode = GET_MODE (x);
+ int i = 0; /* may be modified in switch */
+ const char *fmt = GET_RTX_FORMAT (code); /* may be modified in switch */
+
+ switch (code)
+ {
+ /* On a VAX, constants from 0..63 are cheap because they can use the
+ 1-byte literal constant format. Comparison to -1 should be made cheap
+ so that decrement-and-branch insns can be formed more easily (if
+ the value -1 is copied to a register, some decrement-and-branch
+ patterns will not match). */
+ case CONST_INT:
+ if (INTVAL (x) == 0)
+ {
+ *total = 0;
+ return true;
+ }
+ if (outer_code == AND)
+ {
+ *total = ((unsigned HOST_WIDE_INT) ~INTVAL (x) <= 077) ? 1 : 2;
+ return true;
+ }
+ if ((unsigned HOST_WIDE_INT) INTVAL (x) <= 077
+ || (outer_code == COMPARE
+ && INTVAL (x) == -1)
+ || ((outer_code == PLUS || outer_code == MINUS)
+ && (unsigned HOST_WIDE_INT) -INTVAL (x) <= 077))
+ {
+ *total = 1;
+ return true;
+ }
+ /* FALLTHRU */
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ *total = 3;
+ return true;
+
+ case CONST_DOUBLE:
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ *total = vax_float_literal (x) ? 5 : 8;
+ else
+ *total = ((CONST_DOUBLE_HIGH (x) == 0
+ && (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (x) < 64)
+ || (outer_code == PLUS
+ && CONST_DOUBLE_HIGH (x) == -1
+ && (unsigned HOST_WIDE_INT)-CONST_DOUBLE_LOW (x) < 64))
+ ? 2 : 5;
+ return true;
+
+ case POST_INC:
+ *total = 2;
+ return true; /* Implies register operand. */
+
+ case PRE_DEC:
+ *total = 3;
+ return true; /* Implies register operand. */
+
+ case MULT:
+ switch (mode)
+ {
+ case DFmode:
+ *total = 16; /* 4 on VAX 9000 */
+ break;
+ case SFmode:
+ *total = 9; /* 4 on VAX 9000, 12 on VAX 2 */
+ break;
+ case DImode:
+ *total = 16; /* 6 on VAX 9000, 28 on VAX 2 */
+ break;
+ case SImode:
+ case HImode:
+ case QImode:
+ *total = 10; /* 3-4 on VAX 9000, 20-28 on VAX 2 */
+ break;
+ default:
+ *total = MAX_COST; /* Mode is not supported. */
+ return true;
+ }
+ break;
+
+ case UDIV:
+ if (mode != SImode)
+ {
+ *total = MAX_COST; /* Mode is not supported. */
+ return true;
+ }
+ *total = 17;
+ break;
+
+ case DIV:
+ if (mode == DImode)
+ *total = 30; /* Highly variable. */
+ else if (mode == DFmode)
+ /* divide takes 28 cycles if the result is not zero, 13 otherwise */
+ *total = 24;
+ else
+ *total = 11; /* 25 on VAX 2 */
+ break;
+
+ case MOD:
+ *total = 23;
+ break;
+
+ case UMOD:
+ if (mode != SImode)
+ {
+ *total = MAX_COST; /* Mode is not supported. */
+ return true;
+ }
+ *total = 29;
+ break;
+
+ case FLOAT:
+ *total = (6 /* 4 on VAX 9000 */
+ + (mode == DFmode) + (GET_MODE (XEXP (x, 0)) != SImode));
+ break;
+
+ case FIX:
+ *total = 7; /* 17 on VAX 2 */
+ break;
+
+ case ASHIFT:
+ case LSHIFTRT:
+ case ASHIFTRT:
+ if (mode == DImode)
+ *total = 12;
+ else
+ *total = 10; /* 6 on VAX 9000 */
+ break;
+
+ case ROTATE:
+ case ROTATERT:
+ *total = 6; /* 5 on VAX 2, 4 on VAX 9000 */
+ if (CONST_INT_P (XEXP (x, 1)))
+ fmt = "e"; /* all constant rotate counts are short */
+ break;
+
+ case PLUS:
+ case MINUS:
+ *total = (mode == DFmode) ? 13 : 8; /* 6/8 on VAX 9000, 16/15 on VAX 2 */
+ /* Small integer operands can use subl2 and addl2. */
+ if ((CONST_INT_P (XEXP (x, 1)))
+ && (unsigned HOST_WIDE_INT)(INTVAL (XEXP (x, 1)) + 63) < 127)
+ fmt = "e";
+ break;
+
+ case IOR:
+ case XOR:
+ *total = 3;
+ break;
+
+ case AND:
+ /* AND is special because the first operand is complemented. */
+ *total = 3;
+ if (CONST_INT_P (XEXP (x, 0)))
+ {
+ if ((unsigned HOST_WIDE_INT)~INTVAL (XEXP (x, 0)) > 63)
+ *total = 4;
+ fmt = "e";
+ i = 1;
+ }
+ break;
+
+ case NEG:
+ if (mode == DFmode)
+ *total = 9;
+ else if (mode == SFmode)
+ *total = 6;
+ else if (mode == DImode)
+ *total = 4;
+ else
+ *total = 2;
+ break;
+
+ case NOT:
+ *total = 2;
+ break;
+
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ *total = 15;
+ break;
+
+ case MEM:
+ if (mode == DImode || mode == DFmode)
+ *total = 5; /* 7 on VAX 2 */
+ else
+ *total = 3; /* 4 on VAX 2 */
+ x = XEXP (x, 0);
+ if (!REG_P (x) && GET_CODE (x) != POST_INC)
+ *total += vax_address_cost_1 (x);
+ return true;
+
+ case FLOAT_EXTEND:
+ case FLOAT_TRUNCATE:
+ case TRUNCATE:
+ *total = 3; /* FIXME: Costs need to be checked */
+ break;
+
+ default:
+ return false;
+ }
+
+ /* Now look inside the expression. Operands which are not registers or
+ short constants add to the cost.
+
+ FMT and I may have been adjusted in the switch above for instructions
+ which require special handling. */
+
+ while (*fmt++ == 'e')
+ {
+ rtx op = XEXP (x, i);
+
+ i += 1;
+ code = GET_CODE (op);
+
+ /* A NOT is likely to be found as the first operand of an AND
+ (in which case the relevant cost is of the operand inside
+ the not) and not likely to be found anywhere else. */
+ if (code == NOT)
+ op = XEXP (op, 0), code = GET_CODE (op);
+
+ switch (code)
+ {
+ case CONST_INT:
+ if ((unsigned HOST_WIDE_INT)INTVAL (op) > 63
+ && GET_MODE (x) != QImode)
+ *total += 1; /* 2 on VAX 2 */
+ break;
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ *total += 1; /* 2 on VAX 2 */
+ break;
+ case CONST_DOUBLE:
+ if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT)
+ {
+ /* Registers are faster than floating point constants -- even
+ those constants which can be encoded in a single byte. */
+ if (vax_float_literal (op))
+ *total += 1;
+ else
+ *total += (GET_MODE (x) == DFmode) ? 3 : 2;
+ }
+ else
+ {
+ if (CONST_DOUBLE_HIGH (op) != 0
+ || (unsigned HOST_WIDE_INT)CONST_DOUBLE_LOW (op) > 63)
+ *total += 2;
+ }
+ break;
+ case MEM:
+ *total += 1; /* 2 on VAX 2 */
+ if (!REG_P (XEXP (op, 0)))
+ *total += vax_address_cost_1 (XEXP (op, 0));
+ break;
+ case REG:
+ case SUBREG:
+ break;
+ default:
+ *total += 1;
+ break;
+ }
+ }
+ return true;
+}
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance.
+ .mask ^m<r2,r3,r4,r5,r6,r7,r8,r9,r10,r11> #conservative entry mask
+ addl2 $DELTA, 4(ap) #adjust first argument
+ jmp FUNCTION+2 #jump beyond FUNCTION's entry mask
+*/
+
+static void
+vax_output_mi_thunk (FILE * file,
+ tree thunk ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta,
+ HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
+ tree function)
+{
+ fprintf (file, "\t.word 0x0ffc\n\taddl2 $" HOST_WIDE_INT_PRINT_DEC, delta);
+ asm_fprintf (file, ",4(%Rap)\n");
+ fprintf (file, "\tjmp ");
+ assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
+ fprintf (file, "+2\n");
+}
+
+static rtx
+vax_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
+ int incoming ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (Pmode, VAX_STRUCT_VALUE_REGNUM);
+}
+
+static rtx
+vax_builtin_setjmp_frame_value (void)
+{
+ return hard_frame_pointer_rtx;
+}
+
+/* Worker function for NOTICE_UPDATE_CC. */
+
+void
+vax_notice_update_cc (rtx exp, rtx insn ATTRIBUTE_UNUSED)
+{
+ if (GET_CODE (exp) == SET)
+ {
+ if (GET_CODE (SET_SRC (exp)) == CALL)
+ CC_STATUS_INIT;
+ else if (GET_CODE (SET_DEST (exp)) != ZERO_EXTRACT
+ && GET_CODE (SET_DEST (exp)) != PC)
+ {
+ cc_status.flags = 0;
+ /* The integer operations below don't set carry or
+ set it in an incompatible way. That's ok though
+ as the Z bit is all we need when doing unsigned
+ comparisons on the result of these insns (since
+ they're always with 0). Set CC_NO_OVERFLOW to
+ generate the correct unsigned branches. */
+ switch (GET_CODE (SET_SRC (exp)))
+ {
+ case NEG:
+ if (GET_MODE_CLASS (GET_MODE (exp)) == MODE_FLOAT)
+ break;
+ case AND:
+ case IOR:
+ case XOR:
+ case NOT:
+ case MEM:
+ case REG:
+ cc_status.flags = CC_NO_OVERFLOW;
+ break;
+ default:
+ break;
+ }
+ cc_status.value1 = SET_DEST (exp);
+ cc_status.value2 = SET_SRC (exp);
+ }
+ }
+ else if (GET_CODE (exp) == PARALLEL
+ && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
+ {
+ if (GET_CODE (SET_SRC (XVECEXP (exp, 0, 0))) == CALL)
+ CC_STATUS_INIT;
+ else if (GET_CODE (SET_DEST (XVECEXP (exp, 0, 0))) != PC)
+ {
+ cc_status.flags = 0;
+ cc_status.value1 = SET_DEST (XVECEXP (exp, 0, 0));
+ cc_status.value2 = SET_SRC (XVECEXP (exp, 0, 0));
+ }
+ else
+ /* PARALLELs whose first element sets the PC are aob,
+ sob insns. They do change the cc's. */
+ CC_STATUS_INIT;
+ }
+ else
+ CC_STATUS_INIT;
+ if (cc_status.value1 && REG_P (cc_status.value1)
+ && cc_status.value2
+ && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
+ cc_status.value2 = 0;
+ if (cc_status.value1 && MEM_P (cc_status.value1)
+ && cc_status.value2
+ && MEM_P (cc_status.value2))
+ cc_status.value2 = 0;
+ /* Actual condition, one line up, should be that value2's address
+ depends on value1, but that is too much of a pain. */
+}
+
+/* Output integer move instructions. */
+
+const char *
+vax_output_int_move (rtx insn ATTRIBUTE_UNUSED, rtx *operands,
+ enum machine_mode mode)
+{
+ rtx hi[3], lo[3];
+ const char *pattern_hi, *pattern_lo;
+
+ switch (mode)
+ {
+ case DImode:
+ if (operands[1] == const0_rtx)
+ return "clrq %0";
+ if (TARGET_QMATH && optimize_size
+ && (CONST_INT_P (operands[1])
+ || GET_CODE (operands[1]) == CONST_DOUBLE))
+ {
+ unsigned HOST_WIDE_INT hval, lval;
+ int n;
+
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ gcc_assert (HOST_BITS_PER_WIDE_INT != 64);
+
+ /* Make sure only the low 32 bits are valid. */
+ lval = CONST_DOUBLE_LOW (operands[1]) & 0xffffffff;
+ hval = CONST_DOUBLE_HIGH (operands[1]) & 0xffffffff;
+ }
+ else
+ {
+ lval = INTVAL (operands[1]);
+ hval = 0;
+ }
+
+	  /* Here we check whether the 64-bit value is really a 6-bit value
+	     shifted left by some arbitrary amount.  If so, we can use ashq
+	     to shift it into place, saving 7 bytes (1 addr-mode byte +
+	     8 immediate bytes - 1 shift byte - 1 short literal byte).  */
+ if (lval != 0
+ && (n = exact_log2 (lval & (- lval))) != -1
+ && (lval >> n) < 64)
+ {
+ lval >>= n;
+
+	      /* On 32-bit platforms, if the 6 bits didn't overflow into the
+		 upper 32-bit value, that value had better be 0.  If we have
+		 overflowed, make sure the overflow wasn't too large.  */
+ if (HOST_BITS_PER_WIDE_INT == 32 && hval != 0)
+ {
+ if (n <= 26 || hval >= ((unsigned)1 << (n - 26)))
+ n = 0; /* failure */
+ else
+ lval |= hval << (32 - n);
+ }
+ /* If n is 0, then ashq is not the best way to emit this. */
+ if (n > 0)
+ {
+ operands[1] = GEN_INT (lval);
+ operands[2] = GEN_INT (n);
+ return "ashq %2,%1,%0";
+ }
+#if HOST_BITS_PER_WIDE_INT == 32
+ }
+	  /* On 32-bit platforms, if the low 32-bit value is 0, check the
+	     upper 32-bit value as well.  */
+ else if (hval != 0
+ && (n = exact_log2 (hval & (- hval)) - 1) != -1
+ && (hval >> n) < 64)
+ {
+ operands[1] = GEN_INT (hval >> n);
+ operands[2] = GEN_INT (n + 32);
+ return "ashq %2,%1,%0";
+#endif
+ }
+ }
+
+ if (TARGET_QMATH
+ && (!MEM_P (operands[0])
+ || GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
+ || GET_CODE (XEXP (operands[0], 0)) == POST_INC
+ || !illegal_addsub_di_memory_operand (operands[0], DImode))
+ && ((CONST_INT_P (operands[1])
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[1]) >= 64)
+ || GET_CODE (operands[1]) == CONST_DOUBLE))
+ {
+ hi[0] = operands[0];
+ hi[1] = operands[1];
+
+ split_quadword_operands (insn, SET, hi, lo, 2);
+
+ pattern_lo = vax_output_int_move (NULL, lo, SImode);
+ pattern_hi = vax_output_int_move (NULL, hi, SImode);
+
+	  /* If the patterns are just movl/movl or pushl/pushl then a movq
+	     will be shorter (1 opcode byte + 1 addr-mode byte + 8 immediate
+	     value bytes vs. 2 opcode bytes + 2 addr-mode bytes + 8 immediate
+	     value bytes).  */
+ if ((!strncmp (pattern_lo, "movl", 4)
+ && !strncmp (pattern_hi, "movl", 4))
+ || (!strncmp (pattern_lo, "pushl", 5)
+ && !strncmp (pattern_hi, "pushl", 5)))
+ return "movq %1,%0";
+
+ if (MEM_P (operands[0])
+ && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
+ {
+ output_asm_insn (pattern_hi, hi);
+ operands[0] = lo[0];
+ operands[1] = lo[1];
+ operands[2] = lo[2];
+ return pattern_lo;
+ }
+ else
+ {
+ output_asm_insn (pattern_lo, lo);
+ operands[0] = hi[0];
+ operands[1] = hi[1];
+ operands[2] = hi[2];
+ return pattern_hi;
+ }
+ }
+ return "movq %1,%0";
+
+ case SImode:
+ if (symbolic_operand (operands[1], SImode))
+ {
+ if (push_operand (operands[0], SImode))
+ return "pushab %a1";
+ return "movab %a1,%0";
+ }
+
+ if (operands[1] == const0_rtx)
+ {
+	  if (push_operand (operands[0], SImode))
+ return "pushl %1";
+ return "clrl %0";
+ }
+
+ if (CONST_INT_P (operands[1])
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[1]) >= 64)
+ {
+ HOST_WIDE_INT i = INTVAL (operands[1]);
+ int n;
+ if ((unsigned HOST_WIDE_INT)(~i) < 64)
+ return "mcoml %N1,%0";
+ if ((unsigned HOST_WIDE_INT)i < 0x100)
+ return "movzbl %1,%0";
+ if (i >= -0x80 && i < 0)
+ return "cvtbl %1,%0";
+ if (optimize_size
+ && (n = exact_log2 (i & (-i))) != -1
+ && ((unsigned HOST_WIDE_INT)i >> n) < 64)
+ {
+ operands[1] = GEN_INT ((unsigned HOST_WIDE_INT)i >> n);
+ operands[2] = GEN_INT (n);
+ return "ashl %2,%1,%0";
+ }
+ if ((unsigned HOST_WIDE_INT)i < 0x10000)
+ return "movzwl %1,%0";
+ if (i >= -0x8000 && i < 0)
+ return "cvtwl %1,%0";
+ }
+ if (push_operand (operands[0], SImode))
+ return "pushl %1";
+ return "movl %1,%0";
+
+ case HImode:
+ if (CONST_INT_P (operands[1]))
+ {
+ HOST_WIDE_INT i = INTVAL (operands[1]);
+ if (i == 0)
+ return "clrw %0";
+ else if ((unsigned HOST_WIDE_INT)i < 64)
+ return "movw %1,%0";
+ else if ((unsigned HOST_WIDE_INT)~i < 64)
+ return "mcomw %H1,%0";
+ else if ((unsigned HOST_WIDE_INT)i < 256)
+ return "movzbw %1,%0";
+ else if (i >= -0x80 && i < 0)
+ return "cvtbw %1,%0";
+ }
+ return "movw %1,%0";
+
+ case QImode:
+ if (CONST_INT_P (operands[1]))
+ {
+ HOST_WIDE_INT i = INTVAL (operands[1]);
+ if (i == 0)
+ return "clrb %0";
+ else if ((unsigned HOST_WIDE_INT)~i < 64)
+ return "mcomb %B1,%0";
+ }
+ return "movb %1,%0";
+
+ default:
+ gcc_unreachable ();
+ }
+}
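+
+/* Illustrative SImode examples of the selections above (a sketch only;
+   a register destination is assumed):
+	 0	-> clrl %0
+	 42	-> movl %1,%0	(6-bit short literal)
+	 -5	-> mcoml %N1,%0	(move complement of the literal 4)
+	 100	-> movzbl %1,%0	(unsigned 8-bit)
+	 -100	-> cvtbl %1,%0	(signed 8-bit)
+	 40000	-> movzwl %1,%0	(unsigned 16-bit)
+	 -20000	-> cvtwl %1,%0	(signed 16-bit)  */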
+
+/* Output integer add instructions.
+
+ The space-time-opcode tradeoffs for addition vary by model of VAX.
+
+   On a VAX 3 "movab (r1)[r2],r3" is faster than "addl3 r1,r2,r3",
+   but it is not faster on other models.
+
+ "movab #(r1),r2" is usually shorter than "addl3 #,r1,r2", and is
+ faster on a VAX 3, but some VAXen (e.g. VAX 9000) will stall if
+ a register is used in an address too soon after it is set.
+   Compromise by using movab only when it is shorter than the add
+   or when the base register in the address is one of sp, ap, or fp,
+   which are not modified very often.  */
+
+const char *
+vax_output_int_add (rtx insn, rtx *operands, enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case DImode:
+ {
+ rtx low[3];
+ const char *pattern;
+ int carry = 1;
+ bool sub;
+
+ if (TARGET_QMATH && 0)
+ debug_rtx (insn);
+
+ split_quadword_operands (insn, PLUS, operands, low, 3);
+
+ if (TARGET_QMATH)
+ {
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+#ifdef NO_EXTERNAL_INDIRECT_ADDRESS
+ gcc_assert (!flag_pic || !external_memory_operand (low[2], SImode));
+ gcc_assert (!flag_pic || !external_memory_operand (low[0], SImode));
+#endif
+
+ /* No reason to add a 0 to the low part and thus no carry, so just
+ emit the appropriate add/sub instruction. */
+ if (low[2] == const0_rtx)
+ return vax_output_int_add (NULL, operands, SImode);
+
+ /* Are we doing addition or subtraction? */
+ sub = CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 0;
+
+	  /* We can't use vax_output_int_add since some of the patterns
+	     don't modify the carry bit.  */
+ if (sub)
+ {
+ if (low[2] == constm1_rtx)
+ pattern = "decl %0";
+ else
+ pattern = "subl2 $%n2,%0";
+ }
+ else
+ {
+ if (low[2] == const1_rtx)
+ pattern = "incl %0";
+ else
+ pattern = "addl2 %2,%0";
+ }
+ output_asm_insn (pattern, low);
+
+	  /* In 2's complement, -n = ~n + 1.  Since we are dealing with
+	     two 32-bit parts, we complement each and then add one to the
+	     low part.  We know that the low part can't overflow since
+	     its value can never be 0.  */
+ if (sub)
+ return "sbwc %N2,%0";
+ return "adwc %2,%0";
+ }
+
+ /* Add low parts. */
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (low[2] == const0_rtx)
+ /* Should examine operand, punt if not POST_INC. */
+ pattern = "tstl %0", carry = 0;
+ else if (low[2] == const1_rtx)
+ pattern = "incl %0";
+ else
+ pattern = "addl2 %2,%0";
+ }
+ else
+ {
+ if (low[2] == const0_rtx)
+ pattern = "movl %1,%0", carry = 0;
+ else
+ pattern = "addl3 %2,%1,%0";
+ }
+ if (pattern)
+ output_asm_insn (pattern, low);
+ if (!carry)
+ /* If CARRY is 0, we don't have any carry value to worry about. */
+ return get_insn_template (CODE_FOR_addsi3, insn);
+ /* %0 = C + %1 + %2 */
+ if (!rtx_equal_p (operands[0], operands[1]))
+ output_asm_insn ((operands[1] == const0_rtx
+ ? "clrl %0"
+ : "movl %1,%0"), operands);
+ return "adwc %2,%0";
+ }
+
+ case SImode:
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (operands[2] == const1_rtx)
+ return "incl %0";
+ if (operands[2] == constm1_rtx)
+ return "decl %0";
+ if (CONST_INT_P (operands[2])
+ && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
+ return "subl2 $%n2,%0";
+ if (CONST_INT_P (operands[2])
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 64
+ && REG_P (operands[1])
+ && ((INTVAL (operands[2]) < 32767 && INTVAL (operands[2]) > -32768)
+ || REGNO (operands[1]) > 11))
+ return "movab %c2(%1),%0";
+ if (REG_P (operands[0]) && symbolic_operand (operands[2], SImode))
+ return "movab %a2[%0],%0";
+ return "addl2 %2,%0";
+ }
+
+ if (rtx_equal_p (operands[0], operands[2]))
+ {
+ if (REG_P (operands[0]) && symbolic_operand (operands[1], SImode))
+ return "movab %a1[%0],%0";
+ return "addl2 %1,%0";
+ }
+
+ if (CONST_INT_P (operands[2])
+ && INTVAL (operands[2]) < 32767
+ && INTVAL (operands[2]) > -32768
+ && REG_P (operands[1])
+ && push_operand (operands[0], SImode))
+ return "pushab %c2(%1)";
+
+ if (CONST_INT_P (operands[2])
+ && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
+ return "subl3 $%n2,%1,%0";
+
+ if (CONST_INT_P (operands[2])
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 64
+ && REG_P (operands[1])
+ && ((INTVAL (operands[2]) < 32767 && INTVAL (operands[2]) > -32768)
+ || REGNO (operands[1]) > 11))
+ return "movab %c2(%1),%0";
+
+ /* Add this if using gcc on a VAX 3xxx:
+ if (REG_P (operands[1]) && REG_P (operands[2]))
+ return "movab (%1)[%2],%0";
+ */
+
+ if (REG_P (operands[1]) && symbolic_operand (operands[2], SImode))
+ {
+ if (push_operand (operands[0], SImode))
+ return "pushab %a2[%1]";
+ return "movab %a2[%1],%0";
+ }
+
+ if (REG_P (operands[2]) && symbolic_operand (operands[1], SImode))
+ {
+ if (push_operand (operands[0], SImode))
+ return "pushab %a1[%2]";
+ return "movab %a1[%2],%0";
+ }
+
+ if (flag_pic && REG_P (operands[0])
+ && symbolic_operand (operands[2], SImode))
+ return "movab %a2,%0;addl2 %1,%0";
+
+      if (flag_pic
+	  && (symbolic_operand (operands[1], SImode)
+	      || symbolic_operand (operands[2], SImode)))
+ debug_rtx (insn);
+
+ return "addl3 %1,%2,%0";
+
+ case HImode:
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (operands[2] == const1_rtx)
+ return "incw %0";
+ if (operands[2] == constm1_rtx)
+ return "decw %0";
+ if (CONST_INT_P (operands[2])
+ && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
+ return "subw2 $%n2,%0";
+ return "addw2 %2,%0";
+ }
+ if (rtx_equal_p (operands[0], operands[2]))
+ return "addw2 %1,%0";
+ if (CONST_INT_P (operands[2])
+ && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
+ return "subw3 $%n2,%1,%0";
+ return "addw3 %1,%2,%0";
+
+ case QImode:
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (operands[2] == const1_rtx)
+ return "incb %0";
+ if (operands[2] == constm1_rtx)
+ return "decb %0";
+ if (CONST_INT_P (operands[2])
+ && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
+ return "subb2 $%n2,%0";
+ return "addb2 %2,%0";
+ }
+ if (rtx_equal_p (operands[0], operands[2]))
+ return "addb2 %1,%0";
+ if (CONST_INT_P (operands[2])
+ && (unsigned HOST_WIDE_INT) (- INTVAL (operands[2])) < 64)
+ return "subb3 $%n2,%1,%0";
+ return "addb3 %1,%2,%0";
+
+ default:
+ gcc_unreachable ();
+ }
+}
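+
+/* A few illustrative SImode cases from the logic above (sketch only):
+	r0 += 1		-> incl r0
+	r0 += -1	-> decl r0
+	r0 += -30	-> subl2 $30,r0		(negated 6-bit literal)
+	r0 = r1 + 1000	-> movab 1000(r1),r0	(word displacement)  */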
+
+const char *
+vax_output_int_subtract (rtx insn, rtx *operands, enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case DImode:
+ {
+ rtx low[3];
+ const char *pattern;
+ int carry = 1;
+
+ if (TARGET_QMATH && 0)
+ debug_rtx (insn);
+
+ split_quadword_operands (insn, MINUS, operands, low, 3);
+
+ if (TARGET_QMATH)
+ {
+ if (operands[1] == const0_rtx && low[1] == const0_rtx)
+ {
+ /* Negation is tricky. It's basically complement and increment.
+ Negate hi, then lo, and subtract the carry back. */
+ if ((MEM_P (low[0]) && GET_CODE (XEXP (low[0], 0)) == POST_INC)
+ || (MEM_P (operands[0])
+ && GET_CODE (XEXP (operands[0], 0)) == POST_INC))
+ fatal_insn ("illegal operand detected", insn);
+ output_asm_insn ("mnegl %2,%0", operands);
+ output_asm_insn ("mnegl %2,%0", low);
+ return "sbwc $0,%0";
+ }
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ gcc_assert (rtx_equal_p (low[0], low[1]));
+ if (low[2] == const1_rtx)
+ output_asm_insn ("decl %0", low);
+ else
+ output_asm_insn ("subl2 %2,%0", low);
+ return "sbwc %2,%0";
+ }
+
+ /* Subtract low parts. */
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (low[2] == const0_rtx)
+ pattern = 0, carry = 0;
+ else if (low[2] == constm1_rtx)
+ pattern = "decl %0";
+ else
+ pattern = "subl2 %2,%0";
+ }
+ else
+ {
+ if (low[2] == constm1_rtx)
+ pattern = "decl %0";
+ else if (low[2] == const0_rtx)
+ pattern = get_insn_template (CODE_FOR_movsi, insn), carry = 0;
+ else
+ pattern = "subl3 %2,%1,%0";
+ }
+ if (pattern)
+ output_asm_insn (pattern, low);
+ if (carry)
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ return "movl %1,%0;sbwc %2,%0";
+ return "sbwc %2,%0";
+ /* %0 = %2 - %1 - C */
+ }
+ return get_insn_template (CODE_FOR_subsi3, insn);
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+}
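+
+/* A sketch of the DImode flow above: the low halves are subtracted first
+   (subl2/decl), which sets the carry bit on borrow; "sbwc" then subtracts
+   the high halves together with that borrow, mirroring addl2/adwc on the
+   addition side.  */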
+
+/* True if X is an rtx for a constant that is a valid address. */
+
+bool
+legitimate_constant_address_p (rtx x)
+{
+ if (GET_CODE (x) == LABEL_REF || GET_CODE (x) == SYMBOL_REF
+ || CONST_INT_P (x) || GET_CODE (x) == HIGH)
+ return true;
+ if (GET_CODE (x) != CONST)
+ return false;
+#ifdef NO_EXTERNAL_INDIRECT_ADDRESS
+ if (flag_pic
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ && !SYMBOL_REF_LOCAL_P (XEXP (XEXP (x, 0), 0)))
+ return false;
+#endif
+ return true;
+}
+
+/* The other macros defined here are used only in legitimate_address_p (). */
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or, if not strict, if it is a pseudo reg. */
+#define INDEX_REGISTER_P(X, STRICT) \
+(REG_P (X) && (!(STRICT) || REGNO_OK_FOR_INDEX_P (REGNO (X))))
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or, if not strict, if it is a pseudo reg. */
+#define BASE_REGISTER_P(X, STRICT) \
+(REG_P (X) && (!(STRICT) || REGNO_OK_FOR_BASE_P (REGNO (X))))
+
+#ifdef NO_EXTERNAL_INDIRECT_ADDRESS
+
+/* Re-definition of CONSTANT_ADDRESS_P, which is true only when there
+ are no SYMBOL_REFs for external symbols present. */
+
+static bool
+indirectable_constant_address_p (rtx x, bool indirect)
+{
+ if (GET_CODE (x) == SYMBOL_REF)
+ return !flag_pic || SYMBOL_REF_LOCAL_P (x) || !indirect;
+
+ if (GET_CODE (x) == CONST)
+ return !flag_pic
+ || GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
+ || SYMBOL_REF_LOCAL_P (XEXP (XEXP (x, 0), 0));
+
+ return CONSTANT_ADDRESS_P (x);
+}
+
+#else /* not NO_EXTERNAL_INDIRECT_ADDRESS */
+
+static bool
+indirectable_constant_address_p (rtx x, bool indirect ATTRIBUTE_UNUSED)
+{
+ return CONSTANT_ADDRESS_P (x);
+}
+
+#endif /* not NO_EXTERNAL_INDIRECT_ADDRESS */
+
+/* True if X is an address which can be indirected. External symbols
+ could be in a sharable image library, so we disallow those. */
+
+static bool
+indirectable_address_p (rtx x, bool strict, bool indirect)
+{
+ if (indirectable_constant_address_p (x, indirect)
+ || BASE_REGISTER_P (x, strict))
+ return true;
+ if (GET_CODE (x) != PLUS
+ || !BASE_REGISTER_P (XEXP (x, 0), strict)
+ || (flag_pic && !CONST_INT_P (XEXP (x, 1))))
+ return false;
+ return indirectable_constant_address_p (XEXP (x, 1), indirect);
+}
+
+/* Return true if x is a valid address not using indexing.
+ (This much is the easy part.) */
+static bool
+nonindexed_address_p (rtx x, bool strict)
+{
+ rtx xfoo0;
+ if (REG_P (x))
+ {
+ if (! reload_in_progress
+ || reg_equiv_mem (REGNO (x)) == 0
+ || indirectable_address_p (reg_equiv_mem (REGNO (x)), strict, false))
+ return true;
+ }
+ if (indirectable_constant_address_p (x, false))
+ return true;
+ if (indirectable_address_p (x, strict, false))
+ return true;
+ xfoo0 = XEXP (x, 0);
+ if (MEM_P (x) && indirectable_address_p (xfoo0, strict, true))
+ return true;
+ if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
+ && BASE_REGISTER_P (xfoo0, strict))
+ return true;
+ return false;
+}
+
+/* True if PROD is either a reg times the size of mode MODE, where MODE is
+   at most 8 bytes, or just a reg if MODE is one byte.  */
+
+static bool
+index_term_p (rtx prod, enum machine_mode mode, bool strict)
+{
+ rtx xfoo0, xfoo1;
+
+ if (GET_MODE_SIZE (mode) == 1)
+ return BASE_REGISTER_P (prod, strict);
+
+ if (GET_CODE (prod) != MULT || GET_MODE_SIZE (mode) > 8)
+ return false;
+
+ xfoo0 = XEXP (prod, 0);
+ xfoo1 = XEXP (prod, 1);
+
+ if (CONST_INT_P (xfoo0)
+ && INTVAL (xfoo0) == (int)GET_MODE_SIZE (mode)
+ && INDEX_REGISTER_P (xfoo1, strict))
+ return true;
+
+ if (CONST_INT_P (xfoo1)
+ && INTVAL (xfoo1) == (int)GET_MODE_SIZE (mode)
+ && INDEX_REGISTER_P (xfoo0, strict))
+ return true;
+
+ return false;
+}
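+
+/* For example, with MODE == SImode (size 4), (mult (reg) (const_int 4)) is
+   a valid index term and corresponds to the assembler's "[rN]" scaled-index
+   notation; for QImode a bare register suffices since the scale is 1.  */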
+
+/* Return true if X is the sum of a register
+ and a valid index term for mode MODE. */
+static bool
+reg_plus_index_p (rtx x, enum machine_mode mode, bool strict)
+{
+ rtx xfoo0, xfoo1;
+
+ if (GET_CODE (x) != PLUS)
+ return false;
+
+ xfoo0 = XEXP (x, 0);
+ xfoo1 = XEXP (x, 1);
+
+ if (BASE_REGISTER_P (xfoo0, strict) && index_term_p (xfoo1, mode, strict))
+ return true;
+
+ if (BASE_REGISTER_P (xfoo1, strict) && index_term_p (xfoo0, mode, strict))
+ return true;
+
+ return false;
+}
+
+/* Return true if xfoo0 and xfoo1 constitute a valid indexed address. */
+static bool
+indexable_address_p (rtx xfoo0, rtx xfoo1, enum machine_mode mode, bool strict)
+{
+ if (!CONSTANT_ADDRESS_P (xfoo0))
+ return false;
+ if (BASE_REGISTER_P (xfoo1, strict))
+ return !flag_pic || mode == QImode;
+ if (flag_pic && symbolic_operand (xfoo0, SImode))
+ return false;
+ return reg_plus_index_p (xfoo1, mode, strict);
+}
+
+/* legitimate_address_p returns true if it recognizes an RTL expression "x"
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address. */
+bool
+vax_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
+{
+ rtx xfoo0, xfoo1;
+
+ if (nonindexed_address_p (x, strict))
+ return true;
+
+ if (GET_CODE (x) != PLUS)
+ return false;
+
+ /* Handle <address>[index] represented with index-sum outermost */
+
+ xfoo0 = XEXP (x, 0);
+ xfoo1 = XEXP (x, 1);
+
+ if (index_term_p (xfoo0, mode, strict)
+ && nonindexed_address_p (xfoo1, strict))
+ return true;
+
+ if (index_term_p (xfoo1, mode, strict)
+ && nonindexed_address_p (xfoo0, strict))
+ return true;
+
+ /* Handle offset(reg)[index] with offset added outermost */
+
+ if (indexable_address_p (xfoo0, xfoo1, mode, strict)
+ || indexable_address_p (xfoo1, xfoo0, mode, strict))
+ return true;
+
+ return false;
+}
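+
+/* Putting the pieces together (a sketch): for SImode the RTL
+     (plus (mult (reg r3) (const_int 4)) (plus (reg r2) (const_int 8)))
+   passes the first index_term_p/nonindexed_address_p pair above and
+   corresponds to the assembler form "8(r2)[r3]".  */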
+
+/* Return true if x (a legitimate address expression) has an effect that
+   depends on the machine mode it is used for.  On the VAX, the predecrement
+   and postincrement addresses depend thus (the amount of decrement or
+   increment being the length of the operand) and all indexed addresses
+   depend thus (because the index scale factor is the length of the
+   operand).  */
+
+static bool
+vax_mode_dependent_address_p (const_rtx x, addr_space_t as ATTRIBUTE_UNUSED)
+{
+ rtx xfoo0, xfoo1;
+
+ /* Auto-increment cases are now dealt with generically in recog.c. */
+ if (GET_CODE (x) != PLUS)
+ return false;
+
+ xfoo0 = XEXP (x, 0);
+ xfoo1 = XEXP (x, 1);
+
+ if (CONST_INT_P (xfoo0) && REG_P (xfoo1))
+ return false;
+ if (CONST_INT_P (xfoo1) && REG_P (xfoo0))
+ return false;
+ if (!flag_pic && CONSTANT_ADDRESS_P (xfoo0) && REG_P (xfoo1))
+ return false;
+ if (!flag_pic && CONSTANT_ADDRESS_P (xfoo1) && REG_P (xfoo0))
+ return false;
+
+ return true;
+}
+
+static rtx
+fixup_mathdi_operand (rtx x, enum machine_mode mode)
+{
+ if (illegal_addsub_di_memory_operand (x, mode))
+ {
+ rtx addr = XEXP (x, 0);
+ rtx temp = gen_reg_rtx (Pmode);
+ rtx offset = 0;
+#ifdef NO_EXTERNAL_INDIRECT_ADDRESS
+ if (GET_CODE (addr) == CONST && flag_pic)
+ {
+ offset = XEXP (XEXP (addr, 0), 1);
+ addr = XEXP (XEXP (addr, 0), 0);
+ }
+#endif
+ emit_move_insn (temp, addr);
+ if (offset)
+ temp = gen_rtx_PLUS (Pmode, temp, offset);
+ x = gen_rtx_MEM (DImode, temp);
+ }
+ return x;
+}
+
+void
+vax_expand_addsub_di_operands (rtx * operands, enum rtx_code code)
+{
+ int hi_only = operand_subword (operands[2], 0, 0, DImode) == const0_rtx;
+ rtx temp;
+
+ rtx (*gen_old_insn)(rtx, rtx, rtx);
+ rtx (*gen_si_insn)(rtx, rtx, rtx);
+ rtx (*gen_insn)(rtx, rtx, rtx);
+
+ if (code == PLUS)
+ {
+ gen_old_insn = gen_adddi3_old;
+ gen_si_insn = gen_addsi3;
+ gen_insn = gen_adcdi3;
+ }
+ else if (code == MINUS)
+ {
+ gen_old_insn = gen_subdi3_old;
+ gen_si_insn = gen_subsi3;
+ gen_insn = gen_sbcdi3;
+ }
+ else
+ gcc_unreachable ();
+
+  /* If this is addition (thus operands are commutative) and if there is one
+     addend that duplicates the destination, we want that addend to be the
+     first addend.  */
+ if (code == PLUS
+ && rtx_equal_p (operands[0], operands[2])
+ && !rtx_equal_p (operands[1], operands[2]))
+ {
+ temp = operands[2];
+ operands[2] = operands[1];
+ operands[1] = temp;
+ }
+
+ if (!TARGET_QMATH)
+ {
+ emit_insn ((*gen_old_insn) (operands[0], operands[1], operands[2]));
+ }
+ else if (hi_only)
+ {
+ if (!rtx_equal_p (operands[0], operands[1])
+ && (REG_P (operands[0]) && MEM_P (operands[1])))
+ {
+ emit_move_insn (operands[0], operands[1]);
+ operands[1] = operands[0];
+ }
+
+ operands[0] = fixup_mathdi_operand (operands[0], DImode);
+ operands[1] = fixup_mathdi_operand (operands[1], DImode);
+ operands[2] = fixup_mathdi_operand (operands[2], DImode);
+
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operand_subword (operands[0], 0, 0, DImode),
+ operand_subword (operands[1], 0, 0, DImode));
+
+ emit_insn ((*gen_si_insn) (operand_subword (operands[0], 1, 0, DImode),
+ operand_subword (operands[1], 1, 0, DImode),
+ operand_subword (operands[2], 1, 0, DImode)));
+ }
+ else
+ {
+      /* If we are adding the same value to itself, that's really a multiply
+	 by 2, which is just a left shift by 1.  */
+ if (rtx_equal_p (operands[1], operands[2]))
+ {
+ gcc_assert (code != MINUS);
+ emit_insn (gen_ashldi3 (operands[0], operands[1], const1_rtx));
+ return;
+ }
+
+ operands[0] = fixup_mathdi_operand (operands[0], DImode);
+
+      /* If an operand is the same as operands[0], use the operands[0] rtx,
+	 because fixup will create an equivalent rtx but not an equal one.  */
+
+ if (rtx_equal_p (operands[0], operands[1]))
+ operands[1] = operands[0];
+ else
+ operands[1] = fixup_mathdi_operand (operands[1], DImode);
+
+ if (rtx_equal_p (operands[0], operands[2]))
+ operands[2] = operands[0];
+ else
+ operands[2] = fixup_mathdi_operand (operands[2], DImode);
+
+ /* If we are subtracting not from ourselves [d = a - b], and because the
+ carry ops are two operand only, we would need to do a move prior to
+ the subtract. And if d == b, we would need a temp otherwise
+ [d = a, d -= d] and we end up with 0. Instead we rewrite d = a - b
+ into d = -b, d += a. Since -b can never overflow, even if b == d,
+ no temp is needed.
+
+ If we are doing addition, since the carry ops are two operand, if
+ we aren't adding to ourselves, move the first addend to the
+ destination first. */
+
+ gcc_assert (operands[1] != const0_rtx || code == MINUS);
+ if (!rtx_equal_p (operands[0], operands[1]) && operands[1] != const0_rtx)
+ {
+ if (code == MINUS && CONSTANT_P (operands[1]))
+ {
+ temp = gen_reg_rtx (DImode);
+ emit_insn (gen_sbcdi3 (operands[0], const0_rtx, operands[2]));
+ code = PLUS;
+ gen_insn = gen_adcdi3;
+ operands[2] = operands[1];
+ operands[1] = operands[0];
+ }
+ else
+ emit_move_insn (operands[0], operands[1]);
+ }
+
+ /* Subtracting a constant will have been rewritten to an addition of the
+ negative of that constant before we get here. */
+ gcc_assert (!CONSTANT_P (operands[2]) || code == PLUS);
+ emit_insn ((*gen_insn) (operands[0], operands[1], operands[2]));
+ }
+}
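+
+/* Concretely (a sketch): for d = 5 - b the constant-minuend path above
+   emits the equivalent of
+     sbcdi3 (d, 0, b)	// d = -b; cannot overflow even if d == b
+     adcdi3 (d, d, 5)	// d += 5, i.e. d = 5 - b
+   so no scratch register is needed.  */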
+
+bool
+adjacent_operands_p (rtx lo, rtx hi, enum machine_mode mode)
+{
+ HOST_WIDE_INT lo_offset;
+ HOST_WIDE_INT hi_offset;
+
+ if (GET_CODE (lo) != GET_CODE (hi))
+ return false;
+
+ if (REG_P (lo))
+ return mode == SImode && REGNO (lo) + 1 == REGNO (hi);
+ if (CONST_INT_P (lo))
+ return INTVAL (hi) == 0 && 0 <= INTVAL (lo) && INTVAL (lo) < 64;
+  if (GET_CODE (lo) == CONST_DOUBLE)
+    return mode != SImode;
+
+ if (!MEM_P (lo))
+ return false;
+
+ if (MEM_VOLATILE_P (lo) || MEM_VOLATILE_P (hi))
+ return false;
+
+ lo = XEXP (lo, 0);
+ hi = XEXP (hi, 0);
+
+ if (GET_CODE (lo) == POST_INC /* || GET_CODE (lo) == PRE_DEC */)
+ return rtx_equal_p (lo, hi);
+
+ switch (GET_CODE (lo))
+ {
+ case REG:
+ case SYMBOL_REF:
+ lo_offset = 0;
+ break;
+ case CONST:
+ lo = XEXP (lo, 0);
+ /* FALLTHROUGH */
+ case PLUS:
+ if (!CONST_INT_P (XEXP (lo, 1)))
+ return false;
+ lo_offset = INTVAL (XEXP (lo, 1));
+ lo = XEXP (lo, 0);
+ break;
+ default:
+ return false;
+ }
+
+ switch (GET_CODE (hi))
+ {
+ case REG:
+ case SYMBOL_REF:
+ hi_offset = 0;
+ break;
+ case CONST:
+ hi = XEXP (hi, 0);
+ /* FALLTHROUGH */
+ case PLUS:
+ if (!CONST_INT_P (XEXP (hi, 1)))
+ return false;
+ hi_offset = INTVAL (XEXP (hi, 1));
+ hi = XEXP (hi, 0);
+ break;
+ default:
+ return false;
+ }
+
+ if (GET_CODE (lo) == MULT || GET_CODE (lo) == PLUS)
+ return false;
+
+ return rtx_equal_p (lo, hi)
+ && hi_offset - lo_offset == GET_MODE_SIZE (mode);
+}
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts. */
+
+/* On the VAX, the trampoline contains an entry mask and two instructions:
+ .word NN
+	movl $STATIC,r0 (store the function's static chain)
+ jmp *$FUNCTION (jump to function code at address FUNCTION) */
+
+static void
+vax_asm_trampoline_template (FILE *f ATTRIBUTE_UNUSED)
+{
+ assemble_aligned_integer (2, const0_rtx);
+ assemble_aligned_integer (2, GEN_INT (0x8fd0));
+ assemble_aligned_integer (4, const0_rtx);
+ assemble_aligned_integer (1, GEN_INT (0x50 + STATIC_CHAIN_REGNUM));
+ assemble_aligned_integer (2, GEN_INT (0x9f17));
+ assemble_aligned_integer (4, const0_rtx);
+}
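+
+/* A sketch of how those bytes decode (VAX encodings; words are
+   little-endian, so 0x8fd0 is the byte sequence 0xd0 0x8f):
+     .word 0	 entry mask, copied from FUNCTION by vax_trampoline_init
+     0xd0 0x8f	 movl with immediate (PC autoincrement) source
+     .long 0	 placeholder for the static chain value
+     0x50+N	 register mode, destination R[STATIC_CHAIN_REGNUM]
+     0x17 0x9f	 jmp with absolute (@#) addressing
+     .long 0	 placeholder for FUNCTION's address + 2  */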
+
+/* We copy the register-mask from the function's pure code
+ to the start of the trampoline. */
+
+static void
+vax_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
+{
+ rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
+ rtx mem;
+
+ emit_block_move (m_tramp, assemble_trampoline_template (),
+ GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
+
+ mem = adjust_address (m_tramp, HImode, 0);
+ emit_move_insn (mem, gen_const_mem (HImode, fnaddr));
+
+ mem = adjust_address (m_tramp, SImode, 4);
+ emit_move_insn (mem, cxt);
+ mem = adjust_address (m_tramp, SImode, 11);
+ emit_move_insn (mem, plus_constant (Pmode, fnaddr, 2));
+ emit_insn (gen_sync_istream ());
+}
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the VAX, the RET insn pops a maximum of 255 args for any function. */
+
+static int
+vax_return_pops_args (tree fundecl ATTRIBUTE_UNUSED,
+ tree funtype ATTRIBUTE_UNUSED, int size)
+{
+ return size > 255 * 4 ? 0 : size;
+}
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+/* On the VAX all args are pushed. */
+
+static rtx
+vax_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_tree type ATTRIBUTE_UNUSED,
+ bool named ATTRIBUTE_UNUSED)
+{
+ return NULL_RTX;
+}
+
+/* Update the data in CUM to advance over an argument of mode MODE and
+ data type TYPE. (TYPE is null for libcalls where that information
+ may not be available.) */
+
+static void
+vax_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+
+ *cum += (mode != BLKmode
+ ? (GET_MODE_SIZE (mode) + 3) & ~3
+ : (int_size_in_bytes (type) + 3) & ~3);
+}
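+
+/* E.g. a QImode argument still advances CUM by 4 ((1 + 3) & ~3), since
+   every argument occupies at least one longword on the stack (cf.
+   PARM_BOUNDARY).  */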
diff --git a/gcc-4.8/gcc/config/vax/vax.h b/gcc-4.8/gcc/config/vax/vax.h
new file mode 100644
index 000000000..2f1890b85
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/vax.h
@@ -0,0 +1,708 @@
+/* Definitions of target machine for GNU compiler. VAX version.
+ Copyright (C) 1987-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* Target CPU builtins. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__vax__"); \
+ builtin_assert ("cpu=vax"); \
+ builtin_assert ("machine=vax"); \
+ if (TARGET_G_FLOAT) \
+ { \
+ builtin_define ("__GFLOAT"); \
+ builtin_define ("__GFLOAT__"); \
+ } \
+ } \
+ while (0)
+
+/* Use -J option for long branch support with Unix assembler. */
+
+#define ASM_SPEC "-J"
+
+/* Choose proper libraries depending on float format.
+ Note that there are no profiling libraries for g-format.
+ Also use -lg for the sake of dbx. */
+
+#define LIB_SPEC "%{g:-lg}\
+ %{mg:%{lm:-lmg} -lcg \
+ %{p:%eprofiling not supported with -mg\n}\
+ %{pg:%eprofiling not supported with -mg\n}}\
+ %{!mg:%{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}}"
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+/* Nonzero if ELF. Redefined by vax/elf.h. */
+#define TARGET_ELF 0
+
+/* Use BSD names for udiv and umod libgcc calls. */
+#define TARGET_BSD_DIVMOD 1
+
+/* Default target_flags if no switches specified. */
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_UNIX_ASM)
+#endif
+
+
+/* Target machine storage layout */
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields.
+ This is not true on the VAX. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+/* That is not true on the VAX. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+/* This is not true on the VAX. */
+#define WORDS_BIG_ENDIAN 0
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 16
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY (TARGET_VAXC_ALIGNMENT ? 8 : 32)
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bit-field declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS (! TARGET_VAXC_ALIGNMENT)
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 32
+
+/* No structure field wants to be aligned rounder than this. */
+#define BIGGEST_FIELD_ALIGNMENT (TARGET_VAXC_ALIGNMENT ? 8 : 32)
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 0
+
+/* Let's keep the stack somewhat aligned. */
+#define STACK_BOUNDARY 32
+
+/* The table of an ADDR_DIFF_VEC must be contiguous with the case
+   opcode; it is part of the case instruction.  */
+#define ADDR_VEC_ALIGN(ADDR_VEC) 0
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers. */
+#define FIRST_PSEUDO_REGISTER 16
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+ On the VAX, these are the AP, FP, SP and PC. */
+#define FIXED_REGISTERS {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+#define CALL_USED_REGISTERS {1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+ On the VAX, all registers are one word long. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
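+
+/* For instance, DImode (8 bytes) occupies (8 + 4 - 1) / 4 == 2 consecutive
+   registers, while SImode and smaller modes occupy 1.  */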
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ On the VAX, all registers can hold all modes. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) 1
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) 1
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* VAX pc is overloaded on a register. */
+#define PC_REGNUM VAX_PC_REGNUM
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM VAX_SP_REGNUM
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM VAX_FP_REGNUM
+
+/* Offset from the frame pointer register value to the top of stack. */
+#define FRAME_POINTER_CFA_OFFSET(FNDECL) 0
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM VAX_AP_REGNUM
+
+/* Register in which static-chain is passed to a function. */
+#define STATIC_CHAIN_REGNUM 0
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define VAX_STRUCT_VALUE_REGNUM 1
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The VAX has only one kind of registers, so NO_REGS and ALL_REGS
+ are the only classes. */
+
+enum reg_class { NO_REGS, ALL_REGS, LIM_REG_CLASSES };
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Since GENERAL_REGS is the same class as ALL_REGS,
+ don't give it a different class number; just make it an alias. */
+
+#define GENERAL_REGS ALL_REGS
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+ { "NO_REGS", "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS {{0}, {0xffff}}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) ALL_REGS
+
+/* The class value for index registers, and the one for base regs. */
+
+#define INDEX_REG_CLASS ALL_REGS
+#define BASE_REG_CLASS ALL_REGS
+
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this to nonzero if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* Given an rtx for the address of a frame,
+ return an rtx for the address of the word in the frame
+ that holds the dynamic chain--the previous frame's address. */
+#define DYNAMIC_CHAIN_ADDRESS(FRAME) plus_constant (Pmode, (FRAME), 12)
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On the VAX, -(sp) pushes only the bytes of the operands. */
+#define PUSH_ROUNDING(BYTES) (BYTES)
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+
+/* On the VAX the return value is in R0 regardless. */
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx_REG (TYPE_MODE (VALTYPE), 0)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+/* On the VAX the return value is in R0 regardless. */
+
+#define LIBCALL_VALUE(MODE) gen_rtx_REG (MODE, 0)
+
+/* Define this if PCC uses the nonreentrant convention for returning
+ structure and union values. */
+
+#define PCC_STATIC_STRUCT_RETURN
+
+/* 1 if N is a possible register number for a function value.
+ On the VAX, R0 is the only register thus used. */
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
+
+/* 1 if N is a possible register number for function argument passing.
+ On the VAX, no registers are used in this way. */
+
+#define FUNCTION_ARG_REGNO_P(N) 0
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On the VAX, this is a single integer, which is a number of bytes
+ of arguments scanned so far. */
+
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+
+ On the VAX, the offset starts at 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ ((CUM) = 0)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define VAX_FUNCTION_PROFILER_NAME "mcount"
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ do \
+ { \
+ char label[256]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "LP", (LABELNO)); \
+ fprintf (FILE, "\tmovab "); \
+ assemble_name (FILE, label); \
+ asm_fprintf (FILE, ",%Rr0\n\tjsb %s\n", \
+ VAX_FUNCTION_PROFILER_NAME); \
+ } \
+ while (0)
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 1
+
+/* Store in the variable DEPTH the initial difference between the
+ frame pointer reg contents and the stack pointer reg contents,
+ as of the start of the function body. This depends on the layout
+ of the fixed parts of the stack frame and on how registers are saved.
+
+ On the VAX, FRAME_POINTER_REQUIRED is always 1, so the definition of this
+ macro doesn't matter. But it must be defined. */
+
+#define INITIAL_FRAME_POINTER_OFFSET(DEPTH) (DEPTH) = 0;
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE 15
+
+/* Byte offset of return address in a stack frame. The "saved PC" field
+ is in element [4] when treating the frame as an array of longwords. */
+
+#define RETURN_ADDRESS_OFFSET (4 * UNITS_PER_WORD) /* 16 */
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame.
+ FRAMEADDR is already the frame pointer of the COUNT frame, so we
+ can ignore COUNT. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT == 0) \
+ ? gen_rtx_MEM (Pmode, plus_constant (Pmode, FRAME, \
+ RETURN_ADDRESS_OFFSET)) \
+ : (rtx) 0)
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in reginfo.c during register
+ allocation. */
+
+#define REGNO_OK_FOR_INDEX_P(regno) \
+ ((regno) < FIRST_PSEUDO_REGISTER || reg_renumber[regno] >= 0)
+#define REGNO_OK_FOR_BASE_P(regno) \
+ ((regno) < FIRST_PSEUDO_REGISTER || reg_renumber[regno] >= 0)
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* 1 if X is an rtx for a constant that is a valid address. */
+
+#define CONSTANT_ADDRESS_P(X) legitimate_constant_address_p (X)
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) 1
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) 1
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#endif
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE HImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+#define CASE_VECTOR_PC_RELATIVE 1
+
+/* Indicate that jump tables go in the text section. This is
+ necessary when compiling PIC code. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 8
+
+/* If a memory-to-memory move would take MOVE_RATIO or more simple
+ move-instruction pairs, we will do a movmem or libcall instead. */
+#define MOVE_RATIO(speed) ((speed) ? 6 : 3)
+#define CLEAR_RATIO(speed) ((speed) ? 6 : 2)
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Define if shifts truncate the shift count
+ which implies one can omit a sign-extension or zero-extension
+ of a shift count. */
+/* #define SHIFT_COUNT_TRUNCATED */
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode SImode
+
+/* A function address in a call instruction
+ is a byte address (for indexing purposes)
+ so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE QImode
+
+/* Specify the cost of a branch insn; roughly the number of extra insns that
+ should be added to avoid a branch.
+
+ Branches are extremely cheap on the VAX while the shift insns often
+ used to replace branches can be expensive. */
+
+#define BRANCH_COST(speed_p, predictable_p) 0
+
+/* Tell final.c how to eliminate redundant test instructions. */
+
+/* Here we define machine-dependent flags and fields in cc_status
+ (see `conditions.h'). No extra ones are needed for the VAX. */
+
+/* Store in cc_status the expressions
+ that the condition codes will describe
+ after execution of an instruction whose pattern is EXP.
+ Do not alter them if the instruction would not alter the cc's. */
+
+#define NOTICE_UPDATE_CC(EXP, INSN) \
+ vax_notice_update_cc ((EXP), (INSN))
+
+#define OUTPUT_JUMP(NORMAL, FLOAT, NO_OV) \
+ { if (cc_status.flags & CC_NO_OVERFLOW) \
+ return NO_OV; \
+ return NORMAL; \
+ }
+
+/* Control the assembler format that we output. */
+
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will end at
+ the end of the line. */
+
+#define ASM_COMMENT_START "#"
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON "#APP\n"
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* Output before read-only data. */
+
+#define TEXT_SECTION_ASM_OP "\t.text"
+
+/* Output before writable data. */
+
+#define DATA_SECTION_ASM_OP "\t.data"
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above).
+ The register names will be prefixed by REGISTER_PREFIX, if any. */
+
+#define REGISTER_PREFIX ""
+#define REGISTER_NAMES \
+ { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "ap", "fp", "sp", "pc", }
+
+/* This is BSD, so it wants DBX format. */
+
+#define DBX_DEBUGGING_INFO 1
+
+/* Do not break .stabs pseudos into continuations. */
+
+#define DBX_CONTIN_LENGTH 0
+
+/* This is the char to use for continuation (in case we need to turn
+ continuation back on). */
+
+#define DBX_CONTIN_CHAR '?'
+
+/* Don't use the `xsfoo;' construct in DBX output; this system
+ doesn't support it. */
+
+#define DBX_NO_XREFS
+
+/* Output the .stabs for a C `static' variable in the data section. */
+#define DBX_STATIC_STAB_DATA_SECTION
+
+/* VAX specific: which type character is used for type double? */
+
+#define ASM_DOUBLE_CHAR (TARGET_G_FLOAT ? 'g' : 'd')
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP ".globl "
+
+/* The prefix to add to user-visible assembler symbols. */
+
+#define USER_LABEL_PREFIX "_"
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*%s%ld", PREFIX, (long)(NUM))
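+
+/* E.g. PREFIX "LP" and NUM 3 yield "*LP3"; the leading '*' tells
+   assemble_name to output the name verbatim, without USER_LABEL_PREFIX.  */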
+
+/* This is how to output an insn to push a register on the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
+ fprintf (FILE, "\tpushl %s\n", reg_names[REGNO])
+
+/* This is how to output an insn to pop a register from the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
+ fprintf (FILE, "\tmovl (%s)+,%s\n", reg_names[STACK_POINTER_REGNUM], \
+ reg_names[REGNO])
+
+/* This is how to output an element of a case-vector that is absolute.
+ (The VAX does not use such vectors,
+ but we must define this macro anyway.) */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ do \
+ { \
+ char label[256]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", (VALUE));\
+ fprintf (FILE, "\t.long "); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "\n"); \
+ } \
+ while (0)
+
+/* This is how to output an element of a case-vector that is relative. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ do \
+ { \
+ char label[256]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", (VALUE)); \
+ fprintf (FILE, "\t.word "); \
+ assemble_name (FILE, label); \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", (REL)); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "\n"); \
+ } \
+ while (0)
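+
+/* E.g. VALUE 7 and REL 2 produce "\t.word L7-L2", a 16-bit self-relative
+   entry matching CASE_VECTOR_MODE and CASE_VECTOR_PC_RELATIVE above.  */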
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ fprintf (FILE, "\t.align %d\n", (LOG))
+
+/* This is how to output an assembler line
+ that says to advance the location counter by SIZE bytes. */
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.space %u\n", (int)(SIZE))
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+ ( fputs (".comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (int)(ROUNDED)))
+
+/* This says how to output an assembler line
+ to define a local common symbol. */
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+ ( fputs (".lcomm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (int)(ROUNDED)))
+
+/* Print an instruction operand X on file FILE.
+ CODE is the code from the %-spec that requested printing this operand;
+ if `%z3' was used to print operand 3, then CODE is 'z'.
+
+VAX operand formatting codes:
+
+ letter print
+ c direct branch condition
+ C reverse branch condition
+ D 64-bit immediate operand
+ B the low 8 bits of the complement of a constant operand
+ H the low 16 bits of the complement of a constant operand
+ M a mask for the N highest bits of a word
+ N the complement of a constant integer operand
+ P constant operand plus 1
+ R 32 - constant operand
+ b the low 8 bits of a negated constant operand
+ h the low 16 bits of a negated constant operand
+ # 'd' or 'g' depending on whether dfloat or gfloat is used
+ | register prefix */
+
+/* The purpose of D is to get around a quirk or bug in VAX assembler
+ whereby -1 in a 64-bit immediate operand means 0x00000000ffffffff,
+ which is not a 64-bit minus one. As a workaround, we output negative
+ values in hex. */
+#if HOST_BITS_PER_WIDE_INT == 64
+# define NEG_HWI_PRINT_HEX16 HOST_WIDE_INT_PRINT_HEX
+#else
+# define NEG_HWI_PRINT_HEX16 "0xffffffff%08lx"
+#endif
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '#' || (CODE) == '|')
+
+#define PRINT_OPERAND(FILE, X, CODE) \
+ print_operand (FILE, X, CODE)
+
+/* Print a memory operand whose address is X, on file FILE.
+ This uses a function in output-vax.c. */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+ print_operand_address (FILE, ADDR)
+
+/* This is a blatant lie.  However, it's good enough, since we don't
+   actually have any code whatsoever for which this isn't overridden
+   by the proper FDE definition.  */
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, PC_REGNUM)
+
diff --git a/gcc-4.8/gcc/config/vax/vax.md b/gcc-4.8/gcc/config/vax/vax.md
new file mode 100644
index 000000000..eadde18ad
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/vax.md
@@ -0,0 +1,1662 @@
+;; Machine description for GNU compiler, VAX Version
+;; Copyright (C) 1987-2013 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;;- Instruction patterns. When multiple patterns apply,
+;;- the first one in the file is chosen.
+;;-
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+;;-
+;;- cpp macro #define NOTICE_UPDATE_CC in file tm.h handles condition code
+;;- updates for most instructions.
+
+;; UNSPEC_VOLATILE usage:
+
+(define_c_enum "unspecv" [
+ VUNSPEC_BLOCKAGE ; 'blockage' insn to prevent scheduling across an
+ ; insn in the code.
+ VUNSPEC_SYNC_ISTREAM ; sequence of insns to sync the I-stream
+ VUNSPEC_PEM ; 'procedure_entry_mask' insn.
+])
+
+(define_constants
+ [(VAX_AP_REGNUM 12) ; Register 12 contains the argument pointer
+ (VAX_FP_REGNUM 13) ; Register 13 contains the frame pointer
+ (VAX_SP_REGNUM 14) ; Register 14 contains the stack pointer
+ (VAX_PC_REGNUM 15) ; Register 15 contains the program counter
+ ]
+)
+
+;; Integer modes supported on VAX, with a mapping from machine mode
+;; to mnemonic suffix. DImode is always a special case.
+(define_mode_iterator VAXint [QI HI SI])
+(define_mode_iterator VAXintQH [QI HI])
+(define_mode_iterator VAXintQHSD [QI HI SI DI])
+(define_mode_attr isfx [(QI "b") (HI "w") (SI "l") (DI "q")])
+
+;; Similar for float modes supported on VAX.
+(define_mode_iterator VAXfp [SF DF])
+(define_mode_attr fsfx [(SF "f") (DF "%#")])
+
+;; Some output patterns want integer immediates with a prefix...
+(define_mode_attr iprefx [(QI "B") (HI "H") (SI "N")])
+
+;;
+(include "constraints.md")
+(include "predicates.md")
+
+(define_insn "*cmp<mode>"
+ [(set (cc0)
+ (compare (match_operand:VAXint 0 "nonimmediate_operand" "nrmT,nrmT")
+ (match_operand:VAXint 1 "general_operand" "I,nrmT")))]
+ ""
+ "@
+ tst<VAXint:isfx> %0
+ cmp<VAXint:isfx> %0,%1")
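+
+;; A sketch of how the iterator expands: with VAXint == SI, the first
+;; alternative (constraint "I", matching the constant zero) emits
+;; "tstl %0" and the second emits "cmpl %0,%1".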
+
+(define_insn "*cmp<mode>"
+ [(set (cc0)
+ (compare (match_operand:VAXfp 0 "general_operand" "gF,gF")
+ (match_operand:VAXfp 1 "general_operand" "G,gF")))]
+ ""
+ "@
+ tst<VAXfp:fsfx> %0
+ cmp<VAXfp:fsfx> %0,%1")
+
+(define_insn "*bit<mode>"
+ [(set (cc0)
+ (compare (and:VAXint (match_operand:VAXint 0 "general_operand" "nrmT")
+ (match_operand:VAXint 1 "general_operand" "nrmT"))
+ (const_int 0)))]
+ ""
+ "bit<VAXint:isfx> %0,%1")
+
+;; The VAX has no sCOND insns. It does have add/subtract with carry
+;; which could be used to implement the sltu and sgeu patterns. However,
+;; to do this properly requires a complete rewrite of the compare insns
+;; to keep them together with the sltu/sgeu insns until after the
+;; reload pass is complete. The previous implementation didn't do this
+;; and has been deleted.
+
+
+(define_insn "mov<mode>"
+ [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g,g")
+ (match_operand:VAXfp 1 "general_operand" "G,gF"))]
+ ""
+ "@
+ clr<VAXfp:fsfx> %0
+ mov<VAXfp:fsfx> %1,%0")
+
+;; Some VAXen don't support this instruction.
+;;(define_insn "movti"
+;; [(set (match_operand:TI 0 "general_operand" "=g")
+;; (match_operand:TI 1 "general_operand" "g"))]
+;; ""
+;; "movh %1,%0")
+
+(define_insn "movdi"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
+ (match_operand:DI 1 "general_operand" "g"))]
+ ""
+ "* return vax_output_int_move (insn, operands, DImode);")
+
+;; The VAX move instructions have space-time tradeoffs. On a MicroVAX,
+;; register-register mov instructions take 3 bytes and 2 CPU cycles. clrl
+;; takes 2 bytes and 3 cycles. mov from constant to register takes 2 cycles
+;; if the constant is smaller than 4 bytes, 3 cycles for a longword
+;; constant. movz, mneg, and mcom are as fast as mov, so movzwl is faster
+;; than movl for positive constants that fit in 16 bits but not 6 bits. cvt
+;; instructions take 4 cycles. inc takes 3 cycles. The machine description
+;; is willing to trade 1 byte for 1 cycle (clrl instead of movl $0; cvtwl
+;; instead of movl).
+
+;; Cycle counts for other models may vary (on a VAX 750 they are similar,
+;; but on a VAX 9000 most move and add instructions with one constant
+;; operand take 1 cycle).
+
+;; Loads of constants between 64 and 128 used to be done with
+;; "addl3 $63,#,dst" but this is slower than movzbl and takes as much space.
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+#ifdef NO_EXTERNAL_INDIRECT_ADDRESS
+ if (flag_pic
+ && GET_CODE (operands[1]) == CONST
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF
+ && !SYMBOL_REF_LOCAL_P (XEXP (XEXP (operands[1], 0), 0)))
+ {
+ rtx symbol_ref = XEXP (XEXP (operands[1], 0), 0);
+ rtx const_int = XEXP (XEXP (operands[1], 0), 1);
+ rtx temp = reload_in_progress ? operands[0] : gen_reg_rtx (Pmode);
+ emit_move_insn (temp, symbol_ref);
+ emit_move_insn (operands[0], gen_rtx_PLUS (SImode, temp, const_int));
+ DONE;
+ }
+#endif
+}")
+
+(define_insn "movsi_2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (match_operand:SI 1 "nonsymbolic_operand" "nrmT"))]
+ ""
+ "* return vax_output_int_move (insn, operands, SImode);")
+
+(define_insn "mov<mode>"
+ [(set (match_operand:VAXintQH 0 "nonimmediate_operand" "=g")
+ (match_operand:VAXintQH 1 "general_operand" "g"))]
+ ""
+ "* return vax_output_int_move (insn, operands, <MODE>mode);")
+
+(define_insn "movstricthi"
+ [(set (strict_low_part (match_operand:HI 0 "register_operand" "+g"))
+ (match_operand:HI 1 "general_operand" "g"))]
+ ""
+ "*
+{
+ if (CONST_INT_P (operands[1]))
+ {
+ int i = INTVAL (operands[1]);
+ if (i == 0)
+ return \"clrw %0\";
+ else if ((unsigned int)i < 64)
+ return \"movw %1,%0\";
+ else if ((unsigned int)~i < 64)
+ return \"mcomw %H1,%0\";
+ else if ((unsigned int)i < 256)
+ return \"movzbw %1,%0\";
+ }
+ return \"movw %1,%0\";
+}")
+
+(define_insn "movstrictqi"
+ [(set (strict_low_part (match_operand:QI 0 "register_operand" "+g"))
+ (match_operand:QI 1 "general_operand" "g"))]
+ ""
+ "*
+{
+ if (CONST_INT_P (operands[1]))
+ {
+ int i = INTVAL (operands[1]);
+ if (i == 0)
+ return \"clrb %0\";
+ else if ((unsigned int)~i < 64)
+ return \"mcomb %B1,%0\";
+ }
+ return \"movb %1,%0\";
+}")
+
+;; This is here to accept 4 arguments and pass the first 3 along
+;; to the movmemhi1 pattern that really does the work.
+(define_expand "movmemhi"
+ [(set (match_operand:BLK 0 "general_operand" "=g")
+ (match_operand:BLK 1 "general_operand" "g"))
+ (use (match_operand:HI 2 "general_operand" "g"))
+ (match_operand 3 "" "")]
+ ""
+ "
+{
+ emit_insn (gen_movmemhi1 (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+;; The definition of this insn does not really explain what it does,
+;; but it should suffice that anything generated as this insn will be
+;; recognized as a movmemhi1 operation, and that it won't successfully
+;; combine with anything.
+
+(define_insn "movmemhi1"
+ [(set (match_operand:BLK 0 "memory_operand" "=o")
+ (match_operand:BLK 1 "memory_operand" "o"))
+ (use (match_operand:HI 2 "general_operand" "g"))
+ (clobber (reg:SI 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))
+ (clobber (reg:SI 4))
+ (clobber (reg:SI 5))]
+ ""
+ "movc3 %2,%1,%0")
+
+;; Extension and truncation insns.
+
+(define_insn "truncsiqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=g")
+ (truncate:QI (match_operand:SI 1 "nonimmediate_operand" "nrmT")))]
+ ""
+ "cvtlb %1,%0")
+
+(define_insn "truncsihi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=g")
+ (truncate:HI (match_operand:SI 1 "nonimmediate_operand" "nrmT")))]
+ ""
+ "cvtlw %1,%0")
+
+(define_insn "trunchiqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=g")
+ (truncate:QI (match_operand:HI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtwb %1,%0")
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtwl %1,%0")
+
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=g")
+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtbw %1,%0")
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtbl %1,%0")
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=g")
+ (float_extend:DF (match_operand:SF 1 "general_operand" "gF")))]
+ ""
+ "cvtf%# %1,%0")
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=g")
+ (float_truncate:SF (match_operand:DF 1 "general_operand" "gF")))]
+ ""
+ "cvt%#f %1,%0")
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "g")))]
+ ""
+ "movzwl %1,%0")
+
+(define_insn "zero_extendqihi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=g")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+ ""
+ "movzbw %1,%0")
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+ ""
+ "movzbl %1,%0")
+
+;; Fix-to-float conversion insns.
+
+(define_insn "float<VAXint:mode><VAXfp:mode>2"
+ [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g")
+ (float:VAXfp (match_operand:VAXint 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvt<VAXint:isfx><VAXfp:fsfx> %1,%0")
+
+;; Float-to-fix conversion insns.
+
+(define_insn "fix_trunc<VAXfp:mode><VAXint:mode>2"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g")
+ (fix:VAXint (match_operand:VAXfp 1 "general_operand" "gF")))]
+ ""
+ "cvt<VAXfp:fsfx><VAXint:isfx> %1,%0")
+
+(define_expand "fixuns_trunc<VAXfp:mode><VAXint:mode>2"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "")
+ (fix:VAXint (match_operand:VAXfp 1 "general_operand")))]
+ "")
+
+;;- All kinds of add instructions.
+
+(define_insn "add<mode>3"
+ [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g,g,g")
+ (plus:VAXfp (match_operand:VAXfp 1 "general_operand" "0,gF,gF")
+ (match_operand:VAXfp 2 "general_operand" "gF,0,gF")))]
+ ""
+ "@
+ add<VAXfp:fsfx>2 %2,%0
+ add<VAXfp:fsfx>2 %1,%0
+ add<VAXfp:fsfx>3 %1,%2,%0")
+
+(define_insn "pushlclsymreg"
+ [(set (match_operand:SI 0 "push_operand" "=g")
+ (plus:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "local_symbolic_operand" "i")))]
+ "flag_pic"
+ "pushab %a2[%1]")
+
+(define_insn "pushextsymreg"
+ [(set (match_operand:SI 0 "push_operand" "=g")
+ (plus:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "external_symbolic_operand" "i")))]
+ "flag_pic"
+ "pushab %a2[%1]")
+
+(define_insn "movlclsymreg"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (plus:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "local_symbolic_operand" "i")))]
+ "flag_pic"
+ "movab %a2[%1],%0")
+
+(define_insn "movextsymreg"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (plus:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "external_symbolic_operand" "i")))]
+ "flag_pic"
+ "movab %a2[%1],%0")
+
+(define_insn "add<mode>3"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g")
+ (plus:VAXint (match_operand:VAXint 1 "general_operand" "nrmT")
+ (match_operand:VAXint 2 "general_operand" "nrmT")))]
+ ""
+ "* return vax_output_int_add (insn, operands, <MODE>mode);")
+
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
+ (plus:DI (match_operand:DI 1 "general_operand" "g")
+ (match_operand:DI 2 "general_operand" "g")))]
+ "!reload_in_progress"
+ "vax_expand_addsub_di_operands (operands, PLUS); DONE;")
+
+(define_insn "adcdi3"
+ [(set (match_operand:DI 0 "nonimmediate_addsub_di_operand" "=Rr")
+ (plus:DI (match_operand:DI 1 "general_addsub_di_operand" "%0")
+ (match_operand:DI 2 "general_addsub_di_operand" "nRr")))]
+ "TARGET_QMATH"
+ "* return vax_output_int_add (insn, operands, DImode);")
+
+;; The add-with-carry (adwc) instruction only accepts two operands.
+(define_insn "adddi3_old"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro>,ro>")
+ (plus:DI (match_operand:DI 1 "general_operand" "%0,ro>")
+ (match_operand:DI 2 "general_operand" "Fsro,Fs")))]
+ "!TARGET_QMATH"
+ "* return vax_output_int_add (insn, operands, DImode);")
+
+;;- All kinds of subtract instructions.
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g,g")
+ (minus:VAXfp (match_operand:VAXfp 1 "general_operand" "0,gF")
+ (match_operand:VAXfp 2 "general_operand" "gF,gF")))]
+ ""
+ "@
+ sub<VAXfp:fsfx>2 %2,%0
+ sub<VAXfp:fsfx>3 %2,%1,%0")
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g")
+ (minus:VAXint (match_operand:VAXint 1 "general_operand" "0,nrmT")
+ (match_operand:VAXint 2 "general_operand" "nrmT,nrmT")))]
+ ""
+ "@
+ sub<VAXint:isfx>2 %2,%0
+ sub<VAXint:isfx>3 %2,%1,%0")
+
+(define_expand "subdi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
+ (minus:DI (match_operand:DI 1 "general_operand" "g")
+ (match_operand:DI 2 "general_operand" "g")))]
+ "!reload_in_progress"
+ "vax_expand_addsub_di_operands (operands, MINUS); DONE;")
+
+(define_insn "sbcdi3"
+ [(set (match_operand:DI 0 "nonimmediate_addsub_di_operand" "=Rr,=Rr")
+ (minus:DI (match_operand:DI 1 "general_addsub_di_operand" "0,I")
+ (match_operand:DI 2 "general_addsub_di_operand" "nRr,Rr")))]
+ "TARGET_QMATH"
+ "* return vax_output_int_subtract (insn, operands, DImode);")
+
+;; The subtract-with-carry (sbwc) instruction only takes two operands.
+(define_insn "subdi3_old"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=or>,or>")
+ (minus:DI (match_operand:DI 1 "general_operand" "0,or>")
+ (match_operand:DI 2 "general_operand" "Fsor,Fs")))]
+ "!TARGET_QMATH"
+ "* return vax_output_int_subtract (insn, operands, DImode);")
+
+;;- Multiply instructions.
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g,g,g")
+ (mult:VAXfp (match_operand:VAXfp 1 "general_operand" "0,gF,gF")
+ (match_operand:VAXfp 2 "general_operand" "gF,0,gF")))]
+ ""
+ "@
+ mul<VAXfp:fsfx>2 %2,%0
+ mul<VAXfp:fsfx>2 %1,%0
+ mul<VAXfp:fsfx>3 %1,%2,%0")
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g,g")
+ (mult:VAXint (match_operand:VAXint 1 "general_operand" "0,nrmT,nrmT")
+ (match_operand:VAXint 2 "general_operand" "nrmT,0,nrmT")))]
+ ""
+ "@
+ mul<VAXint:isfx>2 %2,%0
+ mul<VAXint:isfx>2 %1,%0
+ mul<VAXint:isfx>3 %1,%2,%0")
+
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "nrmT"))
+ (sign_extend:DI
+ (match_operand:SI 2 "nonimmediate_operand" "nrmT"))))]
+ ""
+ "emul %1,%2,$0,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
+ (plus:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "nrmT"))
+ (sign_extend:DI
+ (match_operand:SI 2 "nonimmediate_operand" "nrmT")))
+ (sign_extend:DI (match_operand:SI 3 "nonimmediate_operand" "g"))))]
+ ""
+ "emul %1,%2,%3,%0")
+
+;; 'F' constraint means type CONST_DOUBLE
+(define_insn ""
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
+ (plus:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "nrmT"))
+ (sign_extend:DI
+ (match_operand:SI 2 "nonimmediate_operand" "nrmT")))
+ (match_operand:DI 3 "immediate_operand" "F")))]
+ "GET_CODE (operands[3]) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (operands[3]) == (CONST_DOUBLE_LOW (operands[3]) >> 31)"
+ "*
+{
+ if (CONST_DOUBLE_HIGH (operands[3]))
+ operands[3] = GEN_INT (CONST_DOUBLE_LOW (operands[3]));
+ return \"emul %1,%2,%3,%0\";
+}")
+
+;;- Divide instructions.
+
+(define_insn "div<mode>3"
+ [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g,g")
+ (div:VAXfp (match_operand:VAXfp 1 "general_operand" "0,gF")
+ (match_operand:VAXfp 2 "general_operand" "gF,gF")))]
+ ""
+ "@
+ div<VAXfp:fsfx>2 %2,%0
+ div<VAXfp:fsfx>3 %2,%1,%0")
+
+(define_insn "div<mode>3"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g")
+ (div:VAXint (match_operand:VAXint 1 "general_operand" "0,nrmT")
+ (match_operand:VAXint 2 "general_operand" "nrmT,nrmT")))]
+ ""
+ "@
+ div<VAXint:isfx>2 %2,%0
+ div<VAXint:isfx>3 %2,%1,%0")
+
+;This is left out because it is very slow;
+;we are better off programming around the "lack" of this insn.
+;(define_insn "divmoddisi4"
+; [(set (match_operand:SI 0 "general_operand" "=g")
+; (div:SI (match_operand:DI 1 "general_operand" "g")
+; (match_operand:SI 2 "general_operand" "g")))
+; (set (match_operand:SI 3 "general_operand" "=g")
+; (mod:SI (match_operand:DI 1 "general_operand" "g")
+; (match_operand:SI 2 "general_operand" "g")))]
+; ""
+; "ediv %2,%1,%0,%3")
+
+;; Bit-and on the VAX is done with a clear-bits insn.
+(define_expand "and<mode>3"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "")
+ (and:VAXint (not:VAXint (match_operand:VAXint 1 "general_operand" ""))
+ (match_operand:VAXint 2 "general_operand" "")))]
+ ""
+ "
+{
+ rtx op1 = operands[1];
+
+ /* If there is a constant argument, complement that one. */
+ if (CONST_INT_P (operands[2]) && ! CONST_INT_P (op1))
+ {
+ operands[1] = operands[2];
+ operands[2] = op1;
+ op1 = operands[1];
+ }
+
+ if (CONST_INT_P (op1))
+ operands[1] = GEN_INT (~INTVAL (op1));
+ else
+ operands[1] = expand_unop (<MODE>mode, one_cmpl_optab, op1, 0, 1);
+}")
+
+(define_insn "*and<mode>"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g")
+ (and:VAXint (not:VAXint (match_operand:VAXint 1 "general_operand" "nrmT,nrmT"))
+ (match_operand:VAXint 2 "general_operand" "0,nrmT")))]
+ ""
+ "@
+ bic<VAXint:isfx>2 %1,%0
+ bic<VAXint:isfx>3 %1,%2,%0")
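+;; For example, `x &= 0xf' is expanded with the mask complemented and
+;; matches the first alternative above, coming out (in effect) as
+;;   bicl2 $0xfffffff0,x   ; clear all but the low four bits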
+
+;; The following used to be needed because constant propagation can
+;; create them starting from the bic insn patterns above. This is no
+;; longer a problem. However, having these patterns allows optimization
+;; opportunities in combine.c.
+
+(define_insn "*and<mode>_const_int"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g")
+ (and:VAXint (match_operand:VAXint 1 "general_operand" "0,nrmT")
+ (match_operand:VAXint 2 "const_int_operand" "n,n")))]
+ ""
+ "@
+ bic<VAXint:isfx>2 %<VAXint:iprefx>2,%0
+ bic<VAXint:isfx>3 %<VAXint:iprefx>2,%1,%0")
+
+
+;;- Bit set instructions.
+
+(define_insn "ior<mode>3"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g,g")
+ (ior:VAXint (match_operand:VAXint 1 "general_operand" "0,nrmT,nrmT")
+ (match_operand:VAXint 2 "general_operand" "nrmT,0,nrmT")))]
+ ""
+ "@
+ bis<VAXint:isfx>2 %2,%0
+ bis<VAXint:isfx>2 %1,%0
+ bis<VAXint:isfx>3 %2,%1,%0")
+
+;;- xor instructions.
+
+(define_insn "xor<mode>3"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g,g")
+ (xor:VAXint (match_operand:VAXint 1 "general_operand" "0,nrmT,nrmT")
+ (match_operand:VAXint 2 "general_operand" "nrmT,0,nrmT")))]
+ ""
+ "@
+ xor<VAXint:isfx>2 %2,%0
+ xor<VAXint:isfx>2 %1,%0
+ xor<VAXint:isfx>3 %2,%1,%0")
+
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g")
+ (neg:VAXfp (match_operand:VAXfp 1 "general_operand" "gF")))]
+ ""
+ "mneg<VAXfp:fsfx> %1,%0")
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g")
+ (neg:VAXint (match_operand:VAXint 1 "general_operand" "nrmT")))]
+ ""
+ "mneg<VAXint:isfx> %1,%0")
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g")
+ (not:VAXint (match_operand:VAXint 1 "general_operand" "nrmT")))]
+ ""
+ "mcom<VAXint:isfx> %1,%0")
+
+
+;; Arithmetic right shift on the VAX works by negating the shift count
+;; and then using ashl, which shifts right when given a negative count.
+;; This means that all actual shift counts in the RTL will be positive.
+;; This prevents converting shifts to ZERO_EXTRACTs with negative
+;; positions, which isn't valid.
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "
+{
+  if (! CONST_INT_P (operands[2]))
+ operands[2] = gen_rtx_NEG (QImode, negate_rtx (QImode, operands[2]));
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (ashiftrt:SI (match_operand:SI 1 "general_operand" "nrmT")
+ (match_operand:QI 2 "const_int_operand" "n")))]
+ ""
+ "ashl $%n2,%1,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (ashiftrt:SI (match_operand:SI 1 "general_operand" "nrmT")
+ (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
+ ""
+ "ashl %2,%1,%0")
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (ashift:SI (match_operand:SI 1 "general_operand" "nrmT")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "*
+{
+ if (operands[2] == const1_rtx && rtx_equal_p (operands[0], operands[1]))
+ return \"addl2 %0,%0\";
+ if (REG_P (operands[1]) && CONST_INT_P (operands[2]))
+ {
+ int i = INTVAL (operands[2]);
+ if (i == 1)
+ return \"addl3 %1,%1,%0\";
+ if (i == 2 && !optimize_size)
+ {
+ if (push_operand (operands[0], SImode))
+ return \"pushal 0[%1]\";
+ return \"moval 0[%1],%0\";
+ }
+ if (i == 3 && !optimize_size)
+ {
+ if (push_operand (operands[0], SImode))
+ return \"pushaq 0[%1]\";
+ return \"movaq 0[%1],%0\";
+ }
+ }
+ return \"ashl %2,%1,%0\";
+}")
+
+;; Arithmetic right shift on the VAX works by negating the shift count.
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "general_operand" "=g")
+ (ashiftrt:DI (match_operand:DI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "
+{
+ operands[2] = gen_rtx_NEG (QImode, negate_rtx (QImode, operands[2]));
+}")
+
+(define_insn "ashldi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
+ (ashift:DI (match_operand:DI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "ashq %2,%1,%0")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
+ (ashiftrt:DI (match_operand:DI 1 "general_operand" "g")
+ (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
+ ""
+ "ashq %2,%1,%0")
+
+;; We used to have expand_shift handle logical right shifts by using extzv,
+;; but this makes it very difficult to do lshrdi3.  Since the VAX is the
+;; only machine with this kludge, it's better to just do this with a
+;; define_expand and remove that case from expand_shift.
+
+(define_expand "lshrsi3"
+ [(set (match_dup 3)
+ (minus:QI (const_int 32)
+ (match_dup 4)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
+ (match_dup 3)
+ (match_operand:SI 2 "register_operand" "g")))]
+ ""
+ "
+{
+ operands[3] = gen_reg_rtx (QImode);
+ operands[4] = gen_lowpart (QImode, operands[2]);
+}")
+
+;; Rotate right on the VAX works by negating the shift count.
+(define_expand "rotrsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (rotatert:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "
+{
+ if (! CONST_INT_P (operands[2]))
+ operands[2] = gen_rtx_NEG (QImode, negate_rtx (QImode, operands[2]));
+}")
+
+(define_insn "rotlsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (rotate:SI (match_operand:SI 1 "general_operand" "nrmT")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "rotl %2,%1,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (rotatert:SI (match_operand:SI 1 "general_operand" "nrmT")
+ (match_operand:QI 2 "const_int_operand" "n")))]
+ ""
+ "rotl %R2,%1,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (rotatert:SI (match_operand:SI 1 "general_operand" "nrmT")
+ (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
+ ""
+ "rotl %2,%1,%0")
+
+;This insn is probably slower than a multiply and an add.
+;(define_insn ""
+; [(set (match_operand:SI 0 "general_operand" "=g")
+; (mult:SI (plus:SI (match_operand:SI 1 "general_operand" "g")
+; (match_operand:SI 2 "general_operand" "g"))
+; (match_operand:SI 3 "general_operand" "g")))]
+; ""
+; "index %1,$0x80000000,$0x7fffffff,%3,%2,%0")
+
+;; Special cases of bit-field insns which we should
+;; recognize in preference to the general case.
+;; These handle aligned 8-bit and 16-bit fields,
+;; which can usually be done with move instructions.
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+ro")
+ (match_operand:QI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (match_operand:SI 3 "general_operand" "g"))]
+ "(INTVAL (operands[1]) == 8 || INTVAL (operands[1]) == 16)
+ && INTVAL (operands[2]) % INTVAL (operands[1]) == 0
+ && (REG_P (operands[0])
+ || ! mode_dependent_address_p (XEXP (operands[0], 0),
+ MEM_ADDR_SPACE (operands[0])))"
+ "*
+{
+ if (REG_P (operands[0]))
+ {
+ if (INTVAL (operands[2]) != 0)
+ return \"insv %3,%2,%1,%0\";
+ }
+ else
+ operands[0]
+ = adjust_address (operands[0],
+ INTVAL (operands[1]) == 8 ? QImode : HImode,
+ INTVAL (operands[2]) / 8);
+
+ CC_STATUS_INIT;
+ if (INTVAL (operands[1]) == 8)
+ return \"movb %3,%0\";
+ return \"movw %3,%0\";
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=&g")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "ro")
+ (match_operand:QI 2 "const_int_operand" "n")
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ "(INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
+ && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
+ && (REG_P (operands[1])
+ || ! mode_dependent_address_p (XEXP (operands[1], 0),
+ MEM_ADDR_SPACE (operands[1])))"
+ "*
+{
+ if (REG_P (operands[1]))
+ {
+ if (INTVAL (operands[3]) != 0)
+ return \"extzv %3,%2,%1,%0\";
+ }
+ else
+ operands[1]
+ = adjust_address (operands[1],
+ INTVAL (operands[2]) == 8 ? QImode : HImode,
+ INTVAL (operands[3]) / 8);
+
+ if (INTVAL (operands[2]) == 8)
+ return \"movzbl %1,%0\";
+ return \"movzwl %1,%0\";
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (sign_extract:SI (match_operand:SI 1 "register_operand" "ro")
+ (match_operand:QI 2 "const_int_operand" "n")
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ "(INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
+ && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
+ && (REG_P (operands[1])
+ || ! mode_dependent_address_p (XEXP (operands[1], 0),
+ MEM_ADDR_SPACE (operands[1])))"
+ "*
+{
+ if (REG_P (operands[1]))
+ {
+ if (INTVAL (operands[3]) != 0)
+ return \"extv %3,%2,%1,%0\";
+ }
+ else
+ operands[1]
+ = adjust_address (operands[1],
+ INTVAL (operands[2]) == 8 ? QImode : HImode,
+ INTVAL (operands[3]) / 8);
+
+ if (INTVAL (operands[2]) == 8)
+ return \"cvtbl %1,%0\";
+ return \"cvtwl %1,%0\";
+}")
+
+;; Register-only SImode cases of bit-field insns.
+
+(define_insn ""
+ [(set (cc0)
+ (compare
+ (sign_extract:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "nrmT"))
+ (match_operand:SI 3 "general_operand" "nrmT")))]
+ ""
+ "cmpv %2,%1,%0,%3")
+
+(define_insn ""
+ [(set (cc0)
+ (compare
+ (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "nrmT"))
+ (match_operand:SI 3 "general_operand" "nrmT")))]
+ ""
+ "cmpzv %2,%1,%0,%3")
+
+;; When the field position and size are constant and the destination
+;; is a register, extv and extzv are much slower than a rotate followed
+;; by a bicl or sign extension. Because we might end up choosing ext[z]v
+;; anyway, we can't allow immediate values for the primary source operand.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (sign_extract:SI (match_operand:SI 1 "register_operand" "ro")
+ (match_operand:QI 2 "general_operand" "g")
+ (match_operand:SI 3 "general_operand" "nrmT")))]
+ ""
+ "*
+{
+ if (! CONST_INT_P (operands[3]) || ! CONST_INT_P (operands[2])
+ || ! REG_P (operands[0])
+ || (INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16))
+ return \"extv %3,%2,%1,%0\";
+ if (INTVAL (operands[2]) == 8)
+ return \"rotl %R3,%1,%0\;cvtbl %0,%0\";
+ return \"rotl %R3,%1,%0\;cvtwl %0,%0\";
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "ro")
+ (match_operand:QI 2 "general_operand" "g")
+ (match_operand:SI 3 "general_operand" "nrmT")))]
+ ""
+ "*
+{
+ if (! CONST_INT_P (operands[3]) || ! CONST_INT_P (operands[2])
+ || ! REG_P (operands[0]))
+ return \"extzv %3,%2,%1,%0\";
+ if (INTVAL (operands[2]) == 8)
+ return \"rotl %R3,%1,%0\;movzbl %0,%0\";
+ if (INTVAL (operands[2]) == 16)
+ return \"rotl %R3,%1,%0\;movzwl %0,%0\";
+ if (INTVAL (operands[3]) & 31)
+ return \"rotl %R3,%1,%0\;bicl2 %M2,%0\";
+ if (rtx_equal_p (operands[0], operands[1]))
+ return \"bicl2 %M2,%0\";
+ return \"bicl3 %M2,%1,%0\";
+}")
+
+;; Non-register cases.
+;; nonimmediate_operand is used to make sure that mode-ambiguous cases
+;; don't match these (and therefore match the cases above instead).
+
+(define_insn ""
+ [(set (cc0)
+ (compare
+ (sign_extract:SI (match_operand:QI 0 "memory_operand" "m")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "nrmT"))
+ (match_operand:SI 3 "general_operand" "nrmT")))]
+ ""
+ "cmpv %2,%1,%0,%3")
+
+(define_insn ""
+ [(set (cc0)
+ (compare
+ (zero_extract:SI (match_operand:QI 0 "nonimmediate_operand" "rm")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "nrmT"))
+ (match_operand:SI 3 "general_operand" "nrmT")))]
+ ""
+ "cmpzv %2,%1,%0,%3")
+
+(define_insn "extv"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (sign_extract:SI (match_operand:QI 1 "memory_operand" "m")
+ (match_operand:QI 2 "general_operand" "g")
+ (match_operand:SI 3 "general_operand" "nrmT")))]
+ ""
+ "*
+{
+ if (!REG_P (operands[0]) || !CONST_INT_P (operands[2])
+ || !CONST_INT_P (operands[3])
+ || (INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16)
+ || INTVAL (operands[2]) + INTVAL (operands[3]) > 32
+ || side_effects_p (operands[1])
+ || (MEM_P (operands[1])
+ && mode_dependent_address_p (XEXP (operands[1], 0),
+ MEM_ADDR_SPACE (operands[1]))))
+ return \"extv %3,%2,%1,%0\";
+ if (INTVAL (operands[2]) == 8)
+ return \"rotl %R3,%1,%0\;cvtbl %0,%0\";
+ return \"rotl %R3,%1,%0\;cvtwl %0,%0\";
+}")
+
+(define_expand "extzv"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (zero_extract:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:QI 2 "general_operand" "")
+ (match_operand:SI 3 "general_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (zero_extract:SI (match_operand:QI 1 "memory_operand" "m")
+ (match_operand:QI 2 "general_operand" "g")
+ (match_operand:SI 3 "general_operand" "nrmT")))]
+ ""
+ "*
+{
+ if (!REG_P (operands[0]) || !CONST_INT_P (operands[2])
+ || !CONST_INT_P (operands[3])
+ || INTVAL (operands[2]) + INTVAL (operands[3]) > 32
+ || side_effects_p (operands[1])
+ || (MEM_P (operands[1])
+ && mode_dependent_address_p (XEXP (operands[1], 0),
+ MEM_ADDR_SPACE (operands[1]))))
+ return \"extzv %3,%2,%1,%0\";
+ if (INTVAL (operands[2]) == 8)
+ return \"rotl %R3,%1,%0\;movzbl %0,%0\";
+ if (INTVAL (operands[2]) == 16)
+ return \"rotl %R3,%1,%0\;movzwl %0,%0\";
+ if (MEM_P (operands[1])
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && REG_P (XEXP (XEXP (operands[1], 0), 0))
+ && CONST_INT_P (XEXP (XEXP (operands[1], 0), 1))
+ && CONST_INT_P (operands[2])
+ && CONST_INT_P (operands[3]))
+ {
+ HOST_WIDE_INT o = INTVAL (XEXP (XEXP (operands[1], 0), 1));
+ HOST_WIDE_INT l = INTVAL (operands[2]);
+ HOST_WIDE_INT v = INTVAL (operands[3]);
+ if ((o & 3) && (o & 3) * 8 + v + l <= 32)
+ {
+ rtx tmp;
+ tmp = XEXP (XEXP (operands[1], 0), 0);
+ if (o & ~3)
+ tmp = gen_rtx_PLUS (SImode, tmp, GEN_INT (o & ~3));
+ operands[1] = gen_rtx_MEM (QImode, tmp);
+ operands[3] = GEN_INT (v + (o & 3) * 8);
+ }
+ if (optimize_size)
+ return \"extzv %3,%2,%1,%0\";
+ }
+ return \"rotl %R3,%1,%0\;bicl2 %M2,%0\";
+}")
+
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" ""))
+ (match_operand:SI 3 "general_operand" ""))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+g")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "nrmT"))
+ (match_operand:SI 3 "general_operand" "nrmT"))]
+ ""
+ "*
+{
+ if (MEM_P (operands[0])
+ && GET_CODE (XEXP (operands[0], 0)) == PLUS
+ && REG_P (XEXP (XEXP (operands[0], 0), 0))
+ && CONST_INT_P (XEXP (XEXP (operands[0], 0), 1))
+ && CONST_INT_P (operands[1])
+ && CONST_INT_P (operands[2]))
+ {
+ HOST_WIDE_INT o = INTVAL (XEXP (XEXP (operands[0], 0), 1));
+ HOST_WIDE_INT v = INTVAL (operands[2]);
+ HOST_WIDE_INT l = INTVAL (operands[1]);
+ if ((o & 3) && (o & 3) * 8 + v + l <= 32)
+ {
+ rtx tmp;
+ tmp = XEXP (XEXP (operands[0], 0), 0);
+ if (o & ~3)
+ tmp = gen_rtx_PLUS (SImode, tmp, GEN_INT (o & ~3));
+ operands[0] = gen_rtx_MEM (QImode, tmp);
+ operands[2] = GEN_INT (v + (o & 3) * 8);
+ }
+ }
+ return \"insv %3,%2,%1,%0\";
+}")
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "nrmT"))
+ (match_operand:SI 3 "general_operand" "nrmT"))]
+ ""
+ "insv %3,%2,%1,%0")
+
+;; Unconditional jump
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "jbr %l0")
+
+;; Conditional jumps
+
+(define_expand "cbranch<mode>4"
+ [(set (cc0)
+ (compare (match_operand:VAXint 1 "nonimmediate_operand" "")
+ (match_operand:VAXint 2 "general_operand" "")))
+ (set (pc)
+ (if_then_else
+ (match_operator 0 "ordered_comparison_operator" [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "")
+
+(define_expand "cbranch<mode>4"
+ [(set (cc0)
+ (compare (match_operand:VAXfp 1 "general_operand" "")
+ (match_operand:VAXfp 2 "general_operand" "")))
+ (set (pc)
+ (if_then_else
+ (match_operator 0 "ordered_comparison_operator" [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "")
+
+(define_insn "*branch"
+ [(set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "j%c0 %l1")
+
+;; Recognize reversed jumps.
+(define_insn "*branch_reversed"
+ [(set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 1 "" ""))))]
+ ""
+ "j%C0 %l1") ; %C0 negates condition
+
+;; Recognize jbs, jlbs, jbc and jlbc instructions. Note that the operand
+;; of jlbs and jlbc insns is SImode in the hardware.  However, if it is
+;; memory, we use QImode in the insn. So we can't use those instructions
+;; for mode-dependent addresses.
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:SI (match_operand:QI 0 "memory_operand" "Q,g")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "I,nrmT"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "@
+ jlbs %0,%l2
+ jbs %1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:SI (match_operand:QI 0 "memory_operand" "Q,g")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "I,nrmT"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "@
+ jlbc %0,%l2
+ jbc %1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r,r")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "I,nrmT"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "@
+ jlbs %0,%l2
+ jbs %1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r,r")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "I,nrmT"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "@
+ jlbc %0,%l2
+ jbc %1,%0,%l2")
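+;; For example, `if (flags & 1) goto L' can use the low-bit form
+;;   jlbs flags,L
+;; while a test of bit n uses
+;;   jbs $n,flags,L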
+
+;; Subtract-and-jump and Add-and-jump insns.
+;; These are not used when output is for the Unix assembler
+;; because it does not know how to modify them to reach far.
+
+;; Normal sob insns.
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (gt (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g")
+ (const_int -1))
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ "!TARGET_UNIX_ASM"
+ "jsobgtr %0,%l1")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ge (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g")
+ (const_int -1))
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ "!TARGET_UNIX_ASM"
+ "jsobgeq %0,%l1")
+
+;; Normal aob insns. Define a version for when operands[1] is a constant.
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (lt (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g")
+ (const_int 1))
+ (match_operand:SI 1 "general_operand" "nrmT"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ "!TARGET_UNIX_ASM"
+ "jaoblss %1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (lt (match_operand:SI 0 "nonimmediate_operand" "+g")
+ (match_operand:SI 1 "general_operand" "nrmT"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ "!TARGET_UNIX_ASM && CONST_INT_P (operands[1])"
+ "jaoblss %P1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (le (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g")
+ (const_int 1))
+ (match_operand:SI 1 "general_operand" "nrmT"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ "!TARGET_UNIX_ASM"
+ "jaobleq %1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (le (match_operand:SI 0 "nonimmediate_operand" "+g")
+ (match_operand:SI 1 "general_operand" "nrmT"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ "!TARGET_UNIX_ASM && CONST_INT_P (operands[1])"
+ "jaobleq %P1,%0,%l2")
+
+;; Something like a sob insn, but compares against -1.
+;; This finds `while (foo--)' which was changed to `while (--foo != -1)'.
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (match_operand:SI 0 "nonimmediate_operand" "+g")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ ""
+ "decl %0\;jgequ %l1")
+
+(define_expand "call_pop"
+ [(parallel [(call (match_operand:QI 0 "memory_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))
+ (set (reg:SI VAX_SP_REGNUM)
+ (plus:SI (reg:SI VAX_SP_REGNUM)
+ (match_operand:SI 3 "immediate_operand" "")))])]
+ ""
+{
+ gcc_assert (INTVAL (operands[3]) <= 255 * 4 && INTVAL (operands[3]) % 4 == 0);
+
+ /* Operand 1 is the number of bytes to be popped by DW_CFA_GNU_args_size
+ during EH unwinding. We must include the argument count pushed by
+ the calls instruction. */
+ operands[1] = GEN_INT (INTVAL (operands[3]) + 4);
+})
+
+(define_insn "*call_pop"
+ [(call (match_operand:QI 0 "memory_operand" "m")
+ (match_operand:SI 1 "const_int_operand" "n"))
+ (set (reg:SI VAX_SP_REGNUM) (plus:SI (reg:SI VAX_SP_REGNUM)
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ ""
+{
+ operands[1] = GEN_INT ((INTVAL (operands[1]) - 4) / 4);
+ return "calls %1,%0";
+})
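+;; For example, a call foo (a, b, c) pushes three longwords, so
+;; operands[3] is 12; the expander records 16 (12 plus the argument-count
+;; longword pushed by calls) for unwinding, and the insn above turns that
+;; back into
+;;   calls $3,foo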
+
+(define_expand "call_value_pop"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "memory_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (reg:SI VAX_SP_REGNUM)
+ (plus:SI (reg:SI VAX_SP_REGNUM)
+ (match_operand:SI 4 "immediate_operand" "")))])]
+ ""
+{
+ gcc_assert (INTVAL (operands[4]) <= 255 * 4 && INTVAL (operands[4]) % 4 == 0);
+
+ /* Operand 2 is the number of bytes to be popped by DW_CFA_GNU_args_size
+ during EH unwinding. We must include the argument count pushed by
+ the calls instruction. */
+ operands[2] = GEN_INT (INTVAL (operands[4]) + 4);
+})
+
+(define_insn "*call_value_pop"
+ [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "memory_operand" "m")
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (reg:SI VAX_SP_REGNUM) (plus:SI (reg:SI VAX_SP_REGNUM)
+ (match_operand:SI 3 "immediate_operand" "i")))]
+ ""
+ "*
+{
+ operands[2] = GEN_INT ((INTVAL (operands[2]) - 4) / 4);
+ return \"calls %2,%1\";
+}")
+
+(define_expand "call"
+ [(call (match_operand:QI 0 "memory_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ ""
+ "
+{
+ /* Operand 1 is the number of bytes to be popped by DW_CFA_GNU_args_size
+ during EH unwinding. We must include the argument count pushed by
+ the calls instruction. */
+ operands[1] = GEN_INT (INTVAL (operands[1]) + 4);
+}")
+
+(define_insn "*call"
+ [(call (match_operand:QI 0 "memory_operand" "m")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ ""
+ "calls $0,%0")
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "memory_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ ""
+ "
+{
+ /* Operand 2 is the number of bytes to be popped by DW_CFA_GNU_args_size
+ during EH unwinding. We must include the argument count pushed by
+ the calls instruction. */
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 4);
+}")
+
+(define_insn "*call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "memory_operand" "m")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ ""
+ "calls $0,%1")
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (gen_call_pop (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)]
+ ""
+ "")
+
+(define_insn "procedure_entry_mask"
+ [(unspec_volatile [(match_operand 0 "const_int_operand")] VUNSPEC_PEM)]
+ ""
+ ".word %x0")
+
+(define_insn "return"
+ [(return)]
+ ""
+ "ret")
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+{
+ vax_expand_prologue ();
+ DONE;
+})
+
+(define_expand "epilogue"
+ [(return)]
+ ""
+ "
+{
+ emit_jump_insn (gen_return ());
+ DONE;
+}")
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop")
+
+;; This had a wider constraint once, and it had trouble.
+;; If you are tempted to try `g', please don't: it's not worth
+;; the risk that we will reopen the same bug.
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "jmp (%0)")
+
+;; This is here to accept 5 arguments (as passed by expand_end_case)
+;; and pass the first 4 along to the casesi1 pattern that really does
+;; the actual casesi work. We emit a jump here to the default label
+;; _before_ the casesi so that we can be sure that the casesi never
+;; drops through.
+;; This is suboptimal perhaps, but so is much of the rest of this
+;; machine description. For what it's worth, HPPA uses the same trick.
+;;
+;; operand 0 is index
+;; operand 1 is the minimum bound (a const_int)
+;; operand 2 is the maximum bound - minimum bound + 1 (also a const_int)
+;; operand 3 is CODE_LABEL for the table;
+;; operand 4 is the CODE_LABEL to go to if index out of range (i.e. default).
+;;
+;; We emit:
+;; i = index - minimum_bound
+;; if (i > (maximum_bound - minimum_bound + 1)) goto default;
+;; casesi (i, 0, table);
+;;
+(define_expand "casesi"
+ [(match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" "")
+ (match_operand 3 "" "")
+ (match_operand 4 "" "")]
+ ""
+{
+ rtx test;
+
+ /* i = index - minimum_bound;
+ But only if the lower bound is not already zero. */
+ if (operands[1] != const0_rtx)
+ {
+ rtx index = gen_reg_rtx (SImode);
+ emit_insn (gen_addsi3 (index,
+ operands[0],
+ GEN_INT (-INTVAL (operands[1]))));
+ operands[0] = index;
+ }
+
+ /* if (i > (maximum_bound - minimum_bound + 1)) goto default; */
+ test = gen_rtx_fmt_ee (GTU, VOIDmode, operands[0], operands[2]);
+ emit_jump_insn (gen_cbranchsi4 (test, operands[0], operands[2], operands[4]));
+
+ /* casesi (i, 0, table); */
+ emit_jump_insn (gen_casesi1 (operands[0], operands[2], operands[3]));
+ DONE;
+})
+
+;; This insn is a bit of a liar. It actually falls through if no case
+;; matches. But we prevent that from ever happening by emitting a jump
+;; before this; see the define_expand above.
+(define_insn "casesi1"
+ [(match_operand:SI 1 "const_int_operand" "n")
+ (set (pc)
+ (plus:SI (sign_extend:SI
+ (mem:HI (plus:SI (mult:SI (match_operand:SI 0 "general_operand" "nrmT")
+ (const_int 2))
+ (pc))))
+ (label_ref:SI (match_operand 2 "" ""))))]
+ ""
+ "casel %0,$0,%1")
+
+(define_insn "pushextsym"
+ [(set (match_operand:SI 0 "push_operand" "=g")
+ (match_operand:SI 1 "external_symbolic_operand" "i"))]
+ ""
+ "pushab %a1")
+
+(define_insn "movextsym"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (match_operand:SI 1 "external_symbolic_operand" "i"))]
+ ""
+ "movab %a1,%0")
+
+(define_insn "pushlclsym"
+ [(set (match_operand:SI 0 "push_operand" "=g")
+ (match_operand:SI 1 "local_symbolic_operand" "i"))]
+ ""
+ "pushab %a1")
+
+(define_insn "movlclsym"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (match_operand:SI 1 "local_symbolic_operand" "i"))]
+ ""
+ "movab %a1,%0")
+
+;;- load or push effective address
+;; These come after the move and add/sub patterns
+;; because we don't want pushl $1 turned into pushad 1,
+;; or addl3 r1,r2,r3 turned into movab 0(r1)[r2],r3.
+
+;; It does not work to use constraints to distinguish pushes from moves,
+;; because < matches any autodecrement, not just a push.
+
+(define_insn "pushaddr<mode>"
+ [(set (match_operand:SI 0 "push_operand" "=g")
+ (match_operand:VAXintQHSD 1 "address_operand" "p"))]
+ ""
+ "pusha<VAXintQHSD:isfx> %a1")
+
+(define_insn "movaddr<mode>"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (match_operand:VAXintQHSD 1 "address_operand" "p"))]
+ ""
+ "mova<VAXintQHSD:isfx> %a1,%0")
+
+(define_insn "pushaddr<mode>"
+ [(set (match_operand:SI 0 "push_operand" "=g")
+ (match_operand:VAXfp 1 "address_operand" "p"))]
+ ""
+ "pusha<VAXfp:fsfx> %a1")
+
+(define_insn "movaddr<mode>"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
+ (match_operand:VAXfp 1 "address_operand" "p"))]
+ ""
+ "mova<VAXfp:fsfx> %a1,%0")
+
+;; These used to be peepholes, but it is more straightforward to do them
+;; as single insns. However, we must force the output to be a register
+;; if it is not an offsettable address so that we know that we can assign
+;; to it twice.
+
+;; If we had a good way of evaluating the relative costs, these could be
+;; machine-independent.
+
+;; Optimize extzv ...,z; andl2 ...,z
+;; or ashl ...,z; andl2 ...,z
+;; with other operands constant. This is what the combiner converts the
+;; above sequences to before attempting to recognize the new insn.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro")
+ (and:SI (ashiftrt:SI (match_operand:SI 1 "general_operand" "nrmT")
+ (match_operand:QI 2 "const_int_operand" "n"))
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ "(INTVAL (operands[3]) & ~((1 << (32 - INTVAL (operands[2]))) - 1)) == 0"
+ "*
+{
+ unsigned long mask1 = INTVAL (operands[3]);
+ unsigned long mask2 = (1 << (32 - INTVAL (operands[2]))) - 1;
+
+ if ((mask1 & mask2) != mask1)
+ operands[3] = GEN_INT (mask1 & mask2);
+
+ return \"rotl %R2,%1,%0\;bicl2 %N3,%0\";
+}")
+
+;; left-shift and mask
+;; The only case where `ashl' is better is if the mask only turns off
+;; bits that the ashl would clear anyway, in which case it should have
+;; been optimized away.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro")
+ (and:SI (ashift:SI (match_operand:SI 1 "general_operand" "nrmT")
+ (match_operand:QI 2 "const_int_operand" "n"))
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ ""
+ "*
+{
+ operands[3]
+ = GEN_INT (INTVAL (operands[3]) & ~((1 << INTVAL (operands[2])) - 1));
+ return \"rotl %2,%1,%0\;bicl2 %N3,%0\";
+}")
+
+;; Instruction sequence to sync the VAX instruction stream.
+(define_insn "sync_istream"
+ [(unspec_volatile [(const_int 0)] VUNSPEC_SYNC_ISTREAM)]
+ ""
+ "movpsl -(%|sp)\;pushal 1(%|pc)\;rei")
+
+(define_expand "nonlocal_goto"
+ [(use (match_operand 0 "general_operand" ""))
+ (use (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "general_operand" ""))
+ (use (match_operand 3 "general_operand" ""))]
+ ""
+{
+ rtx lab = operands[1];
+ rtx stack = operands[2];
+ rtx fp = operands[3];
+
+ emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)));
+ emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
+
+ emit_move_insn (hard_frame_pointer_rtx, fp);
+ emit_stack_restore (SAVE_NONLOCAL, stack);
+
+ emit_use (hard_frame_pointer_rtx);
+ emit_use (stack_pointer_rtx);
+
+  /* We'll convert this to a direct jump via a peephole optimization.  */
+ emit_indirect_jump (copy_to_reg (lab));
+ emit_barrier ();
+ DONE;
+})
diff --git a/gcc-4.8/gcc/config/vax/vax.opt b/gcc-4.8/gcc/config/vax/vax.opt
new file mode 100644
index 000000000..25f81f0b3
--- /dev/null
+++ b/gcc-4.8/gcc/config/vax/vax.opt
@@ -0,0 +1,51 @@
+; Options for the VAX port of the compiler.
+
+; Copyright (C) 2005-2013 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+md
+Target RejectNegative InverseMask(G_FLOAT)
+Generate DFLOAT double precision code
+
+md-float
+Target RejectNegative InverseMask(G_FLOAT)
+Generate DFLOAT double precision code
+
+mg
+Target RejectNegative Mask(G_FLOAT)
+Generate GFLOAT double precision code
+
+mg-float
+Target RejectNegative Mask(G_FLOAT)
+Generate GFLOAT double precision code
+
+mgnu
+Target RejectNegative InverseMask(UNIX_ASM)
+Generate code for GNU assembler (gas)
+
+munix
+Target RejectNegative Mask(UNIX_ASM)
+Generate code for UNIX assembler
+
+mvaxc-alignment
+Target RejectNegative Mask(VAXC_ALIGNMENT)
+Use VAXC structure conventions
+
+mqmath
+Target Mask(QMATH)
+Use new adddi3/subdi3 patterns