Diffstat (limited to 'gcc-4.9/gcc')
-rw-r--r-- gcc-4.9/gcc/config.gcc | 69
-rw-r--r-- gcc-4.9/gcc/config.in | 6
-rw-r--r-- gcc-4.9/gcc/config/mips/constraints.md | 70
-rw-r--r-- gcc-4.9/gcc/config/mips/linux.h | 5
-rw-r--r-- gcc-4.9/gcc/config/mips/linux64.h | 15
-rw-r--r-- gcc-4.9/gcc/config/mips/loongson.md | 30
-rw-r--r-- gcc-4.9/gcc/config/mips/mips-cpus.def | 15
-rw-r--r-- gcc-4.9/gcc/config/mips/mips-ftypes.def | 224
-rw-r--r-- gcc-4.9/gcc/config/mips/mips-modes.def | 17
-rw-r--r-- gcc-4.9/gcc/config/mips/mips-msa.md | 2905
-rw-r--r-- gcc-4.9/gcc/config/mips/mips-opts.h | 7
-rw-r--r-- gcc-4.9/gcc/config/mips/mips-protos.h | 40
-rw-r--r-- gcc-4.9/gcc/config/mips/mips-tables.opt | 421
-rw-r--r-- gcc-4.9/gcc/config/mips/mips.c | 2910
-rw-r--r-- gcc-4.9/gcc/config/mips/mips.h | 380
-rw-r--r-- gcc-4.9/gcc/config/mips/mips.md | 452
-rw-r--r-- gcc-4.9/gcc/config/mips/mips.opt | 43
-rw-r--r-- gcc-4.9/gcc/config/mips/msa.h | 1113
-rw-r--r-- gcc-4.9/gcc/config/mips/mti-elf.h | 3
-rw-r--r-- gcc-4.9/gcc/config/mips/mti-linux.h | 14
-rw-r--r-- gcc-4.9/gcc/config/mips/netbsd.h | 25
-rw-r--r-- gcc-4.9/gcc/config/mips/p5600.md | 304
-rw-r--r-- gcc-4.9/gcc/config/mips/predicates.md | 247
-rw-r--r-- gcc-4.9/gcc/config/mips/t-img-elf | 36
-rw-r--r-- gcc-4.9/gcc/config/mips/t-img-linux | 30
-rw-r--r-- gcc-4.9/gcc/config/mips/t-isa3264 | 8
-rw-r--r-- gcc-4.9/gcc/config/mips/t-linux-android | 4
-rw-r--r-- gcc-4.9/gcc/config/mips/t-mti-elf | 16
-rw-r--r-- gcc-4.9/gcc/config/mips/t-mti-linux | 18
-rw-r--r-- gcc-4.9/gcc/config/mips/t-sde | 2
-rw-r--r-- gcc-4.9/gcc/config/mips/t-sdemtk | 1
-rwxr-xr-x gcc-4.9/gcc/configure | 35
-rw-r--r-- gcc-4.9/gcc/configure.ac | 11
-rw-r--r-- gcc-4.9/gcc/doc/extend.texi | 783
-rw-r--r-- gcc-4.9/gcc/doc/invoke.texi | 55
-rw-r--r-- gcc-4.9/gcc/doc/md.texi | 6
-rw-r--r-- gcc-4.9/gcc/dwarf2cfi.c | 5
-rw-r--r-- gcc-4.9/gcc/lra-constraints.c | 207
-rw-r--r-- gcc-4.9/gcc/prefix.c | 1
-rw-r--r-- gcc-4.9/gcc/regcprop.c | 7
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.dg/vect/tree-vect.h | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/args-1.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/args-3.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/branch-cost-2.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-1.c | 21
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-2.c | 21
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-3.c | 23
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-4.c | 22
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-5.c | 21
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-4.c | 32
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-5.c | 32
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-6.c | 32
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/dmult-1.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/fpcmp-1.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/fpcmp-2.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/madd-3.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/madd-9.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/maddu-3.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/mips-ps-type-2.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/mips.exp | 103
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-1.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-2.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-3.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-1.c | 13
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-2.c | 13
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-3.c | 12
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/msa-type.c | 254
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/msa.c | 151
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/msub-3.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/msubu-3.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/neg-abs-2.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-1.c | 13
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-2.c | 10
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-3.c | 10
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-4.c | 15
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-5.c | 15
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-6.c | 13
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-7.c | 10
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/pr37362.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/timode-1.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/umips-store16-1.c | 30
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/umips-store16-2.c | 22
-rw-r--r-- gcc-4.9/gcc/testsuite/gcc.target/mips/unaligned-1.c | 2
-rw-r--r-- gcc-4.9/gcc/testsuite/lib/target-supports.exp | 191
-rw-r--r-- gcc-4.9/gcc/testsuite/lib/target-supports.exp.orig | 5791
85 files changed, 16709 insertions, 741 deletions
diff --git a/gcc-4.9/gcc/config.gcc b/gcc-4.9/gcc/config.gcc
index f5fbb7d70..41c9d81dc 100644
--- a/gcc-4.9/gcc/config.gcc
+++ b/gcc-4.9/gcc/config.gcc
@@ -422,7 +422,7 @@ microblaze*-*-*)
mips*-*-*)
cpu_type=mips
need_64bit_hwint=yes
- extra_headers="loongson.h"
+ extra_headers="loongson.h msa.h"
extra_options="${extra_options} g.opt mips/mips-tables.opt"
;;
nds32*)
@@ -1950,6 +1950,14 @@ mips*-*-netbsd*) # NetBSD/mips, either endian.
tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
;;
+mips*-img-linux*)
+ tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/gnu-user64.h mips/linux64.h mips/linux-common.h mips/mti-linux.h"
+ extra_options="${extra_options} linux-android.opt"
+ tmake_file="${tmake_file} mips/t-img-linux"
+ tm_defines="${tm_defines} MIPS_ISA_DEFAULT=37 MIPS_ABI_DEFAULT=ABI_32"
+ gnu_ld=yes
+ gas=yes
+ ;;
mips*-mti-linux*)
tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/gnu-user64.h mips/linux64.h mips/linux-common.h mips/mti-linux.h"
extra_options="${extra_options} linux-android.opt"
@@ -1961,7 +1969,6 @@ mips*-mti-linux*)
mips64*-*-linux* | mipsisa64*-*-linux*)
tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/gnu-user64.h mips/linux64.h mips/linux-common.h"
extra_options="${extra_options} linux-android.opt"
- tmake_file="${tmake_file} mips/t-linux64"
case ${target} in
*android*)
# Default to ABI_64 for MIPS64 Android
@@ -1969,6 +1976,7 @@ mips64*-*-linux* | mipsisa64*-*-linux*)
;;
*)
tm_defines="${tm_defines} MIPS_ABI_DEFAULT=ABI_N32"
+ tmake_file="${tmake_file} mips/t-linux64"
;;
esac
case ${target} in
@@ -1980,6 +1988,9 @@ mips64*-*-linux* | mipsisa64*-*-linux*)
tm_defines="${tm_defines} MIPS_CPU_STRING_DEFAULT=\\\"octeon\\\""
target_cpu_default=MASK_SOFT_FLOAT_ABI
;;
+ mipsisa64r6*-*-linux*)
+ tm_defines="${tm_defines} MIPS_ISA_DEFAULT=69"
+ ;;
mipsisa64r2*-*-linux*)
tm_defines="${tm_defines} MIPS_ISA_DEFAULT=65"
;;
@@ -1996,6 +2007,9 @@ mips*-*-linux*) # Linux MIPS, either endian.
fi
tm_file="${tm_file} mips/linux-common.h"
case ${target} in
+ mipsisa32r6*)
+ tm_defines="${tm_defines} MIPS_ISA_DEFAULT=37"
+ ;;
mipsisa32r2*)
tm_defines="${tm_defines} MIPS_ISA_DEFAULT=33"
;;
@@ -2014,6 +2028,11 @@ mips*-mti-elf*)
tmake_file="mips/t-mti-elf"
tm_defines="${tm_defines} MIPS_ISA_DEFAULT=33 MIPS_ABI_DEFAULT=ABI_32"
;;
+mips*-img-elf*)
+ tm_file="elfos.h newlib-stdint.h ${tm_file} mips/elf.h mips/n32-elf.h mips/sde.h mips/mti-elf.h"
+ tmake_file="mips/t-img-elf"
+ tm_defines="${tm_defines} MIPS_ISA_DEFAULT=37 MIPS_ABI_DEFAULT=ABI_32"
+ ;;
mips*-sde-elf*)
tm_file="elfos.h newlib-stdint.h ${tm_file} mips/elf.h mips/n32-elf.h mips/sde.h"
tmake_file="mips/t-sde"
@@ -2034,12 +2053,18 @@ mips*-sde-elf*)
;;
esac
case ${target} in
+ mipsisa32r6*)
+ tm_defines="MIPS_ISA_DEFAULT=37 MIPS_ABI_DEFAULT=ABI_32"
+ ;;
mipsisa32r2*)
tm_defines="MIPS_ISA_DEFAULT=33 MIPS_ABI_DEFAULT=ABI_32"
;;
mipsisa32*)
tm_defines="MIPS_ISA_DEFAULT=32 MIPS_ABI_DEFAULT=ABI_32"
;;
+ mipsisa64r6*)
+ tm_defines="MIPS_ISA_DEFAULT=69 MIPS_ABI_DEFAULT=ABI_N32"
+ ;;
mipsisa64r2*)
tm_defines="MIPS_ISA_DEFAULT=65 MIPS_ABI_DEFAULT=ABI_N32"
;;
@@ -2050,17 +2075,25 @@ mips*-sde-elf*)
;;
mipsisa32-*-elf* | mipsisa32el-*-elf* | \
mipsisa32r2-*-elf* | mipsisa32r2el-*-elf* | \
+mipsisa32r6-*-elf* | mipsisa32r6el-*-elf* | \
mipsisa64-*-elf* | mipsisa64el-*-elf* | \
-mipsisa64r2-*-elf* | mipsisa64r2el-*-elf*)
+mipsisa64r2-*-elf* | mipsisa64r2el-*-elf* | \
+mipsisa64r6-*-elf* | mipsisa64r6el-*-elf*)
tm_file="elfos.h newlib-stdint.h ${tm_file} mips/elf.h"
tmake_file="mips/t-isa3264"
case ${target} in
+ mipsisa32r6*)
+ tm_defines="${tm_defines} MIPS_ISA_DEFAULT=37"
+ ;;
mipsisa32r2*)
tm_defines="${tm_defines} MIPS_ISA_DEFAULT=33"
;;
mipsisa32*)
tm_defines="${tm_defines} MIPS_ISA_DEFAULT=32"
;;
+ mipsisa64r6*)
+ tm_defines="${tm_defines} MIPS_ISA_DEFAULT=69"
+ ;;
mipsisa64r2*)
tm_defines="${tm_defines} MIPS_ISA_DEFAULT=65"
;;
@@ -3766,7 +3799,7 @@ case "${target}" in
;;
mips*-*-*)
- supported_defaults="abi arch arch_32 arch_64 float fpu nan tune tune_32 tune_64 divide llsc mips-plt synci"
+ supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci"
case ${with_float} in
"" | soft | hard)
@@ -3798,6 +3831,32 @@ case "${target}" in
;;
esac
+ case ${with_fp_32} in
+ "" | 32 | xx | 64)
+ # OK
+ ;;
+ *)
+ echo "Unknown FP mode used in --with-fp-32=$with_fp_32" 1>&2
+ exit 1
+ ;;
+ esac
+
+ case ${with_odd_spreg_32} in
+ yes)
+ with_odd_spreg_32="odd-spreg"
+ ;;
+ no)
+ with_odd_spreg_32="no-odd-spreg"
+ ;;
+ "")
+ # OK
+ ;;
+ *)
+ echo "Unknown odd-spreg-32 type used in --with-odd-spreg-32=$with_odd_spreg_32" 1>&2
+ exit 1
+ ;;
+ esac
+
case ${with_abi} in
"" | 32 | o64 | n32 | 64 | eabi)
# OK
@@ -4200,7 +4259,7 @@ case ${target} in
esac
t=
-all_defaults="abi cpu cpu_32 cpu_64 arch arch_32 arch_64 tune tune_32 tune_64 schedule float mode fpu nan divide llsc mips-plt synci tls"
+all_defaults="abi cpu cpu_32 cpu_64 arch arch_32 arch_64 tune tune_32 tune_64 schedule float mode fpu nan fp_32 odd_spreg_32 divide llsc mips-plt synci tls"
for option in $all_defaults
do
eval "val=\$with_"`echo $option | sed s/-/_/g`
diff --git a/gcc-4.9/gcc/config.in b/gcc-4.9/gcc/config.in
index df7a3455f..4d57b87ca 100644
--- a/gcc-4.9/gcc/config.in
+++ b/gcc-4.9/gcc/config.in
@@ -447,6 +447,12 @@
#endif
+/* Define if the assembler understands .module. */
+#ifndef USED_FOR_TARGET
+#undef HAVE_AS_MODULE
+#endif
+
+
/* Define if your assembler supports the -no-mul-bug-abort option. */
#ifndef USED_FOR_TARGET
#undef HAVE_AS_NO_MUL_BUG_ABORT_OPTION
diff --git a/gcc-4.9/gcc/config/mips/constraints.md b/gcc-4.9/gcc/config/mips/constraints.md
index 49e48954f..92ab2dee5 100644
--- a/gcc-4.9/gcc/config/mips/constraints.md
+++ b/gcc-4.9/gcc/config/mips/constraints.md
@@ -19,7 +19,7 @@
;; Register constraints
-(define_register_constraint "d" "BASE_REG_CLASS"
+(define_register_constraint "d" "TARGET_MIPS16 ? M16_REGS : GR_REGS"
"An address register. This is equivalent to @code{r} unless
generating MIPS16 code.")
@@ -92,6 +92,9 @@
;; but the DSP version allows any accumulator target.
(define_register_constraint "ka" "ISA_HAS_DSP_MULT ? ACC_REGS : MD_REGS")
+(define_register_constraint "kb" "M16_STORE_REGS"
+ "@internal")
+
(define_constraint "kf"
"@internal"
(match_operand 0 "force_to_mem_operand"))
@@ -305,6 +308,61 @@
"@internal"
(match_operand 0 "low_bitmask_operand"))
+(define_constraint "YI"
+ "@internal
+ A replicated vector const in which the replicated value is a 10-bit signed
+ value."
+ (and (match_code "const_vector")
+ (match_test "mips_const_vector_same_int_p (op, mode, -1024, 1023)")))
+
+(define_constraint "YC"
+ "@internal
+ A replicated vector const in which the replicated value has a single
+ bit set."
+ (and (match_code "const_vector")
+ (match_test "mips_const_vector_bitimm_set_p (op, mode)")))
+
+(define_constraint "YZ"
+ "@internal
+ A replicated vector const in which the replicated value has a single
+ bit clear."
+ (and (match_code "const_vector")
+ (match_test "mips_const_vector_bitimm_clr_p (op, mode)")))
+
+(define_constraint "Unv5"
+ "@internal
+ A replicated vector const in which the replicated value is a negative
+ integer in the range [-31,0].
+ (and (match_code "const_vector")
+ (match_test "mips_const_vector_same_int_p (op, mode, -31, 0)")))
+
+(define_constraint "Uuv5"
+ "@internal
+ A replicated vector const in which the replicated value is a positive
+ integer in the range [0,31].
+ (and (match_code "const_vector")
+ (match_test "mips_const_vector_same_int_p (op, mode, 0, 31)")))
+
+(define_constraint "Uuv6"
+ "@internal
+ A replicated vector const in which the replicated value is an unsigned
+ 6-bit integer.
+ (and (match_code "const_vector")
+ (match_test "mips_const_vector_same_int_p (op, mode, 0, 63)")))
+
+(define_constraint "Uuv8"
+ "@internal
+ A replicated vector const in which the replicated value is an unsigned
+ 8-bit integer.
+ (and (match_code "const_vector")
+ (match_test "mips_const_vector_same_int_p (op, mode, 0, 255)")))
+
+(define_constraint "Ubv8"
+ "@internal
+ A replicated vector const in which the replicated value is an 8-bit byte.
+ (and (match_code "const_vector")
+ (match_test "mips_const_vector_same_byte_p (op, mode)")))
+
(define_memory_constraint "ZC"
"When compiling microMIPS code, this constraint matches a memory operand
whose address is formed from a base register and a 12-bit offset. These
@@ -318,13 +376,13 @@
(match_test "mips_address_insns (XEXP (op, 0), mode, false)"))))
(define_address_constraint "ZD"
- "When compiling microMIPS code, this constraint matches an address operand
- that is formed from a base register and a 12-bit offset. These operands
- can be used for microMIPS instructions such as @code{prefetch}. When
- not compiling for microMIPS code, @code{ZD} is equivalent to @code{p}."
+ "An address suitable for a @code{prefetch} instruction, or for any other
+ instruction with the same addressing mode as @code{prefetch}."
(if_then_else (match_test "TARGET_MICROMIPS")
(match_test "umips_12bit_offset_address_p (op, mode)")
- (match_test "mips_address_insns (op, mode, false)")))
+ (if_then_else (match_test "ISA_HAS_PREFETCH_9BIT")
+ (match_test "mips_9bit_offset_address_p (op, mode)")
+ (match_test "mips_address_insns (op, mode, false)"))))
(define_memory_constraint "ZR"
"@internal
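The new YI/YC/YZ and Unv5/Uuv5/Uuv6/Uuv8/Ubv8 constraints all match CONST_VECTORs whose elements replicate a single immediate, which lets the MSA patterns added later in this patch pick immediate instruction forms (addvi, bseti, bnegi, and so on) instead of first loading the constant into a vector register. As a hedged, C-level sketch of where such replicated constants come from (the typedef name is arbitrary and -mmsa is assumed):

typedef int v4i32 __attribute__ ((vector_size (16)));

v4i32 add_five (v4i32 a)
{
  /* The constant vector {5, 5, 5, 5} replicates an unsigned 5-bit value, so a
     constraint such as Uuv5 allows the immediate form (e.g. addvi.w) rather
     than materializing the constant in a register.  */
  return a + (v4i32) {5, 5, 5, 5};
}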
diff --git a/gcc-4.9/gcc/config/mips/linux.h b/gcc-4.9/gcc/config/mips/linux.h
index e539422d4..d045089d8 100644
--- a/gcc-4.9/gcc/config/mips/linux.h
+++ b/gcc-4.9/gcc/config/mips/linux.h
@@ -18,8 +18,9 @@ along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#define GLIBC_DYNAMIC_LINKER \
- "%{mnan=2008:/lib/ld-linux-mipsn8.so.1;:/lib/ld.so.1}"
+ "%{mnan=2008|mips32r6|mips64r6:/lib/ld-linux-mipsn8.so.1;:/lib/ld.so.1}"
#undef UCLIBC_DYNAMIC_LINKER
#define UCLIBC_DYNAMIC_LINKER \
- "%{mnan=2008:/lib/ld-uClibc-mipsn8.so.0;:/lib/ld-uClibc.so.0}"
+ "%{mnan=2008|mips32r6|mips64r6:/lib/ld-uClibc-mipsn8.so.0;" \
+ ":/lib/ld-uClibc.so.0}"
diff --git a/gcc-4.9/gcc/config/mips/linux64.h b/gcc-4.9/gcc/config/mips/linux64.h
index 7ad3b2af2..d09fe2367 100644
--- a/gcc-4.9/gcc/config/mips/linux64.h
+++ b/gcc-4.9/gcc/config/mips/linux64.h
@@ -23,20 +23,23 @@ along with GCC; see the file COPYING3. If not see
#define GNU_USER_LINK_EMULATIONN32 "elf32%{EB:b}%{EL:l}tsmipn32"
#define GLIBC_DYNAMIC_LINKER32 \
- "%{mnan=2008:/lib/ld-linux-mipsn8.so.1;:/lib/ld.so.1}"
+ "%{mnan=2008|mips32r6|mips64r6:/lib/ld-linux-mipsn8.so.1;:/lib/ld.so.1}"
#define GLIBC_DYNAMIC_LINKER64 \
- "%{mnan=2008:/lib64/ld-linux-mipsn8.so.1;:/lib64/ld.so.1}"
+ "%{mnan=2008|mips32r6|mips64r6:/lib64/ld-linux-mipsn8.so.1;:/lib64/ld.so.1}"
#define GLIBC_DYNAMIC_LINKERN32 \
- "%{mnan=2008:/lib32/ld-linux-mipsn8.so.1;:/lib32/ld.so.1}"
+ "%{mnan=2008|mips32r6|mips64r6:/lib32/ld-linux-mipsn8.so.1;:/lib32/ld.so.1}"
#undef UCLIBC_DYNAMIC_LINKER32
#define UCLIBC_DYNAMIC_LINKER32 \
- "%{mnan=2008:/lib/ld-uClibc-mipsn8.so.0;:/lib/ld-uClibc.so.0}"
+ "%{mnan=2008|mips32r6|mips64r6:/lib/ld-uClibc-mipsn8.so.0;" \
+ ":/lib/ld-uClibc.so.0}"
#undef UCLIBC_DYNAMIC_LINKER64
#define UCLIBC_DYNAMIC_LINKER64 \
- "%{mnan=2008:/lib/ld64-uClibc-mipsn8.so.0;:/lib/ld64-uClibc.so.0}"
+ "%{mnan=2008|mips32r6|mips64r6:/lib/ld64-uClibc-mipsn8.so.0;" \
+ ":/lib/ld64-uClibc.so.0}"
#define UCLIBC_DYNAMIC_LINKERN32 \
- "%{mnan=2008:/lib32/ld-uClibc-mipsn8.so.0;:/lib32/ld-uClibc.so.0}"
+ "%{mnan=2008|mips32r6|mips64r6:/lib32/ld-uClibc-mipsn8.so.0;" \
+ ":/lib32/ld-uClibc.so.0}"
#define BIONIC_DYNAMIC_LINKERN32 "/system/bin/linker32"
#define GNU_USER_DYNAMIC_LINKERN32 \
diff --git a/gcc-4.9/gcc/config/mips/loongson.md b/gcc-4.9/gcc/config/mips/loongson.md
index 474033d1e..08691313c 100644
--- a/gcc-4.9/gcc/config/mips/loongson.md
+++ b/gcc-4.9/gcc/config/mips/loongson.md
@@ -907,33 +907,3 @@
mips_expand_vec_reduc (operands[0], operands[1], gen_umin<mode>3);
DONE;
})
-
-;; Integer division and modulus. For integer multiplication, see mips.md.
-
-(define_insn "<u>div<mode>3"
- [(set (match_operand:GPR 0 "register_operand" "=&d")
- (any_div:GPR (match_operand:GPR 1 "register_operand" "d")
- (match_operand:GPR 2 "register_operand" "d")))]
- "TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A"
- {
- if (TARGET_LOONGSON_2EF)
- return mips_output_division ("<d>div<u>.g\t%0,%1,%2", operands);
- else
- return mips_output_division ("gs<d>div<u>\t%0,%1,%2", operands);
- }
- [(set_attr "type" "idiv3")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "<u>mod<mode>3"
- [(set (match_operand:GPR 0 "register_operand" "=&d")
- (any_mod:GPR (match_operand:GPR 1 "register_operand" "d")
- (match_operand:GPR 2 "register_operand" "d")))]
- "TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A"
- {
- if (TARGET_LOONGSON_2EF)
- return mips_output_division ("<d>mod<u>.g\t%0,%1,%2", operands);
- else
- return mips_output_division ("gs<d>mod<u>\t%0,%1,%2", operands);
- }
- [(set_attr "type" "idiv3")
- (set_attr "mode" "<MODE>")])
diff --git a/gcc-4.9/gcc/config/mips/mips-cpus.def b/gcc-4.9/gcc/config/mips/mips-cpus.def
index 07fbf9c7e..8f480772a 100644
--- a/gcc-4.9/gcc/config/mips/mips-cpus.def
+++ b/gcc-4.9/gcc/config/mips/mips-cpus.def
@@ -44,9 +44,19 @@ MIPS_CPU ("mips4", PROCESSOR_R8000, 4, 0)
isn't tuned to a specific processor. */
MIPS_CPU ("mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY)
MIPS_CPU ("mips32r2", PROCESSOR_74KF2_1, 33, PTF_AVOID_BRANCHLIKELY)
+/* mips32r3 is microMIPS, hence it uses the M4K processor.
+ mips32r5 should use the p5600 processor, but there is no definition
+ for this yet, so in the short term we will use the same processor entry
+ as mips32r2. */
+MIPS_CPU ("mips32r3", PROCESSOR_M4K, 34, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("mips32r5", PROCESSOR_P5600, 36, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("mips32r6", PROCESSOR_W32, 37, PTF_AVOID_BRANCHLIKELY)
MIPS_CPU ("mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY)
-/* ??? For now just tune the generic MIPS64r2 for 5KC as well. */
+/* ??? For now just tune the generic MIPS64r2 and above for 5KC as well. */
MIPS_CPU ("mips64r2", PROCESSOR_5KC, 65, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("mips64r3", PROCESSOR_5KC, 66, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("mips64r5", PROCESSOR_5KC, 68, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("mips64r6", PROCESSOR_W64, 69, PTF_AVOID_BRANCHLIKELY)
/* MIPS I processors. */
MIPS_CPU ("r3000", PROCESSOR_R3000, 1, 0)
@@ -137,6 +147,9 @@ MIPS_CPU ("1004kf2_1", PROCESSOR_24KF2_1, 33, 0)
MIPS_CPU ("1004kf", PROCESSOR_24KF2_1, 33, 0)
MIPS_CPU ("1004kf1_1", PROCESSOR_24KF1_1, 33, 0)
+/* MIPS32 Release 5 processors. */
+MIPS_CPU ("p5600", PROCESSOR_P5600, 36, PTF_AVOID_BRANCHLIKELY)
+
/* MIPS64 processors. */
MIPS_CPU ("5kc", PROCESSOR_5KC, 64, 0)
MIPS_CPU ("5kf", PROCESSOR_5KF, 64, 0)
diff --git a/gcc-4.9/gcc/config/mips/mips-ftypes.def b/gcc-4.9/gcc/config/mips/mips-ftypes.def
index 078a595a8..fde206d1c 100644
--- a/gcc-4.9/gcc/config/mips/mips-ftypes.def
+++ b/gcc-4.9/gcc/config/mips/mips-ftypes.def
@@ -36,6 +36,230 @@ along with GCC; see the file COPYING3. If not see
DEF_MIPS_FTYPE (1, (DF, DF))
DEF_MIPS_FTYPE (2, (DF, DF, DF))
+DEF_MIPS_FTYPE (2, (V16QI, V16QI, V16QI))
+DEF_MIPS_FTYPE (2, (V8HI, V8HI, V8HI))
+DEF_MIPS_FTYPE (2, (V4SI, V4SI, V4SI))
+DEF_MIPS_FTYPE (2, (V2DI, V2DI, V2DI))
+
+DEF_MIPS_FTYPE (2, (UV16QI, UV16QI, UV16QI))
+DEF_MIPS_FTYPE (2, (UV8HI, UV8HI, UV8HI))
+DEF_MIPS_FTYPE (2, (UV4SI, UV4SI, UV4SI))
+DEF_MIPS_FTYPE (2, (UV2DI, UV2DI, UV2DI))
+
+DEF_MIPS_FTYPE (2, (V16QI, V16QI, UQI))
+DEF_MIPS_FTYPE (2, (V8HI, V8HI, UQI))
+DEF_MIPS_FTYPE (2, (V4SI, V4SI, UQI))
+DEF_MIPS_FTYPE (2, (V2DI, V2DI, UQI))
+
+DEF_MIPS_FTYPE (2, (V16QI, V16QI, QI))
+DEF_MIPS_FTYPE (2, (V8HI, V8HI, QI))
+DEF_MIPS_FTYPE (2, (V4SI, V4SI, QI))
+DEF_MIPS_FTYPE (2, (V2DI, V2DI, QI))
+
+DEF_MIPS_FTYPE (2, (UV16QI, UV16QI, UQI))
+DEF_MIPS_FTYPE (2, (UV8HI, UV8HI, UQI))
+DEF_MIPS_FTYPE (2, (UV4SI, UV4SI, UQI))
+DEF_MIPS_FTYPE (2, (UV2DI, UV2DI, UQI))
+
+DEF_MIPS_FTYPE (3, (V16QI, V16QI, V16QI, V16QI))
+DEF_MIPS_FTYPE (3, (V8HI, V8HI, V8HI, V8HI))
+DEF_MIPS_FTYPE (3, (V4SI, V4SI, V4SI, V4SI))
+DEF_MIPS_FTYPE (3, (V2DI, V2DI, V2DI, V2DI))
+DEF_MIPS_FTYPE (3, (V4SF, V4SI, V4SF, V4SF))
+DEF_MIPS_FTYPE (3, (V2DF, V2DI, V2DF, V2DF))
+
+DEF_MIPS_FTYPE (3, (V16QI, V16QI, V16QI, UQI))
+DEF_MIPS_FTYPE (3, (V8HI, V8HI, V8HI, UQI))
+DEF_MIPS_FTYPE (3, (V4SI, V4SI, V4SI, UQI))
+DEF_MIPS_FTYPE (3, (V2DI, V2DI, V2DI, UQI))
+
+DEF_MIPS_FTYPE (2, (SI, V16QI, UQI))
+DEF_MIPS_FTYPE (2, (SI, V8HI, UQI))
+DEF_MIPS_FTYPE (2, (SI, V4SI, UQI))
+DEF_MIPS_FTYPE (2, (SF, V4SF, UQI))
+
+DEF_MIPS_FTYPE (2, (DI, V2DI, UQI))
+DEF_MIPS_FTYPE (2, (DF, V2DF, UQI))
+
+DEF_MIPS_FTYPE (3, (V16QI, V16QI, SI, UQI))
+DEF_MIPS_FTYPE (3, (V8HI, V8HI, SI, UQI))
+DEF_MIPS_FTYPE (3, (V4SI, V4SI, SI, UQI))
+DEF_MIPS_FTYPE (3, (V2DI, V2DI, DI, UQI))
+DEF_MIPS_FTYPE (3, (V4SF, V4SF, SF, UQI))
+DEF_MIPS_FTYPE (3, (V2DF, V2DF, DF, UQI))
+
+DEF_MIPS_FTYPE (2, (V8HI, V16QI, V16QI))
+DEF_MIPS_FTYPE (2, (V4SI, V8HI, V8HI))
+DEF_MIPS_FTYPE (2, (V2DI, V4SI, V4SI))
+DEF_MIPS_FTYPE (2, (UV8HI, UV16QI, UV16QI))
+DEF_MIPS_FTYPE (2, (UV4SI, UV8HI, UV8HI))
+DEF_MIPS_FTYPE (2, (UV2DI, UV4SI, UV4SI))
+
+DEF_MIPS_FTYPE (3, (V8HI, V8HI, V16QI, V16QI))
+DEF_MIPS_FTYPE (3, (V4SI, V4SI, V8HI, V8HI))
+DEF_MIPS_FTYPE (3, (V2DI, V2DI, V4SI, V4SI))
+DEF_MIPS_FTYPE (3, (UV8HI, UV8HI, UV16QI, UV16QI))
+DEF_MIPS_FTYPE (3, (UV4SI, UV4SI, UV8HI, UV8HI))
+DEF_MIPS_FTYPE (3, (UV2DI, UV2DI, UV4SI, UV4SI))
+
+DEF_MIPS_FTYPE (2, (V4SF, V4SF, V4SF))
+DEF_MIPS_FTYPE (2, (V2DF, V2DF, V2DF))
+
+DEF_MIPS_FTYPE (2, (V4SI, V4SF, V4SF))
+DEF_MIPS_FTYPE (2, (V2DI, V2DF, V2DF))
+
+DEF_MIPS_FTYPE (1, (V4SI, V4SF))
+DEF_MIPS_FTYPE (1, (V2DI, V2DF))
+
+DEF_MIPS_FTYPE (2, (V4SF, V4SF, V4SI))
+DEF_MIPS_FTYPE (2, (V2DF, V2DF, V2DI))
+
+DEF_MIPS_FTYPE (1, (V4SF, V4SI))
+DEF_MIPS_FTYPE (1, (V2DF, V2DI))
+
+DEF_MIPS_FTYPE (1, (V4SF, UV4SI))
+DEF_MIPS_FTYPE (1, (V2DF, UV2DI))
+
+DEF_MIPS_FTYPE (1, (V4SF, V8HI))
+DEF_MIPS_FTYPE (1, (V2DF, V4SI))
+
+DEF_MIPS_FTYPE (3, (V4SF, V4SF, V4SF, V4SF))
+DEF_MIPS_FTYPE (3, (V2DF, V2DF, V2DF, V2DF))
+
+DEF_MIPS_FTYPE (1, (UV4SI, V4SF))
+DEF_MIPS_FTYPE (1, (UV2DI, V2DF))
+
+DEF_MIPS_FTYPE (2, (V8HI, V4SF, V4SF))
+DEF_MIPS_FTYPE (2, (V4SI, V2DF, V2DF))
+
+DEF_MIPS_FTYPE (1, (V16QI, V16QI))
+DEF_MIPS_FTYPE (1, (V8HI, V8HI))
+DEF_MIPS_FTYPE (1, (V4SI, V4SI))
+DEF_MIPS_FTYPE (1, (V2DI, V2DI))
+DEF_MIPS_FTYPE (1, (V4SF, V4SF))
+DEF_MIPS_FTYPE (1, (V2DF, V2DF))
+
+DEF_MIPS_FTYPE (2, (UV16QI, V16QI, V16QI))
+DEF_MIPS_FTYPE (2, (UV8HI, V8HI, V8HI))
+DEF_MIPS_FTYPE (2, (UV4SI, V4SI, V4SI))
+DEF_MIPS_FTYPE (2, (UV2DI, V2DI, V2DI))
+
+DEF_MIPS_FTYPE (2, (V16QI, UV16QI, UV16QI))
+DEF_MIPS_FTYPE (2, (V8HI, UV8HI, UV8HI))
+DEF_MIPS_FTYPE (2, (V4SI, UV4SI, UV4SI))
+DEF_MIPS_FTYPE (2, (V2DI, UV2DI, UV2DI))
+
+DEF_MIPS_FTYPE (3, (V16QI, V16QI, V16QI, SI))
+DEF_MIPS_FTYPE (3, (V8HI, V8HI, V8HI, SI))
+DEF_MIPS_FTYPE (3, (V4SI, V4SI, V4SI, SI))
+DEF_MIPS_FTYPE (3, (V2DI, V2DI, V2DI, SI))
+DEF_MIPS_FTYPE (3, (V4SF, V4SF, V4SF, SI))
+DEF_MIPS_FTYPE (3, (V2DF, V2DF, V2DF, SI))
+
+DEF_MIPS_FTYPE (3, (V4SF, V4SF, V4SF, UQI))
+DEF_MIPS_FTYPE (3, (V2DF, V2DF, V2DF, UQI))
+
+DEF_MIPS_FTYPE (2, (V16QI, V16QI, SI))
+DEF_MIPS_FTYPE (2, (V8HI, V8HI, SI))
+DEF_MIPS_FTYPE (2, (V4SI, V4SI, SI))
+DEF_MIPS_FTYPE (2, (V2DI, V2DI, SI))
+DEF_MIPS_FTYPE (2, (V4SF, V4SF, SI))
+DEF_MIPS_FTYPE (2, (V2DF, V2DF, SI))
+
+DEF_MIPS_FTYPE (2, (V4SF, V4SF, UQI))
+DEF_MIPS_FTYPE (2, (V2DF, V2DF, UQI))
+
+DEF_MIPS_FTYPE (1, (V16QI, SI))
+DEF_MIPS_FTYPE (1, (V8HI, SI))
+DEF_MIPS_FTYPE (1, (V4SI, SI))
+DEF_MIPS_FTYPE (1, (V2DI, DI))
+DEF_MIPS_FTYPE (1, (V4SF, SF))
+DEF_MIPS_FTYPE (1, (V2DF, DF))
+
+DEF_MIPS_FTYPE (1, (V16QI, HI))
+DEF_MIPS_FTYPE (1, (V8HI, HI))
+DEF_MIPS_FTYPE (1, (V4SI, HI))
+DEF_MIPS_FTYPE (1, (V2DI, HI))
+DEF_MIPS_FTYPE (1, (V4SF, HI))
+DEF_MIPS_FTYPE (1, (V2DF, HI))
+
+DEF_MIPS_FTYPE (1, (SI, UQI))
+DEF_MIPS_FTYPE (2, (VOID, UQI, SI))
+
+/* V8HF is not supported yet. */
+/* DEF_MIPS_FTYPE (1, (V4SF, V8HF)) */
+/* DEF_MIPS_FTYPE (2, (V8HF, V4SF, V4SF)) */
+
+DEF_MIPS_FTYPE (1, (V2DF, V4SF))
+DEF_MIPS_FTYPE (2, (V4SF, V2DF, V2DF))
+
+DEF_MIPS_FTYPE (2, (V16QI, POINTER, SI))
+DEF_MIPS_FTYPE (2, (V8HI, POINTER, SI))
+DEF_MIPS_FTYPE (2, (V4SI, POINTER, SI))
+DEF_MIPS_FTYPE (2, (V2DI, POINTER, SI))
+DEF_MIPS_FTYPE (2, (V4SF, POINTER, SI))
+DEF_MIPS_FTYPE (2, (V2DF, POINTER, SI))
+
+DEF_MIPS_FTYPE (3, (VOID, V16QI, POINTER, SI))
+DEF_MIPS_FTYPE (3, (VOID, V8HI, POINTER, SI))
+DEF_MIPS_FTYPE (3, (VOID, V4SI, POINTER, SI))
+DEF_MIPS_FTYPE (3, (VOID, V2DI, POINTER, SI))
+DEF_MIPS_FTYPE (3, (VOID, V4SF, POINTER, SI))
+DEF_MIPS_FTYPE (3, (VOID, V2DF, POINTER, SI))
+
+DEF_MIPS_FTYPE (1, (SI, V16QI))
+DEF_MIPS_FTYPE (1, (SI, V8HI))
+DEF_MIPS_FTYPE (1, (SI, V4SI))
+DEF_MIPS_FTYPE (1, (SI, V2DI))
+DEF_MIPS_FTYPE (1, (SI, V4SF))
+DEF_MIPS_FTYPE (1, (SI, V2DF))
+
+DEF_MIPS_FTYPE (1, (SF, V4SF))
+DEF_MIPS_FTYPE (1, (DF, V2DF))
+
+DEF_MIPS_FTYPE (2, (UV16QI, UV16QI, V16QI))
+DEF_MIPS_FTYPE (2, (UV8HI, UV8HI, V8HI))
+DEF_MIPS_FTYPE (2, (UV4SI, UV4SI, V4SI))
+DEF_MIPS_FTYPE (2, (UV2DI, UV2DI, V2DI))
+
+DEF_MIPS_FTYPE (2, (V8HI, UV16QI, UV16QI))
+DEF_MIPS_FTYPE (2, (V4SI, UV8HI, UV8HI))
+DEF_MIPS_FTYPE (2, (V2DI, UV4SI, UV4SI))
+
+DEF_MIPS_FTYPE (3, (V8HI, V8HI, UV16QI, UV16QI))
+DEF_MIPS_FTYPE (3, (V4SI, V4SI, UV8HI, UV8HI))
+DEF_MIPS_FTYPE (3, (V2DI, V2DI, UV4SI, UV4SI))
+
+DEF_MIPS_FTYPE (3, (UV16QI, UV16QI, UV16QI, UV16QI))
+DEF_MIPS_FTYPE (3, (UV8HI, UV8HI, UV8HI, UV8HI))
+DEF_MIPS_FTYPE (3, (UV4SI, UV4SI, UV4SI, UV4SI))
+DEF_MIPS_FTYPE (3, (UV2DI, UV2DI, UV2DI, UV2DI))
+
+DEF_MIPS_FTYPE (3, (UV16QI, UV16QI, UV16QI, UQI))
+DEF_MIPS_FTYPE (3, (UV8HI, UV8HI, UV8HI, UQI))
+DEF_MIPS_FTYPE (3, (UV4SI, UV4SI, UV4SI, UQI))
+DEF_MIPS_FTYPE (3, (UV2DI, UV2DI, UV2DI, UQI))
+
+DEF_MIPS_FTYPE (1, (SI, UV16QI))
+DEF_MIPS_FTYPE (1, (SI, UV8HI))
+DEF_MIPS_FTYPE (1, (SI, UV4SI))
+DEF_MIPS_FTYPE (1, (SI, UV2DI))
+
+DEF_MIPS_FTYPE (2, (V16QI, UV16QI, UQI))
+DEF_MIPS_FTYPE (2, (V8HI, UV8HI, UQI))
+DEF_MIPS_FTYPE (2, (V4SI, UV4SI, UQI))
+DEF_MIPS_FTYPE (2, (V2DI, UV2DI, UQI))
+
+DEF_MIPS_FTYPE (3, (V16QI, V16QI, UQI, SI))
+DEF_MIPS_FTYPE (3, (V8HI, V8HI, UQI, SI))
+DEF_MIPS_FTYPE (3, (V4SI, V4SI, UQI, SI))
+DEF_MIPS_FTYPE (3, (V2DI, V2DI, UQI, DI))
+
+DEF_MIPS_FTYPE (3, (V16QI, V16QI, UQI, V16QI))
+DEF_MIPS_FTYPE (3, (V8HI, V8HI, UQI, V8HI))
+DEF_MIPS_FTYPE (3, (V4SI, V4SI, UQI, V4SI))
+DEF_MIPS_FTYPE (3, (V2DI, V2DI, UQI, V2DI))
+
DEF_MIPS_FTYPE (2, (DI, DI, DI))
DEF_MIPS_FTYPE (2, (DI, DI, SI))
DEF_MIPS_FTYPE (3, (DI, DI, SI, SI))
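The function-type entries added above describe the prototypes of the MSA builtins registered by this patch: vector/vector operations, vector/immediate operations, element extraction and insertion, and loads and stores. As a rough, hypothetical illustration (assuming the msa.h header added by this patch provides the usual 128-bit vector typedefs v16i8, v4i32 and v4f32 plus __msa_* wrappers around the __builtin_msa_* functions, and that the code is built with -mmsa), these signatures correspond to C intrinsics along the following lines:

/* Sketch only; the intrinsic and type names below follow the standard MSA
   intrinsic naming and are assumptions, not taken verbatim from this patch.  */
#include <msa.h>

v16i8 add_bytes (v16i8 a, v16i8 b)
{
  /* DEF_MIPS_FTYPE (2, (V16QI, V16QI, V16QI)): addv.b.  */
  return __msa_addv_b (a, b);
}

int first_word (v4i32 v)
{
  /* DEF_MIPS_FTYPE (2, (SI, V4SI, UQI)): copy_s.w with an immediate index.  */
  return __msa_copy_s_w (v, 0);
}

v4f32 fused_madd (v4f32 acc, v4f32 x, v4f32 y)
{
  /* DEF_MIPS_FTYPE (3, (V4SF, V4SF, V4SF, V4SF)): fmadd.w.  */
  return __msa_fmadd_w (acc, x, y);
}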
diff --git a/gcc-4.9/gcc/config/mips/mips-modes.def b/gcc-4.9/gcc/config/mips/mips-modes.def
index fa1d1e7d6..d87d10019 100644
--- a/gcc-4.9/gcc/config/mips/mips-modes.def
+++ b/gcc-4.9/gcc/config/mips/mips-modes.def
@@ -24,11 +24,17 @@ VECTOR_MODES (INT, 4); /* V4QI V2HI */
VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */
VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */
+/* For MIPS MSA 128 bits. */
+VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */
+VECTOR_MODES (FLOAT, 16); /* V4SF V2DF */
+
/* Double-sized vector modes for vec_concat. */
-VECTOR_MODE (INT, QI, 16); /* V16QI */
-VECTOR_MODE (INT, HI, 8); /* V8HI */
-VECTOR_MODE (INT, SI, 4); /* V4SI */
-VECTOR_MODE (FLOAT, SF, 4); /* V4SF */
+VECTOR_MODE (INT, QI, 32); /* V32QI */
+VECTOR_MODE (INT, HI, 16); /* V16HI */
+VECTOR_MODE (INT, SI, 8); /* V8SI */
+VECTOR_MODE (INT, DI, 4); /* V4DI */
+VECTOR_MODE (FLOAT, SF, 8); /* V8SF */
+VECTOR_MODE (FLOAT, DF, 4); /* V4DF */
VECTOR_MODES (FRACT, 4); /* V4QQ V2HQ */
VECTOR_MODES (UFRACT, 4); /* V4UQQ V2UHQ */
@@ -46,3 +52,6 @@ ADJUST_ALIGNMENT (CCV4, 16);
/* For MIPS DSP control registers. */
CC_MODE (CCDSP);
+
+/* For floating point conditions in FP registers. */
+CC_MODE (CCF);
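The VECTOR_MODES (INT, 16) and VECTOR_MODES (FLOAT, 16) entries make the 128-bit MSA modes (V16QI through V2DF) available as machine modes, and CCF is added for floating-point conditions held in FP registers. At the source level these modes correspond to ordinary 128-bit GCC vector types; a minimal sketch (the typedef names are arbitrary, and -mmsa is assumed so that the vectors live in MSA registers):

/* 128-bit GCC vector types that map onto the new V8HI and V2DF modes.  */
typedef short  v8i16 __attribute__ ((vector_size (16)));
typedef double v2f64 __attribute__ ((vector_size (16)));

v2f64 scale (v2f64 x, v2f64 y)
{
  /* With -mmsa this generic vector multiply uses the V2DF mode and can be
     emitted as a single fmul.d.  */
  return x * y;
}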
diff --git a/gcc-4.9/gcc/config/mips/mips-msa.md b/gcc-4.9/gcc/config/mips/mips-msa.md
new file mode 100644
index 000000000..37f5fcab5
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips-msa.md
@@ -0,0 +1,2905 @@
+;; Machine Description for MIPS MSA ASE
+;; Based on the MIPS MSA spec Revision 1.07 30/8/2013
+;; Contributed by Chao-ying Fu (fu@mips.com), MIPS Technologies, Inc.
+;;
+;; Copyright (C) 2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;;
+
+(define_c_enum "unspec" [
+ UNSPEC_MSA_ADDVI
+ UNSPEC_MSA_ANDI_B
+ UNSPEC_MSA_ASUB_S
+ UNSPEC_MSA_ASUB_U
+ UNSPEC_MSA_AVE_S
+ UNSPEC_MSA_AVE_U
+ UNSPEC_MSA_AVER_S
+ UNSPEC_MSA_AVER_U
+ UNSPEC_MSA_BCLR
+ UNSPEC_MSA_BCLRI
+ UNSPEC_MSA_BINSL
+ UNSPEC_MSA_BINSLI
+ UNSPEC_MSA_BINSR
+ UNSPEC_MSA_BINSRI
+ UNSPEC_MSA_BMNZ_V
+ UNSPEC_MSA_BMNZI_B
+ UNSPEC_MSA_BMZ_V
+ UNSPEC_MSA_BMZI_B
+ UNSPEC_MSA_BNEG
+ UNSPEC_MSA_BNEGI
+ UNSPEC_MSA_BSEL_V
+ UNSPEC_MSA_BSELI_B
+ UNSPEC_MSA_BSET
+ UNSPEC_MSA_BSETI
+ UNSPEC_MSA_BNZ_V
+ UNSPEC_MSA_BZ_V
+ UNSPEC_MSA_BNZ
+ UNSPEC_MSA_BZ
+ UNSPEC_MSA_CFCMSA
+ UNSPEC_MSA_CMPI
+ UNSPEC_MSA_COPY_S
+ UNSPEC_MSA_COPY_U
+ UNSPEC_MSA_CTCMSA
+ UNSPEC_MSA_DOTP_S
+ UNSPEC_MSA_DOTP_U
+ UNSPEC_MSA_DPADD_S
+ UNSPEC_MSA_DPADD_U
+ UNSPEC_MSA_DPSUB_S
+ UNSPEC_MSA_DPSUB_U
+ UNSPEC_MSA_FCAF
+ UNSPEC_MSA_FCLASS
+ UNSPEC_MSA_FCUNE
+ UNSPEC_MSA_FEXDO
+ UNSPEC_MSA_FEXP2
+ UNSPEC_MSA_FEXUPL
+ UNSPEC_MSA_FEXUPR
+ UNSPEC_MSA_FFINT_S
+ UNSPEC_MSA_FFINT_U
+ UNSPEC_MSA_FFQL
+ UNSPEC_MSA_FFQR
+ UNSPEC_MSA_FILL
+ UNSPEC_MSA_FLOG2
+ UNSPEC_MSA_FMAX
+ UNSPEC_MSA_FMAX_A
+ UNSPEC_MSA_FMIN
+ UNSPEC_MSA_FMIN_A
+ UNSPEC_MSA_FRCP
+ UNSPEC_MSA_FRINT
+ UNSPEC_MSA_FRSQRT
+ UNSPEC_MSA_FSAF
+ UNSPEC_MSA_FSEQ
+ UNSPEC_MSA_FSLE
+ UNSPEC_MSA_FSLT
+ UNSPEC_MSA_FSNE
+ UNSPEC_MSA_FSOR
+ UNSPEC_MSA_FSUEQ
+ UNSPEC_MSA_FSULE
+ UNSPEC_MSA_FSULT
+ UNSPEC_MSA_FSUN
+ UNSPEC_MSA_FSUNE
+ UNSPEC_MSA_FTINT_S
+ UNSPEC_MSA_FTINT_U
+ UNSPEC_MSA_FTRUNC_S
+ UNSPEC_MSA_FTRUNC_U
+ UNSPEC_MSA_FTQ
+ UNSPEC_MSA_HADD_S
+ UNSPEC_MSA_HADD_U
+ UNSPEC_MSA_HSUB_S
+ UNSPEC_MSA_HSUB_U
+ UNSPEC_MSA_ILVEV
+ UNSPEC_MSA_ILVL
+ UNSPEC_MSA_ILVOD
+ UNSPEC_MSA_ILVR
+ UNSPEC_MSA_INSERT
+ UNSPEC_MSA_INSVE
+ UNSPEC_MSA_LD0
+ UNSPEC_MSA_MADD_Q
+ UNSPEC_MSA_MADDR_Q
+ UNSPEC_MSA_MAX_A
+ UNSPEC_MSA_MAX_S
+ UNSPEC_MSA_MAX_U
+ UNSPEC_MSA_MAXI_S
+ UNSPEC_MSA_MAXI_U
+ UNSPEC_MSA_MIN_A
+ UNSPEC_MSA_MIN_S
+ UNSPEC_MSA_MIN_U
+ UNSPEC_MSA_MINI_S
+ UNSPEC_MSA_MINI_U
+ UNSPEC_MSA_MSUB_Q
+ UNSPEC_MSA_MSUBR_Q
+ UNSPEC_MSA_MUL_Q
+ UNSPEC_MSA_MULR_Q
+ UNSPEC_MSA_NLOC
+ UNSPEC_MSA_NLZC
+ UNSPEC_MSA_NORI_B
+ UNSPEC_MSA_ORI_B
+ UNSPEC_MSA_PCKEV
+ UNSPEC_MSA_PCKOD
+ UNSPEC_MSA_PCNT
+ UNSPEC_MSA_SAT_S
+ UNSPEC_MSA_SAT_U
+ UNSPEC_MSA_SHF
+ UNSPEC_MSA_SLD
+ UNSPEC_MSA_SLDI
+ UNSPEC_MSA_SLLI
+ UNSPEC_MSA_SPLAT
+ UNSPEC_MSA_SPLATI
+ UNSPEC_MSA_SRAI
+ UNSPEC_MSA_SRAR
+ UNSPEC_MSA_SRARI
+ UNSPEC_MSA_SRLI
+ UNSPEC_MSA_SRLR
+ UNSPEC_MSA_SRLRI
+ UNSPEC_MSA_SUBS_S
+ UNSPEC_MSA_SUBS_U
+ UNSPEC_MSA_SUBSUU_S
+ UNSPEC_MSA_SUBSUS_U
+ UNSPEC_MSA_SUBVI
+ UNSPEC_MSA_TSTNZ_V
+ UNSPEC_MSA_TSTZ_V
+ UNSPEC_MSA_TSTNZ
+ UNSPEC_MSA_TSTZ
+ UNSPEC_MSA_VSHF
+ UNSPEC_MSA_XORI_B
+ UNSPEC_MSA_CAST_TO_SCALAR
+ UNSPEC_MSA_CAST_TO_VECTOR
+])
+
+;; Attributes to categorize MSA instructions based on execution units
+(define_attr "msa_execunit"
+ "unknown, msa_eu_div, msa_eu_float2, msa_eu_float2_l,
+ msa_eu_float4, msa_eu_float5, msa_eu_float8, msa_eu_logic,
+ msa_eu_logic3, msa_eu_logic_l, msa_eu_mult, msa_eu_cmp,
+ msa_eu_store4, msa_eu_int_add, msa_eu_fdiv"
+ (const_string "unknown"))
+
+;; All vector modes with 128 bits.
+(define_mode_iterator MODE128 [V2DF V4SF V2DI V4SI V8HI V16QI])
+(define_mode_iterator MSA [V2DF V4SF V2DI V4SI V8HI V16QI])
+
+;; Same as MSA. Used by vcond to iterate two modes.
+(define_mode_iterator MSA_2 [V2DF V4SF V2DI V4SI V8HI V16QI])
+
+;; Only integer modes.
+(define_mode_iterator IMSA [V2DI V4SI V8HI V16QI])
+
+;; Modes for which a copy+insert can be combined into insve.
+;; Note that V2DI is excluded because it is split if !TARGET_64BIT.
+(define_mode_iterator INSVE [V4SI V8HI V16QI])
+
+;; Modes for which a copy+insert with subreg can be combined into insve.
+(define_mode_iterator INSVE_2 [V8HI V16QI])
+
+;; As IMSA but excludes V16QI.
+(define_mode_iterator IMSA_X [V2DI V4SI V8HI])
+
+;; Only used with insert.
+(define_mode_iterator MSA_3 [V16QI V8HI V2DF V4SF])
+
+;; Only integer modes for fixed-point madd_q/maddr_q.
+(define_mode_iterator QMSA [V4SI V8HI])
+
+;; Only floating-point modes.
+(define_mode_iterator FMSA [V2DF V4SF])
+
+;; Only integer modes for dot product.
+(define_mode_iterator IDOTP128 [V2DI V4SI V8HI])
+
+;; Only used in splitters.
+(define_mode_iterator SPLIT [V2DI V2DF])
+
+;; Only used with the SPLIT iterator.
+(define_mode_attr predicate
+ [(V2DI "reg_or_0")
+ (V2DF "register")])
+
+(define_mode_attr VHALFMODE
+ [(V8HI "V16QI")
+ (V4SI "V8HI")
+ (V2DI "V4SI")
+ (V2DF "V4SF")])
+
+;; This attribute gives the integer vector mode with the same size.
+(define_mode_attr VIMODE
+ [(V2DF "V2DI")
+ (V4SF "V4SI")
+ (V2DI "V2DI")
+ (V4SI "V4SI")
+ (V8HI "V8HI")
+ (V16QI "V16QI")])
+
+;; This attribute gives the integer vector mode, in lower case, with the same size.
+(define_mode_attr mode_i
+ [(V2DF "v2di")
+ (V4SF "v4si")
+ (V2DI "v2di")
+ (V4SI "v4si")
+ (V8HI "v8hi")
+ (V16QI "v16qi")])
+
+;; This attribute gives the mode of the result for "copy_s_b, copy_u_b" etc.
+(define_mode_attr RES
+ [(V2DF "DF")
+ (V4SF "SF")
+ (V2DI "DI")
+ (V4SI "SI")
+ (V8HI "SI")
+ (V16QI "SI")])
+
+;; This attribute gives the suffix for MSA instructions.
+(define_mode_attr msafmt
+ [(V2DF "d")
+ (V4SF "w")
+ (V2DI "d")
+ (V4SI "w")
+ (V8HI "h")
+ (V16QI "b")])
+
+;; This is used in msa_cast* to output mov.s or mov.d.
+(define_mode_attr unitfmt
+ [(V2DF "d")
+ (V4SF "s")])
+
+;; This attribute gives the define_insn suffix for MSA instructions that
+;; need to distinguish between integer and floating point.
+(define_mode_attr msafmt_f
+ [(V2DF "d_f")
+ (V4SF "w_f")
+ (V2DI "d")
+ (V4SI "w")
+ (V8HI "h")
+ (V16QI "b")])
+
+;; The mask for shift amounts.
+(define_mode_attr shift_mask
+ [(V2DI "63")
+ (V4SI "31")
+ (V8HI "15")
+ (V16QI "7")])
+
+;; This is used to form an immediate operand constraint
+;; using "const_<indeximm>_operand".
+(define_mode_attr indeximm
+ [(V2DF "0_or_1")
+ (V4SF "0_to_3")
+ (V2DI "0_or_1")
+ (V4SI "0_to_3")
+ (V8HI "uimm3")
+ (V16QI "uimm4")])
+
+;; This attribute is used to form the MODE for reg_or_0_operand
+;; constraint.
+(define_mode_attr REGOR0
+ [(V2DF "DF")
+ (V4SF "SF")
+ (V2DI "DI")
+ (V4SI "SI")
+ (V8HI "SI")
+ (V16QI "SI")])
+
+;; This attribute is used to form an immediate operand constraint
+;; using "const_<bitimm>_operand"
+(define_mode_attr bitimm
+ [(V16QI "uimm3")
+ (V8HI "uimm4")
+ (V4SI "uimm5")
+ (V2DI "uimm6")
+ ])
+
+(define_expand "vec_init<mode>"
+ [(match_operand:MSA 0 "register_operand")
+ (match_operand:MSA 1 "")]
+ "ISA_HAS_MSA"
+{
+ mips_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "vec_extract<mode>"
+ [(match_operand:<UNITMODE> 0 "register_operand")
+ (match_operand:IMSA 1 "register_operand")
+ (match_operand 2 "const_<indeximm>_operand")]
+ "ISA_HAS_MSA"
+{
+ if (<UNITMODE>mode == QImode || <UNITMODE>mode == HImode)
+ {
+ rtx dest1 = gen_reg_rtx (SImode);
+ emit_insn (gen_msa_copy_s_<msafmt> (dest1, operands[1], operands[2]));
+ emit_move_insn (operands[0],
+ gen_lowpart (<UNITMODE>mode, dest1));
+ }
+ else
+ emit_insn (gen_msa_copy_s_<msafmt> (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extract<mode>"
+ [(match_operand:<UNITMODE> 0 "register_operand")
+ (match_operand:FMSA 1 "register_operand")
+ (match_operand 2 "const_<indeximm>_operand")]
+ "ISA_HAS_MSA"
+{
+ rtx temp;
+ HOST_WIDE_INT val = UINTVAL (operands[2]);
+
+ if (val == 0)
+ temp = operands[1];
+ else
+ {
+ /* We need to do the SLDI operation in V16QImode and adjust
+ operand[2] accordingly. */
+ rtx tempb = gen_reg_rtx (V16QImode);
+ rtx op1b = gen_reg_rtx (V16QImode);
+ emit_move_insn (op1b, gen_rtx_SUBREG (V16QImode, operands[1], 0));
+ rtx op2b = GEN_INT (val * GET_MODE_SIZE (<UNITMODE>mode));
+ gcc_assert (UINTVAL (op2b) < GET_MODE_NUNITS (V16QImode));
+ emit_insn (gen_msa_sldi_b (tempb, op1b, op1b, op2b));
+ temp = gen_reg_rtx (<MODE>mode);
+ emit_move_insn (temp, gen_rtx_SUBREG (<MODE>mode, tempb, 0));
+ }
+ emit_insn (gen_msa_cast_to_scalar_<msafmt_f> (operands[0], temp));
+ DONE;
+})
+
+(define_expand "vec_set<mode>"
+ [(match_operand:IMSA 0 "register_operand")
+ (match_operand:<UNITMODE> 1 "register_operand")
+ (match_operand 2 "const_<indeximm>_operand")]
+ "ISA_HAS_MSA"
+{
+ emit_insn (gen_msa_insert_<msafmt> (operands[0], operands[0], operands[2],
+ operands[1]));
+ DONE;
+})
+
+(define_expand "vec_set<mode>"
+ [(match_operand:FMSA 0 "register_operand")
+ (match_operand:<UNITMODE> 1 "register_operand")
+ (match_operand 2 "const_<indeximm>_operand")]
+ "ISA_HAS_MSA"
+{
+ emit_insn (gen_msa_insve_<msafmt_f>_s (operands[0], operands[0], operands[2],
+ operands[1]));
+ DONE;
+})
+
+(define_expand "vcondu<MSA_2:mode><IMSA:mode>"
+ [(set (match_operand:MSA_2 0 "register_operand")
+ (if_then_else:MSA_2
+ (match_operator 3 ""
+ [(match_operand:IMSA 4 "register_operand")
+ (match_operand:IMSA 5 "register_operand")])
+ (match_operand:MSA_2 1 "reg_or_m1_operand")
+ (match_operand:MSA_2 2 "reg_or_0_operand")))]
+ "ISA_HAS_MSA
+ && (GET_MODE_NUNITS (<MSA_2:MODE>mode)
+ == GET_MODE_NUNITS (<IMSA:MODE>mode))"
+{
+ mips_expand_vec_cond_expr (<MSA_2:MODE>mode,
+ <MSA_2:VIMODE>mode,
+ operands,
+ gen_and<MSA_2:mode_i>3,
+ gen_msa_nor_v_<MSA_2:msafmt>,
+ gen_ior<MSA_2:mode_i>3);
+ DONE;
+})
+
+(define_expand "vcond<MSA_2:mode><MSA:mode>"
+ [(set (match_operand:MSA_2 0 "register_operand")
+ (if_then_else:MSA_2
+ (match_operator 3 ""
+ [(match_operand:MSA 4 "register_operand")
+ (match_operand:MSA 5 "register_operand")])
+ (match_operand:MSA_2 1 "reg_or_m1_operand")
+ (match_operand:MSA_2 2 "reg_or_0_operand")))]
+ "ISA_HAS_MSA
+ && (GET_MODE_NUNITS (<MSA_2:MODE>mode)
+ == GET_MODE_NUNITS (<MSA:MODE>mode))"
+{
+ mips_expand_vec_cond_expr (<MSA_2:MODE>mode,
+ <MSA_2:VIMODE>mode,
+ operands,
+ gen_and<MSA_2:mode_i>3,
+ gen_msa_nor_v_<MSA_2:msafmt>,
+ gen_ior<MSA_2:mode_i>3);
+ DONE;
+})
+
+(define_insn "msa_insert_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "0")
+ (match_operand 2 "const_<indeximm>_operand" "")
+ (match_operand:<REGOR0> 3 "reg_or_0_operand" "dJ")]
+ UNSPEC_MSA_INSERT))]
+ "ISA_HAS_MSA"
+ "insert.<msafmt>\t%w0[%2],%z3"
+ [(set_attr "type" "mtc")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+; Similar to msa_insert_<msafmt> but with <UNITMODE>mode for operand 3.
+;; Note that insert.d and insert.d_f will be split later if !TARGET_64BIT.
+
+(define_insn "*msa_insert_<msafmt_f>"
+ [(set (match_operand:MSA_3 0 "register_operand" "=f")
+ (unspec:MSA_3 [(match_operand:MSA_3 1 "register_operand" "0")
+ (match_operand 2 "const_<indeximm>_operand" "")
+ (match_operand:<UNITMODE> 3 "reg_or_0_operand" "dJ")]
+ UNSPEC_MSA_INSERT))]
+ "ISA_HAS_MSA"
+ "insert.<msafmt>\t%w0[%2],%z3"
+ [(set_attr "type" "mtc")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_split
+ [(set (match_operand:SPLIT 0 "register_operand")
+ (unspec:SPLIT [(match_operand:SPLIT 1 "register_operand")
+ (match_operand 2 "const_0_or_1_operand")
+ (match_operand:<UNITMODE> 3 "<SPLIT:predicate>_operand")]
+ UNSPEC_MSA_INSERT))]
+ "reload_completed && TARGET_MSA && !TARGET_64BIT"
+ [(const_int 0)]
+{
+ mips_split_msa_insert_d (operands[0], operands[1], operands[2], operands[3]);
+ DONE;
+})
+
+;; Used by combine to convert a copy_s + insert into an insve
+(define_insn "msa_insve_s_insn_<msafmt>"
+ [(set (match_operand:INSVE 0 "register_operand" "=f")
+ (unspec:INSVE [(match_operand:INSVE 1 "register_operand" "0")
+ (match_operand 2 "const_<indeximm>_operand" "")
+ (unspec:<RES>
+ [(match_operand:INSVE 3 "register_operand" "f")
+ (match_operand 4 "const_0_operand" "")
+ ] UNSPEC_MSA_COPY_S)
+ ] UNSPEC_MSA_INSERT))]
+ "ISA_HAS_MSA"
+ "insve.<msafmt>\t%w0[%2],%w3[0]"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+;; Used by combine to convert a copy_u + insert into an insve
+(define_insn "msa_insve_u_insn_<msafmt>"
+ [(set (match_operand:INSVE 0 "register_operand" "=f")
+ (unspec:INSVE [(match_operand:INSVE 1 "register_operand" "0")
+ (match_operand 2 "const_<indeximm>_operand" "")
+ (unspec:<RES>
+ [(match_operand:INSVE 3 "register_operand" "f")
+ (match_operand 4 "const_0_operand" "")
+ ] UNSPEC_MSA_COPY_U)
+ ] UNSPEC_MSA_INSERT))]
+ "ISA_HAS_MSA"
+ "insve.<msafmt>\t%w0[%2],%w3[0]"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+;; Used by combine to convert a copy_s + insert with subreg into an insve
+(define_insn "*msa_insve_sext_insn_<msafmt>"
+ [(set (match_operand:INSVE_2 0 "register_operand" "=f")
+ (unspec:INSVE_2 [(match_operand:INSVE_2 1 "register_operand" "0")
+ (match_operand 2 "const_<indeximm>_operand" "")
+ (subreg:<UNITMODE>
+ (unspec:<RES>
+ [(match_operand:INSVE_2 3 "register_operand" "f")
+ (match_operand 4 "const_0_operand" "")
+ ] UNSPEC_MSA_COPY_S) 0)
+ ] UNSPEC_MSA_INSERT))]
+ "ISA_HAS_MSA"
+ "insve.<msafmt>\t%w0[%2],%w3[0]"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+;; Used by combine to convert a copy_u + insert with subreg into an insve
+(define_insn "*msa_insve_zext_insn_<msafmt>"
+ [(set (match_operand:INSVE_2 0 "register_operand" "=f")
+ (unspec:INSVE_2 [(match_operand:INSVE_2 1 "register_operand" "0")
+ (match_operand 2 "const_<indeximm>_operand" "")
+ (subreg:<UNITMODE>
+ (unspec:<RES>
+ [(match_operand:INSVE_2 3 "register_operand" "f")
+ (match_operand 4 "const_0_operand" "")
+ ] UNSPEC_MSA_COPY_U) 0)
+ ] UNSPEC_MSA_INSERT))]
+ "ISA_HAS_MSA"
+ "insve.<msafmt>\t%w0[%2],%w3[%4]"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_insve_<msafmt_f>"
+ [(set (match_operand:MSA 0 "register_operand" "=f")
+ (unspec:MSA [(match_operand:MSA 1 "register_operand" "0")
+ (match_operand 2 "const_<indeximm>_operand" "")
+ (match_operand:MSA 3 "register_operand" "f")]
+ UNSPEC_MSA_INSVE))]
+ "ISA_HAS_MSA"
+ "insve.<msafmt>\t%w0[%2],%w3[0]"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+;; operand 3 is a scalar
+(define_insn "msa_insve_<msafmt>_f_s"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unspec:FMSA [(match_operand:FMSA 1 "register_operand" "0")
+ (match_operand 2 "const_<indeximm>_operand" "")
+ (match_operand:<UNITMODE> 3 "register_operand" "f")]
+ UNSPEC_MSA_INSVE))]
+ "ISA_HAS_MSA"
+ "insve.<msafmt>\t%w0[%2],%w3[0]"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+;; Note that copy_s.d and copy_s.d_f will be split later if !TARGET_64BIT.
+(define_insn "msa_copy_s_<msafmt_f>"
+ [(set (match_operand:<RES> 0 "register_operand" "=d")
+ (unspec:<RES> [(match_operand:MSA 1 "register_operand" "f")
+ (match_operand 2 "const_<indeximm>_operand" "")]
+ UNSPEC_MSA_COPY_S))]
+ "ISA_HAS_MSA"
+ "copy_s.<msafmt>\t%0,%w1[%2]"
+ [(set_attr "type" "mfc")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_store4")])
+
+(define_split
+ [(set (match_operand:<UNITMODE> 0 "register_operand")
+ (unspec:<UNITMODE> [(match_operand:SPLIT 1 "register_operand")
+ (match_operand 2 "const_0_or_1_operand")]
+ UNSPEC_MSA_COPY_S))]
+ "reload_completed && TARGET_MSA && !TARGET_64BIT"
+ [(const_int 0)]
+{
+ mips_split_msa_copy_d (operands[0], operands[1], operands[2], gen_msa_copy_s_w);
+ DONE;
+})
+
+;; Note that copy_u.d and copy_u.d_f will be split later if !TARGET_64BIT.
+(define_insn "msa_copy_u_<msafmt_f>"
+ [(set (match_operand:<RES> 0 "register_operand" "=d")
+ (unspec:<RES> [(match_operand:MSA 1 "register_operand" "f")
+ (match_operand 2 "const_<indeximm>_operand" "")]
+ UNSPEC_MSA_COPY_U))]
+ "ISA_HAS_MSA"
+ "copy_u.<msafmt>\t%0,%w1[%2]"
+ [(set_attr "type" "mfc")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_store4")])
+
+(define_split
+ [(set (match_operand:<UNITMODE> 0 "register_operand")
+ (unspec:<UNITMODE> [(match_operand:SPLIT 1 "register_operand")
+ (match_operand 2 "const_0_or_1_operand")]
+ UNSPEC_MSA_COPY_U))]
+ "reload_completed && TARGET_MSA && !TARGET_64BIT"
+ [(const_int 0)]
+{
+ mips_split_msa_copy_d (operands[0], operands[1], operands[2], gen_msa_copy_u_w);
+ DONE;
+})
+
+(define_expand "vec_perm<mode>"
+ [(match_operand:MSA 0 "register_operand")
+ (match_operand:MSA 1 "register_operand")
+ (match_operand:MSA 2 "register_operand")
+ (match_operand:<VIMODE> 3 "register_operand")]
+ "ISA_HAS_MSA"
+{
+ /* The optab semantics are that index 0 selects the first element
+ of operands[1] and the highest index selects the last element
+ of operands[2]. This is the opposite order from "vshf.df wd,rs,wt"
+ where index 0 selects the first element of wt and the highest index
+ selects the last element of ws. We therefore swap the operands here. */
+ emit_insn (gen_msa_vshf<mode> (operands[0], operands[3], operands[2],
+ operands[1]));
+ DONE;
+})
+
+(define_expand "neg<mode>2"
+ [(match_operand:IMSA 0 "register_operand")
+ (match_operand:IMSA 1 "register_operand")]
+ "ISA_HAS_MSA"
+{
+ rtx reg = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_msa_ldi<mode> (reg, const0_rtx));
+ emit_insn (gen_sub<mode>3 (operands[0], reg, operands[1]));
+ DONE;
+})
+
+(define_expand "neg<mode>2"
+ [(match_operand:FMSA 0 "register_operand")
+ (match_operand:FMSA 1 "register_operand")]
+ "ISA_HAS_MSA"
+{
+ rtx reg = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_msa_ld0<mode> (reg, const0_rtx));
+ emit_insn (gen_sub<mode>3 (operands[0], reg, operands[1]));
+ DONE;
+})
+
+(define_insn "msa_ldi<mode>_insn"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (match_operand:IMSA 1 "const_vector_same_simm10_operand" ""))]
+ "ISA_HAS_MSA"
+ {
+ operands[1] = CONST_VECTOR_ELT (operands[1], 0);
+ return "ldi.<msafmt>\t%w0,%d1";
+ }
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_expand "msa_ldi<mode>"
+ [(match_operand:IMSA 0 "register_operand")
+ (match_operand 1 "const_imm10_operand")]
+ "ISA_HAS_MSA"
+ {
+ unsigned n_elts = GET_MODE_NUNITS (<MODE>mode);
+ rtvec v = rtvec_alloc (n_elts);
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned int i;
+
+ if (<MODE>mode != V16QImode)
+ {
+ unsigned shift = HOST_BITS_PER_WIDE_INT - 10;
+ val = trunc_int_for_mode ((val << shift) >> shift, <UNITMODE>mode);
+ }
+ else
+ val = trunc_int_for_mode (val, <UNITMODE>mode);
+
+ for (i = 0; i < n_elts; i++)
+ RTVEC_ELT (v, i) = GEN_INT (val);
+ emit_insn (gen_msa_ldi<mode>_insn (operands[0],
+ gen_rtx_CONST_VECTOR (<MODE>mode, v)));
+ DONE;
+ })
+
+(define_insn "msa_ld0<mode>"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unspec:FMSA [(match_operand 1 "const_0_operand" "")]
+ UNSPEC_MSA_LD0))]
+ "ISA_HAS_MSA"
+ "ldi.<msafmt>\t%w0,%d1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_lsa"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "const_immlsa_operand" ""))
+ (match_operand:SI 3 "register_operand" "d")))]
+ "ISA_HAS_LSA"
+ "lsa\t%0,%1,%3,%y2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "msa_vshf<mode>"
+ [(set (match_operand:MSA 0 "register_operand" "=f")
+ (unspec:MSA [(match_operand:<VIMODE> 1 "register_operand" "0")
+ (match_operand:MSA 2 "register_operand" "f")
+ (match_operand:MSA 3 "register_operand" "f")]
+ UNSPEC_MSA_VSHF))]
+ "ISA_HAS_MSA"
+ "vshf.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+;; 128-bit integer/MSA vector register moves.
+;; Note that we prefer floating-point loads, stores, and moves by adding * to
+;; other register preferences.
+;; Note that we combine f and J, so that move_type for J is fmove and its
+;; instruction length can be 1.
+(define_insn "movti_msa"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=*d,*d,*d,*R,*d,*f,f,R,f,*m,*d,*m,*f")
+ (match_operand:TI 1 "move_operand" "*d,*i,*R,*d*J,*f,*d,R,f,fJ,*d*J,*m,*f,*m"))]
+ "ISA_HAS_MSA
+ && !TARGET_64BIT
+ && (register_operand (operands[0], TImode)
+ || reg_or_0_operand (operands[1], TImode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,load,store,mfc,mtc,fpload,fpstore,fmove,store,load,fpstore,fpload")
+ (set_attr "mode" "TI")])
+
+;; Note that we prefer floating-point loads, stores, and moves by adding * to
+;; other register preferences.
+;; Note that we combine f and J, so that move_type for J is fmove and its
+;; instruction length can be 1.
+(define_insn "movti_msa_64bit"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=*d,*d,*d,*R,*a,*d,*d,*f,f,R,f,*m,*d,*m,*f")
+ (match_operand:TI 1 "move_operand" "*d,*i,*R,*d*J,*d*J,*a,*f,*d,R,f,fJ,*d*J,*m,*f,*m"))]
+ "ISA_HAS_MSA
+ && TARGET_64BIT
+ && (register_operand (operands[0], TImode)
+ || reg_or_0_operand (operands[1], TImode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,load,store,mtlo,mflo,mfc,mtc,fpload,fpstore,fmove,store,load,fpstore,fpload")
+ (set_attr "mode" "TI")])
+
+(define_expand "mov<mode>"
+ [(set (match_operand:MODE128 0)
+ (match_operand:MODE128 1))]
+ "TARGET_64BIT || TARGET_MSA"
+{
+ if (mips_legitimize_move (<MODE>mode, operands[0], operands[1]))
+ DONE;
+})
+
+(define_expand "movmisalign<mode>"
+ [(set (match_operand:MODE128 0)
+ (match_operand:MODE128 1))]
+ "TARGET_64BIT || TARGET_MSA"
+{
+ if (mips_legitimize_move (<MODE>mode, operands[0], operands[1]))
+ DONE;
+})
+
+;; 128-bit MSA modes can only be in MSA registers or memory;
+;; an exception is allowing MSA modes in GP registers for arguments
+;; and return values.
+(define_insn "mov<mode>_msa"
+ [(set (match_operand:MODE128 0 "nonimmediate_operand" "=f,f,R,!d,f")
+ (match_operand:MODE128 1 "move_operand" "fYG,R,f,f,!d"))]
+ "ISA_HAS_MSA
+ && (register_operand (operands[0], <MODE>mode)
+ || reg_or_0_operand (operands[1], <MODE>mode))"
+{ return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "fmove,fpload,fpstore,fmove,fmove")
+ (set_attr "mode" "TI")])
+
+(define_split
+ [(set (match_operand:TI 0 "nonimmediate_operand")
+ (match_operand:TI 1 "move_operand"))]
+ "reload_completed && TARGET_MSA
+ && mips_split_move_insn_p (operands[0], operands[1], insn)"
+ [(const_int 0)]
+{
+ /* Temporary sanity check */
+ gcc_assert (mips_split_128bit_move_p (operands[0], operands[1]));
+ gcc_assert (mips_split_move_insn_p (operands[0], operands[1], curr_insn));
+ mips_split_move_insn (operands[0], operands[1], curr_insn);
+ DONE;
+})
+
+(define_split
+ [(set (match_operand:MSA 0 "nonimmediate_operand")
+ (match_operand:MSA 1 "move_operand"))]
+ "reload_completed && TARGET_MSA
+ && mips_split_move_insn_p (operands[0], operands[1], insn)"
+ [(const_int 0)]
+{
+ /* Temporary sanity check */
+ gcc_assert (mips_split_128bit_move_p (operands[0], operands[1]));
+ mips_split_move_insn (operands[0], operands[1], curr_insn);
+ DONE;
+})
+
+;; Offset load
+(define_expand "msa_ld_<msafmt_f>"
+ [(match_operand:MSA 0 "register_operand")
+ (match_operand 1 "pmode_register_operand")
+ (match_operand 2 "aq10<msafmt>_operand")]
+ "ISA_HAS_MSA"
+{
+ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
+ INTVAL (operands[2]));
+ mips_emit_move (operands[0], gen_rtx_MEM (<MODE>mode, addr));
+ DONE;
+})
+
+;; Offset store
+(define_expand "msa_st_<msafmt_f>"
+ [(match_operand:MSA 0 "register_operand")
+ (match_operand 1 "pmode_register_operand")
+ (match_operand 2 "aq10<msafmt>_operand")]
+ "ISA_HAS_MSA"
+{
+ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
+ INTVAL (operands[2]));
+ mips_emit_move (gen_rtx_MEM (<MODE>mode, addr), operands[0]);
+ DONE;
+})
+
+;; Integer operations
+(define_insn "add<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f,f,f")
+ (plus:IMSA (match_operand:IMSA 1 "register_operand" "f,f,f")
+ (match_operand:IMSA 2 "reg_or_vector_same_ximm5_operand" "f,Unv5,Uuv5")))]
+ "ISA_HAS_MSA"
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ return "addv.<msafmt>\t%w0,%w1,%w2";
+ case 1:
+ {
+ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0));
+
+ operands[2] = GEN_INT (-val);
+ return "subvi.<msafmt>\t%w0,%w1,%d2";
+ }
+ case 2:
+ {
+ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0));
+
+ operands[2] = GEN_INT (val);
+ return "addvi.<msafmt>\t%w0,%w1,%d2";
+ }
+ default:
+ gcc_unreachable ();
+ }
+ }
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f,f,f")
+ (minus:IMSA (match_operand:IMSA 1 "register_operand" "f,f,f")
+ (match_operand:IMSA 2 "reg_or_vector_same_ximm5_operand" "f,Unv5,Uuv5")))]
+ "ISA_HAS_MSA"
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ return "subv.<msafmt>\t%w0,%w1,%w2";
+ case 1:
+ {
+ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0));
+
+ operands[2] = GEN_INT (-val);
+ return "addvi.<msafmt>\t%w0,%w1,%d2";
+ }
+ case 2:
+ {
+ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0));
+
+ operands[2] = GEN_INT (val);
+ return "subvi.<msafmt>\t%w0,%w1,%d2";
+ }
+ default:
+ gcc_unreachable ();
+ }
+ }
+ [(set_attr "alu_type" "sub")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (mult:IMSA (match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "mulv.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_maddv_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (plus:IMSA (mult:IMSA (match_operand:IMSA 2 "register_operand" "f")
+ (match_operand:IMSA 3 "register_operand" "f"))
+ (match_operand:IMSA 1 "register_operand" "0")))]
+ "ISA_HAS_MSA"
+ "maddv.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "imadd")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_msubv_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (minus:IMSA (match_operand:IMSA 1 "register_operand" "0")
+ (mult:IMSA (match_operand:IMSA 2 "register_operand" "f")
+ (match_operand:IMSA 3 "register_operand" "f"))))]
+ "ISA_HAS_MSA"
+ "msubv.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "imadd")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "div<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (div:IMSA (match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ { return mips_msa_output_division ("div_s.<msafmt>\t%w0,%w1,%w2", operands); }
+ [(set_attr "type" "idiv3")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_div")])
+
+(define_insn "udiv<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (udiv:IMSA (match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ { return mips_msa_output_division ("div_u.<msafmt>\t%w0,%w1,%w2", operands); }
+ [(set_attr "type" "idiv3")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_div")])
+
+(define_insn "mod<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (mod:IMSA (match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ { return mips_msa_output_division ("mod_s.<msafmt>\t%w0,%w1,%w2", operands); }
+ [(set_attr "type" "idiv3")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_div")])
+
+(define_insn "umod<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (umod:IMSA (match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ { return mips_msa_output_division ("mod_u.<msafmt>\t%w0,%w1,%w2", operands); }
+ [(set_attr "type" "idiv3")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_div")])
+
+(define_insn "xorv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=f,f")
+ (xor:V16QI (match_operand:V16QI 1 "register_operand" "f,f")
+ (match_operand:V16QI 2 "reg_or_vector_same_byte_operand" "f,Ubv8")))]
+ "ISA_HAS_MSA"
+ {
+ if (which_alternative == 1)
+ {
+ operands[2] = CONST_VECTOR_ELT (operands[2], 0);
+ return "xori.b\t%w0,%w1,%B2";
+ }
+ else
+ return "xor.v\t%w0,%w1,%w2";
+ }
+ [(set_attr "alu_type" "xor")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "xor<mode>3"
+ [(set (match_operand:IMSA_X 0 "register_operand" "=f,f")
+ (xor:IMSA_X (match_operand:IMSA_X 1 "register_operand" "f,f")
+ (match_operand:IMSA_X 2 "reg_or_vector_same_<mode>_set_operand" "f,YC")))]
+ "ISA_HAS_MSA"
+ {
+ if (which_alternative == 1)
+ {
+ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0));
+ int vlog2 = exact_log2 (val);
+ gcc_assert (vlog2 != -1);
+ operands[2] = GEN_INT (vlog2);
+ return "bnegi.%v0\t%w0,%w1,%2";
+ }
+ else
+ return "xor.v\t%w0,%w1,%w2";
+ }
+ [(set_attr "alu_type" "xor")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "iorv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=f,f")
+ (ior:V16QI (match_operand:V16QI 1 "register_operand" "f,f")
+ (match_operand:V16QI 2 "reg_or_vector_same_byte_operand" "f,Ubv8")))]
+ "ISA_HAS_MSA"
+ {
+ if (which_alternative == 1)
+ {
+ operands[2] = CONST_VECTOR_ELT (operands[2], 0);
+ return "ori.b\t%w0,%w1,%B2";
+ }
+ else
+ return "or.v\t%w0,%w1,%w2";
+ }
+ [(set_attr "alu_type" "or")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "ior<mode>3"
+ [(set (match_operand:IMSA_X 0 "register_operand" "=f,f")
+ (ior:IMSA_X (match_operand:IMSA_X 1 "register_operand" "f,f")
+ (match_operand:IMSA_X 2 "reg_or_vector_same_<mode>_set_operand" "f,YC")))]
+ "ISA_HAS_MSA"
+ {
+ if (which_alternative == 1)
+ {
+ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0));
+ int vlog2 = exact_log2 (val);
+ gcc_assert (vlog2 != -1);
+ operands[2] = GEN_INT (vlog2);
+ return "bseti.%v0\t%w0,%w1,%2";
+ }
+ else
+ return "or.v\t%w0,%w1,%w2";
+ }
+ [(set_attr "alu_type" "or")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "andv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=f,f")
+ (and:V16QI (match_operand:V16QI 1 "register_operand" "f,f")
+ (match_operand:V16QI 2 "reg_or_vector_same_byte_operand" "f,Ubv8")))]
+ "ISA_HAS_MSA"
+ {
+ if (which_alternative == 1)
+ {
+ operands[2] = CONST_VECTOR_ELT (operands[2], 0);
+ return "andi.b\t%w0,%w0,%B2";
+ }
+ else
+ return "and.v\t%w0,%w1,%w2";
+ }
+ [(set_attr "alu_type" "and")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "and<mode>3"
+ [(set (match_operand:IMSA_X 0 "register_operand" "=f,f")
+ (and:IMSA_X (match_operand:IMSA_X 1 "register_operand" "f,f")
+ (match_operand:IMSA_X 2 "reg_or_vector_same_<mode>_clr_operand" "f,YZ")))]
+ "ISA_HAS_MSA"
+ {
+ if (which_alternative == 1)
+ {
+ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0));
+ int vlog2 = exact_log2 (~val);
+ gcc_assert (vlog2 != -1);
+ operands[2] = GEN_INT (vlog2);
+ return "bclri.%v0\t%w0,%w1,%2";
+ }
+ else
+ return "and.v\t%w0,%w1,%w2";
+ }
+ [(set_attr "alu_type" "and")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (not:IMSA (match_operand:IMSA 1 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "nor.v\t%w0,%w1,%w1"
+ [(set_attr "alu_type" "nor")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "vlshr<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f,f")
+ (lshiftrt:<MODE> (match_operand:<MODE> 1 "register_operand" "f,f")
+ (match_operand:<MODE> 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))]
+ "ISA_HAS_MSA"
+{
+ if (which_alternative == 0)
+ return "srl.<msafmt>\t%w0,%w1,%w2";
+
+ operands[2] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[2], 0))
+ & <shift_mask>);
+ return "srli.<msafmt>\t%w0,%w1,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "vashr<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f,f")
+ (ashiftrt:IMSA (match_operand:IMSA 1 "register_operand" "f,f")
+ (match_operand:IMSA 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))]
+ "ISA_HAS_MSA"
+{
+ if (which_alternative == 0)
+ return "sra.<msafmt>\t%w0,%w1,%w2";
+
+ operands[2] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[2], 0))
+ & <shift_mask>);
+ return "srai.<msafmt>\t%w0,%w1,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "vashl<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f,f")
+ (ashift:IMSA (match_operand:IMSA 1 "register_operand" "f,f")
+ (match_operand:IMSA 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))]
+ "ISA_HAS_MSA"
+{
+ if (which_alternative == 0)
+ return "sll.<msafmt>\t%w0,%w1,%w2";
+
+ operands[2] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[2], 0))
+ & <shift_mask>);
+ return "slli.<msafmt>\t%w0,%w1,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+;; Floating-point operations
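+;; Unlike the integer patterns above, which schedule with mode "TI", the
+;; floating-point patterns use the element mode <UNITMODE> (SF or DF).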
+(define_insn "add<mode>3"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (plus:FMSA (match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "fadd.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fadd")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (minus:FMSA (match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "fsub.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fadd")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (mult:FMSA (match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "fmul.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fmul")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float5")])
+
+(define_insn "div<mode>3"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (div:FMSA (match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "fdiv.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fdiv")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_fdiv")])
+
+(define_insn "msa_fmadd_<msafmt>"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (plus:FMSA (mult:FMSA (match_operand:FMSA 2 "register_operand" "f")
+ (match_operand:FMSA 3 "register_operand" "f"))
+ (match_operand:FMSA 1 "register_operand" "0")))]
+ "ISA_HAS_MSA"
+ "fmadd.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float8")])
+
+(define_insn "msa_fmsub_<msafmt>"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (minus:FMSA (match_operand:FMSA 1 "register_operand" "0")
+ (mult:FMSA (match_operand:FMSA 2 "register_operand" "f")
+ (match_operand:FMSA 3 "register_operand" "f"))))]
+ "ISA_HAS_MSA"
+ "fmsub.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float8")])
+
+(define_insn "sqrt<mode>2"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (sqrt:FMSA (match_operand:FMSA 1 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "fsqrt.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "fsqrt")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_fdiv")])
+
+;; Built-in functions
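+;; The patterns from here on back the MSA built-in functions.  Most are plain
+;; one-instruction unspecs named msa_<mnemonic>_<format>; where the hardware
+;; instruction also reads its destination, that input is tied to operand 0
+;; with the "0" constraint.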
+(define_insn "msa_add_a_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (plus:IMSA (abs:IMSA (match_operand:IMSA 1 "register_operand" "f"))
+ (abs:IMSA (match_operand:IMSA 2 "register_operand" "f"))))]
+ "ISA_HAS_MSA"
+ "add_a.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_adds_a_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (ss_plus:IMSA (abs:IMSA (match_operand:IMSA 1 "register_operand" "f"))
+ (abs:IMSA (match_operand:IMSA 2 "register_operand" "f"))))]
+ "ISA_HAS_MSA"
+ "adds_a.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "ssadd<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (ss_plus:IMSA (match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "adds_s.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "usadd<mode>3"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (us_plus:IMSA (match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "adds_u.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_addvi_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_uimm5_operand" "")]
+ UNSPEC_MSA_ADDVI))]
+ "ISA_HAS_MSA"
+ "addvi.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_andi_b"
+ [(set (match_operand:V16QI 0 "register_operand" "=f")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")
+ (match_operand 2 "const_uimm8_operand" "")]
+ UNSPEC_MSA_ANDI_B))]
+ "ISA_HAS_MSA"
+ "andi.b\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_asub_s_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_ASUB_S))]
+ "ISA_HAS_MSA"
+ "asub_s.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_asub_u_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_ASUB_U))]
+ "ISA_HAS_MSA"
+ "asub_u.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_ave_s_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_AVE_S))]
+ "ISA_HAS_MSA"
+ "ave_s.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_ave_u_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_AVE_U))]
+ "ISA_HAS_MSA"
+ "ave_u.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_aver_s_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_AVER_S))]
+ "ISA_HAS_MSA"
+ "aver_s.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_aver_u_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_AVER_U))]
+ "ISA_HAS_MSA"
+ "aver_u.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_bclr_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_BCLR))]
+ "ISA_HAS_MSA"
+ "bclr.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_bclri_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_<bitimm>_operand" "")]
+ UNSPEC_MSA_BCLRI))]
+ "ISA_HAS_MSA"
+ "bclri.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_binsl_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "0")
+ (match_operand:IMSA 2 "register_operand" "f")
+ (match_operand:IMSA 3 "register_operand" "f")]
+ UNSPEC_MSA_BINSL))]
+ "ISA_HAS_MSA"
+ "binsl.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_binsli_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "0")
+ (match_operand:IMSA 2 "register_operand" "f")
+ (match_operand 3 "const_<bitimm>_operand" "")]
+ UNSPEC_MSA_BINSLI))]
+ "ISA_HAS_MSA"
+ "binsli.<msafmt>\t%w0,%w2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_binsr_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "0")
+ (match_operand:IMSA 2 "register_operand" "f")
+ (match_operand:IMSA 3 "register_operand" "f")]
+ UNSPEC_MSA_BINSR))]
+ "ISA_HAS_MSA"
+ "binsr.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_binsri_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "0")
+ (match_operand:IMSA 2 "register_operand" "f")
+ (match_operand 3 "const_<bitimm>_operand" "")]
+ UNSPEC_MSA_BINSRI))]
+ "ISA_HAS_MSA"
+ "binsri.<msafmt>\t%w0,%w2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_bmnz_v_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "0")
+ (match_operand:IMSA 2 "register_operand" "f")
+ (match_operand:IMSA 3 "register_operand" "f")]
+ UNSPEC_MSA_BMNZ_V))]
+ "ISA_HAS_MSA"
+ "bmnz.v\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_bmnzi_b"
+ [(set (match_operand:V16QI 0 "register_operand" "=f")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "register_operand" "f")
+ (match_operand 3 "const_uimm8_operand" "")]
+ UNSPEC_MSA_BMNZI_B))]
+ "ISA_HAS_MSA"
+ "bmnzi.b\t%w0,%w2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_bmz_v_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "0")
+ (match_operand:IMSA 2 "register_operand" "f")
+ (match_operand:IMSA 3 "register_operand" "f")]
+ UNSPEC_MSA_BMZ_V))]
+ "ISA_HAS_MSA"
+ "bmz.v\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_bmzi_b"
+ [(set (match_operand:V16QI 0 "register_operand" "=f")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "register_operand" "f")
+ (match_operand 3 "const_uimm8_operand" "")]
+ UNSPEC_MSA_BMZI_B))]
+ "ISA_HAS_MSA"
+ "bmzi.b\t%w0,%w2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_bneg_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_BNEG))]
+ "ISA_HAS_MSA"
+ "bneg.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_bnegi_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_msa_branch_operand" "")]
+ UNSPEC_MSA_BNEGI))]
+ "ISA_HAS_MSA"
+ "bnegi.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_bsel_v_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "0")
+ (match_operand:IMSA 2 "register_operand" "f")
+ (match_operand:IMSA 3 "register_operand" "f")]
+ UNSPEC_MSA_BSEL_V))]
+ "ISA_HAS_MSA"
+ "bsel.v\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")])
+
+(define_insn "msa_bseli_b"
+ [(set (match_operand:V16QI 0 "register_operand" "=f")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "register_operand" "f")
+ (match_operand 3 "const_uimm8_operand" "")]
+ UNSPEC_MSA_BSELI_B))]
+ "ISA_HAS_MSA"
+ "bseli.b\t%w0,%w2,%3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_bset_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_BSET))]
+ "ISA_HAS_MSA"
+ "bset.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_bseti_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_<bitimm>_operand" "")]
+ UNSPEC_MSA_BSETI))]
+ "ISA_HAS_MSA"
+ "bseti.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
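+;; Integer compare patterns.  ICC iterates over the comparison codes MSA
+;; implements directly; <icc> and <icci> give the register and immediate
+;; instruction suffixes, and <cmpi> selects the signed or unsigned predicate
+;; for the immediate forms.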
+(define_code_iterator ICC [eq le leu lt ltu])
+
+(define_code_attr icc
+ [(eq "eq")
+ (le "le_s")
+ (leu "le_u")
+ (lt "lt_s")
+ (ltu "lt_u")])
+
+(define_code_attr icci
+ [(eq "eqi")
+ (le "lei_s")
+ (leu "lei_u")
+ (lt "lti_s")
+ (ltu "lti_u")])
+
+(define_code_attr cmpi
+ [(eq "s")
+ (le "s")
+ (leu "u")
+ (lt "s")
+ (ltu "u")])
+
+(define_insn "msa_c<ICC:icc>_<IMSA:msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (ICC:IMSA (match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "c<ICC:icc>.<IMSA:msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_c<ICC:icci>i_<IMSA:msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (ICC:IMSA (match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "const_vector_same_cmp<ICC:cmpi>imm4_operand" "")))]
+ "ISA_HAS_MSA"
+ {
+ operands[2] = CONST_VECTOR_ELT (operands[2], 0);
+ return "c<ICC:icci>.<IMSA:msafmt>\t%w0,%w1,%d2";
+ }
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_c<ICC:icci>_<IMSA:msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(ICC:IMSA (match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_imm5_operand" ""))]
+ UNSPEC_MSA_CMPI))]
+ "ISA_HAS_MSA"
+ "c<ICC:icci>.<IMSA:msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
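+;; Dot product and dot-product accumulate.  IDOTP128 is the double-width
+;; result mode and <VHALFMODE> the source mode with half-width elements.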
+(define_insn "msa_dotp_s_<msafmt>"
+ [(set (match_operand:IDOTP128 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<VHALFMODE> 1 "register_operand" "f")
+ (match_operand:<VHALFMODE> 2 "register_operand" "f")]
+ UNSPEC_MSA_DOTP_S))]
+ "ISA_HAS_MSA"
+ "dotp_s.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_dotp_u_<msafmt>"
+ [(set (match_operand:IDOTP128 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<VHALFMODE> 1 "register_operand" "f")
+ (match_operand:<VHALFMODE> 2 "register_operand" "f")]
+ UNSPEC_MSA_DOTP_U))]
+ "ISA_HAS_MSA"
+ "dotp_u.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_dpadd_s_<msafmt>"
+ [(set (match_operand:IDOTP128 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<MODE> 1 "register_operand" "0")
+ (match_operand:<VHALFMODE> 2 "register_operand" "f")
+ (match_operand:<VHALFMODE> 3 "register_operand" "f")]
+ UNSPEC_MSA_DPADD_S))]
+ "ISA_HAS_MSA"
+ "dpadd_s.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_dpadd_u_<msafmt>"
+ [(set (match_operand:IDOTP128 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<MODE> 1 "register_operand" "0")
+ (match_operand:<VHALFMODE> 2 "register_operand" "f")
+ (match_operand:<VHALFMODE> 3 "register_operand" "f")]
+ UNSPEC_MSA_DPADD_U))]
+ "ISA_HAS_MSA"
+ "dpadd_u.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_dpsub_s_<msafmt>"
+ [(set (match_operand:IDOTP128 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<MODE> 1 "register_operand" "0")
+ (match_operand:<VHALFMODE> 2 "register_operand" "f")
+ (match_operand:<VHALFMODE> 3 "register_operand" "f")]
+ UNSPEC_MSA_DPSUB_S))]
+ "ISA_HAS_MSA"
+ "dpsub_s.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_dpsub_u_<msafmt>"
+ [(set (match_operand:IDOTP128 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<MODE> 1 "register_operand" "0")
+ (match_operand:<VHALFMODE> 2 "register_operand" "f")
+ (match_operand:<VHALFMODE> 3 "register_operand" "f")]
+ UNSPEC_MSA_DPSUB_U))]
+ "ISA_HAS_MSA"
+ "dpsub_u.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_fclass_<msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_FCLASS))]
+ "ISA_HAS_MSA"
+ "fclass.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float2_l")])
+
+(define_insn "msa_fcaf_<msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FCAF))]
+ "ISA_HAS_MSA"
+ "fcaf.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fcune_<FMSA:msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FCUNE))]
+ "ISA_HAS_MSA"
+ "fcune.<FMSA:msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
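+;; Floating-point compare patterns.  FCC maps the RTL comparison codes
+;; (including the unordered variants) onto the corresponding fc* instructions;
+;; the fs* compares are kept as unspecs below since the RTL codes are already
+;; claimed by the fc* forms.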
+(define_code_iterator FCC [unordered ordered eq ne le lt uneq unle unlt])
+
+(define_code_attr fcc
+ [(unordered "fcun")
+ (ordered "fcor")
+ (eq "fceq")
+ (ne "fcne")
+ (uneq "fcueq")
+ (unle "fcule")
+ (unlt "fcult")
+ (le "fcle")
+ (lt "fclt")])
+
+(define_insn "msa_<FCC:fcc>_<FMSA:msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (FCC:<VIMODE> (match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "<FCC:fcc>.<FMSA:msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fsaf_<FMSA:msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FSAF))]
+ "ISA_HAS_MSA"
+ "fsaf.<FMSA:msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fsor_<FMSA:msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FSOR))]
+ "ISA_HAS_MSA"
+ "fsor.<FMSA:msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fsun_<FMSA:msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FSUN))]
+ "ISA_HAS_MSA"
+ "fsun.<FMSA:msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fsune_<msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FSUNE))]
+ "ISA_HAS_MSA"
+ "fsune.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fsueq_<msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FSUEQ))]
+ "ISA_HAS_MSA"
+ "fsueq.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fseq_<msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FSEQ))]
+ "ISA_HAS_MSA"
+ "fseq.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fsne_<msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FSNE))]
+ "ISA_HAS_MSA"
+ "fsne.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fslt_<msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FSLT))]
+ "ISA_HAS_MSA"
+ "fslt.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fsult_<msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FSULT))]
+ "ISA_HAS_MSA"
+ "fsult.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fsle_<msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FSLE))]
+ "ISA_HAS_MSA"
+ "fsle.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fsule_<msafmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (unspec:<VIMODE> [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:FMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_FSULE))]
+ "ISA_HAS_MSA"
+ "fsule.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_cmp")])
+
+(define_insn "msa_fexp2_<msafmt>"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unspec:FMSA [(match_operand:FMSA 1 "register_operand" "f")
+ (match_operand:<VIMODE> 2 "register_operand" "f")]
+ UNSPEC_MSA_FEXP2))]
+ "ISA_HAS_MSA"
+ "fexp2.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fmul")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float2")])
+
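+;; Conversion helpers: FINT/fint give the integer vector mode matching each
+;; float vector mode, and FINTCNV/FINTCNV_2 supply the cnv_mode attribute
+;; values for the int-to-float and float-to-int directions.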
+(define_mode_attr FINT
+ [(V4SF "V4SI")
+ (V2DF "V2DI")])
+
+(define_mode_attr fint
+ [(V4SF "v4si")
+ (V2DF "v2di")])
+
+(define_mode_attr FINTCNV
+ [(V4SF "I2S")
+ (V2DF "I2D")])
+
+(define_mode_attr FINTCNV_2
+ [(V4SF "S2I")
+ (V2DF "D2I")])
+
+(define_insn "float<FMSA:mode><fint>2"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (float:<MODE> (match_operand:<FINT> 1 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "ffint_s.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "<FINTCNV>")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "floatuns<FMSA:mode><fint>2"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unsigned_float:<MODE> (match_operand:<FINT> 1 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "ffint_u.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "<FINTCNV>")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_mode_attr FFQ
+ [(V4SF "V8HI")
+ (V2DF "V4SI")])
+
+(define_insn "msa_ffql_<msafmt>"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<FINT> 1 "register_operand" "f")]
+ UNSPEC_MSA_FFQL))]
+ "ISA_HAS_MSA"
+ "ffql.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "<FINTCNV>")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "msa_ffqr_<msafmt>"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<FINT> 1 "register_operand" "f")]
+ UNSPEC_MSA_FFQR))]
+ "ISA_HAS_MSA"
+ "ffqr.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "<FINTCNV>")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "msa_fill_<msafmt_f>"
+ [(set (match_operand:MSA 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<REGOR0> 1 "reg_or_0_operand" "dJ")]
+ UNSPEC_MSA_FILL))]
+ "ISA_HAS_MSA"
+ "fill.<msafmt>\t%w0,%z1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+;; Note that fill.d and fill_d_f are split after reload when !TARGET_64BIT.
+
+(define_split
+ [(set (match_operand:V2DI 0 "register_operand")
+ (unspec:V2DI [(match_operand:DI 1 "reg_or_0_operand")]
+ UNSPEC_MSA_FILL))]
+ "reload_completed && TARGET_MSA && !TARGET_64BIT"
+ [(const_int 0)]
+{
+ mips_split_msa_fill_d (operands[0], operands[1]);
+ DONE;
+})
+
+(define_split
+ [(set (match_operand:V2DF 0 "register_operand")
+ (unspec:V2DF [(match_operand:DF 1 "register_operand")]
+ UNSPEC_MSA_FILL))]
+ "reload_completed && TARGET_MSA && !TARGET_64BIT"
+ [(const_int 0)]
+{
+ mips_split_msa_fill_d (operands[0], operands[1]);
+ DONE;
+})
+
+(define_insn "msa_flog2_<msafmt>"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unspec:FMSA [(match_operand:FMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_FLOG2))]
+ "ISA_HAS_MSA"
+ "flog2.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "fmul")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float2_l")])
+
+;; UNSPEC_MSA_FMAX
+(define_insn "smax<mode>3"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (smax:<MODE> (match_operand:<MODE> 1 "register_operand" "f")
+ (match_operand:<MODE> 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "fmax.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fadd")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float2")])
+
+;; UNSPEC_MSA_FMAX_A
+(define_insn "umax<mode>3"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (umax:<MODE> (match_operand:<MODE> 1 "register_operand" "f")
+ (match_operand:<MODE> 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "fmax_a.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fadd")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float2")])
+
+;; UNSPEC_MSA_FMIN
+(define_insn "smin<mode>3"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (smin:<MODE> (match_operand:<MODE> 1 "register_operand" "f")
+ (match_operand:<MODE> 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "fmin.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fadd")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float2")])
+
+;; UNSPEC_MSA_FMIN_A
+(define_insn "umin<mode>3"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (umin:<MODE> (match_operand:<MODE> 1 "register_operand" "f")
+ (match_operand:<MODE> 2 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "fmin_a.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "fadd")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float2")])
+
+(define_insn "msa_frcp_<msafmt>"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unspec:FMSA [(match_operand:FMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_FRCP))]
+ "ISA_HAS_MSA"
+ "frcp.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "frdiv")
+ (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "msa_frint_<msafmt>"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unspec:FMSA [(match_operand:FMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_FRINT))]
+ "ISA_HAS_MSA"
+ "frint.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "fmul")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "msa_frsqrt_<msafmt>"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unspec:FMSA [(match_operand:FMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_FRSQRT))]
+ "ISA_HAS_MSA"
+ "frsqrt.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "frsqrt")
+ (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "msa_ftint_s_<msafmt>"
+ [(set (match_operand:<FINT> 0 "register_operand" "=f")
+ (unspec:<FINT> [(match_operand:FMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_FTINT_S))]
+ "ISA_HAS_MSA"
+ "ftint_s.<>msafmt>\t%w0,%w1"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "<FINTCNV_2>")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "msa_ftint_u_<msafmt>"
+ [(set (match_operand:<FINT> 0 "register_operand" "=f")
+ (unspec:<FINT> [(match_operand:FMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_FTINT_U))]
+ "ISA_HAS_MSA"
+ "ftint_u.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "<FINTCNV_2>")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "msa_ftrunc_s_<msafmt>"
+ [(set (match_operand:<FINT> 0 "register_operand" "=f")
+ (unspec:<FINT> [(match_operand:FMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_FTRUNC_S))]
+ "ISA_HAS_MSA"
+ "ftrunc_s.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "<FINTCNV_2>")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "msa_ftrunc_u_<msafmt>"
+ [(set (match_operand:<FINT> 0 "register_operand" "=f")
+ (unspec:<FINT> [(match_operand:FMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_FTRUNC_U))]
+ "ISA_HAS_MSA"
+ "ftrunc_u.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "<FINTCNV_2>")
+ (set_attr "mode" "<UNITMODE>")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "msa_ftq_h"
+ [(set (match_operand:V8HI 0 "register_operand" "=f")
+ (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "f")
+ (match_operand:V4SF 2 "register_operand" "f")]
+ UNSPEC_MSA_FTQ))]
+ "ISA_HAS_MSA"
+ "ftq.h\t%w0,%w1,%w2"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "S2I")
+ (set_attr "mode" "SF")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "msa_ftq_w"
+ [(set (match_operand:V4SI 0 "register_operand" "=f")
+ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f")
+ (match_operand:V2DF 2 "register_operand" "f")]
+ UNSPEC_MSA_FTQ))]
+ "ISA_HAS_MSA"
+ "ftq.w\t%w0,%w1,%w2"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "D2I")
+ (set_attr "mode" "DF")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
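+;; Horizontal add/sub.  IZMODE is the widened result mode and IZDOUBLE the
+;; source mode holding twice as many half-width elements.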
+(define_mode_iterator IZMODE [V8HI V4SI V2DI])
+(define_mode_attr IZDOUBLE
+ [(V8HI "V16QI")
+ (V4SI "V8HI")
+ (V2DI "V4SI")])
+
+(define_insn "msa_hadd_s_<msafmt>"
+ [(set (match_operand:IZMODE 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<IZDOUBLE> 1 "register_operand" "f")
+ (match_operand:<IZDOUBLE> 2 "register_operand" "f")]
+ UNSPEC_MSA_HADD_S))]
+ "ISA_HAS_MSA"
+ "hadd_s.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_hadd_u_<msafmt>"
+ [(set (match_operand:IZMODE 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<IZDOUBLE> 1 "register_operand" "f")
+ (match_operand:<IZDOUBLE> 2 "register_operand" "f")]
+ UNSPEC_MSA_HADD_U))]
+ "ISA_HAS_MSA"
+ "hadd_u.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_hsub_s_<msafmt>"
+ [(set (match_operand:IZMODE 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<IZDOUBLE> 1 "register_operand" "f")
+ (match_operand:<IZDOUBLE> 2 "register_operand" "f")]
+ UNSPEC_MSA_HSUB_S))]
+ "ISA_HAS_MSA"
+ "hsub_s.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_hsub_u_<msafmt>"
+ [(set (match_operand:IZMODE 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<IZDOUBLE> 1 "register_operand" "f")
+ (match_operand:<IZDOUBLE> 2 "register_operand" "f")]
+ UNSPEC_MSA_HSUB_U))]
+ "ISA_HAS_MSA"
+ "hsub_u.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_ilvev_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_ILVEV))]
+ "ISA_HAS_MSA"
+ "ilvev.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_ilvl_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_ILVL))]
+ "ISA_HAS_MSA"
+ "ilvl.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_ilvod_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_ILVOD))]
+ "ISA_HAS_MSA"
+ "ilvod.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_ilvr_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_ILVR))]
+ "ISA_HAS_MSA"
+ "ilvr.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_madd_q_<msafmt>"
+ [(set (match_operand:QMSA 0 "register_operand" "=f")
+ (unspec:QMSA [(match_operand:QMSA 1 "register_operand" "0")
+ (match_operand:QMSA 2 "register_operand" "f")
+ (match_operand:QMSA 3 "register_operand" "f")]
+ UNSPEC_MSA_MADD_Q))]
+ "ISA_HAS_MSA"
+ "madd_q.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_maddr_q_<msafmt>"
+ [(set (match_operand:QMSA 0 "register_operand" "=f")
+ (unspec:QMSA [(match_operand:QMSA 1 "register_operand" "0")
+ (match_operand:QMSA 2 "register_operand" "f")
+ (match_operand:QMSA 3 "register_operand" "f")]
+ UNSPEC_MSA_MADDR_Q))]
+ "ISA_HAS_MSA"
+ "maddr_q.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_max_a_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_MAX_A))]
+ "ISA_HAS_MSA"
+ "max_a.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_max_s_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_MAX_S))]
+ "ISA_HAS_MSA"
+ "max_s.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_max_u_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_MAX_U))]
+ "ISA_HAS_MSA"
+ "max_u.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_maxi_s_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_imm5_operand" "")]
+ UNSPEC_MSA_MAXI_S))]
+ "ISA_HAS_MSA"
+ "maxi_s.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_maxi_u_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_uimm5_operand" "")]
+ UNSPEC_MSA_MAXI_U))]
+ "ISA_HAS_MSA"
+ "maxi_u.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_min_a_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_MIN_A))]
+ "ISA_HAS_MSA"
+ "min_a.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_min_s_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_MIN_S))]
+ "ISA_HAS_MSA"
+ "min_s.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_min_u_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_MIN_U))]
+ "ISA_HAS_MSA"
+ "min_u.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_mini_s_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_imm5_operand" "")]
+ UNSPEC_MSA_MINI_S))]
+ "ISA_HAS_MSA"
+ "mini_s.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_mini_u_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_uimm5_operand" "")]
+ UNSPEC_MSA_MINI_U))]
+ "ISA_HAS_MSA"
+ "mini_u.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_msub_q_<msafmt>"
+ [(set (match_operand:QMSA 0 "register_operand" "=f")
+ (unspec:QMSA [(match_operand:QMSA 1 "register_operand" "0")
+ (match_operand:QMSA 2 "register_operand" "f")
+ (match_operand:QMSA 3 "register_operand" "f")]
+ UNSPEC_MSA_MSUB_Q))]
+ "ISA_HAS_MSA"
+ "msub_q.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_msubr_q_<msafmt>"
+ [(set (match_operand:QMSA 0 "register_operand" "=f")
+ (unspec:QMSA [(match_operand:QMSA 1 "register_operand" "0")
+ (match_operand:QMSA 2 "register_operand" "f")
+ (match_operand:QMSA 3 "register_operand" "f")]
+ UNSPEC_MSA_MSUBR_Q))]
+ "ISA_HAS_MSA"
+ "msubr_q.<msafmt>\t%w0,%w2,%w3"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_mul_q_<msafmt>"
+ [(set (match_operand:QMSA 0 "register_operand" "=f")
+ (unspec:QMSA [(match_operand:QMSA 1 "register_operand" "f")
+ (match_operand:QMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_MUL_Q))]
+ "ISA_HAS_MSA"
+ "mul_q.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_mulr_q_<msafmt>"
+ [(set (match_operand:QMSA 0 "register_operand" "=f")
+ (unspec:QMSA [(match_operand:QMSA 1 "register_operand" "f")
+ (match_operand:QMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_MULR_Q))]
+ "ISA_HAS_MSA"
+ "mulr_q.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_mult")])
+
+(define_insn "msa_nloc_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_NLOC))]
+ "ISA_HAS_MSA"
+ "nloc.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "clz<mode>2"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (clz:IMSA (match_operand:IMSA 1 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "nlzc.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_nor_v_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (and:IMSA (not:IMSA (match_operand:IMSA 1 "register_operand" "f"))
+ (not:IMSA (match_operand:IMSA 2 "register_operand" "f"))))]
+ "ISA_HAS_MSA"
+ "nor.v\t%w0,%w1,%w2"
+ [(set_attr "alu_type" "nor")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_nori_b"
+ [(set (match_operand:V16QI 0 "register_operand" "=f")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")
+ (match_operand 2 "const_uimm8_operand" "")]
+ UNSPEC_MSA_NORI_B))]
+ "ISA_HAS_MSA"
+ "nori.b\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_ori_b"
+ [(set (match_operand:V16QI 0 "register_operand" "=f")
+ (ior:V16QI (match_operand:V16QI 1 "register_operand" "f")
+ (match_operand 2 "const_uimm8_operand" "")))]
+ "ISA_HAS_MSA"
+ "ori.b\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_pckev_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_PCKEV))]
+ "ISA_HAS_MSA"
+ "pckev.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_pckod_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_PCKOD))]
+ "ISA_HAS_MSA"
+ "pckod.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "popcount<mode>2"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (popcount:IMSA (match_operand:IMSA 1 "register_operand" "f")))]
+ "ISA_HAS_MSA"
+ "pcnt.<msafmt>\t%w0,%w1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_sat_s_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_<bitimm>_operand" "")]
+ UNSPEC_MSA_SAT_S))]
+ "ISA_HAS_MSA"
+ "sat_s.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic3")])
+
+(define_insn "msa_sat_u_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_<bitimm>_operand" "")]
+ UNSPEC_MSA_SAT_U))]
+ "ISA_HAS_MSA"
+ "sat_u.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic3")])
+
+(define_insn "msa_shf_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_uimm8_operand" "")]
+ UNSPEC_MSA_SHF))]
+ "ISA_HAS_MSA"
+ "shf.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_shf_w_f"
+ [(set (match_operand:V4SF 0 "register_operand" "=f")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")
+ (match_operand 2 "const_uimm8_operand" "")]
+ UNSPEC_MSA_SHF))]
+ "ISA_HAS_MSA"
+ "shf.w\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_slli_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_<bitimm>_operand" "")]
+ UNSPEC_MSA_SLLI))]
+ "ISA_HAS_MSA"
+ "slli.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_srai_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_<bitimm>_operand" "")]
+ UNSPEC_MSA_SRAI))]
+ "ISA_HAS_MSA"
+ "srai.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_srar_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_SRAR))]
+ "ISA_HAS_MSA"
+ "srar.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_srari_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_<bitimm>_operand" "")]
+ UNSPEC_MSA_SRARI))]
+ "ISA_HAS_MSA"
+ "srari.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_srli_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_<bitimm>_operand" "")]
+ UNSPEC_MSA_SRLI))]
+ "ISA_HAS_MSA"
+ "srli.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_srlr_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_SRLR))]
+ "ISA_HAS_MSA"
+ "srlr.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_srlri_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_<bitimm>_operand" "")]
+ UNSPEC_MSA_SRLRI))]
+ "ISA_HAS_MSA"
+ "srlri.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_subs_s_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_SUBS_S))]
+ "ISA_HAS_MSA"
+ "subs_s.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_subs_u_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_SUBS_U))]
+ "ISA_HAS_MSA"
+ "subs_u.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_subsuu_s_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_SUBSUU_S))]
+ "ISA_HAS_MSA"
+ "subsuu_s.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_subsus_u_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand:IMSA 2 "register_operand" "f")]
+ UNSPEC_MSA_SUBSUS_U))]
+ "ISA_HAS_MSA"
+ "subsus_u.<msafmt>\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_subvi_<msafmt>"
+ [(set (match_operand:IMSA 0 "register_operand" "=f")
+ (unspec:IMSA [(match_operand:IMSA 1 "register_operand" "f")
+ (match_operand 2 "const_uimm5_operand" "")]
+ UNSPEC_MSA_SUBVI))]
+ "ISA_HAS_MSA"
+ "subvi.<msafmt>\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_int_add")])
+
+(define_insn "msa_xori_b"
+ [(set (match_operand:V16QI 0 "register_operand" "=f")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")
+ (match_operand 2 "const_uimm8_operand" "")]
+ UNSPEC_MSA_XORI_B))]
+ "ISA_HAS_MSA"
+ "xori.b\t%w0,%w1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
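+;; Element shuffles: sld/sldi slide elements by a register or immediate
+;; amount (operand 1 is tied to the destination with "0"), while splat/splati
+;; replicate a selected element across the whole vector.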
+(define_insn "msa_sld_<msafmt_f>"
+ [(set (match_operand:MSA 0 "register_operand" "=f")
+ (unspec:MSA [(match_operand:MSA 1 "register_operand" "0")
+ (match_operand:MSA 2 "register_operand" "f")
+ (match_operand:SI 3 "reg_or_0_operand" "dJ")]
+ UNSPEC_MSA_SLD))]
+ "ISA_HAS_MSA"
+ "sld.<msafmt>\t%w0,%w2[%z3]"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_sldi_<msafmt_f>"
+ [(set (match_operand:MSA 0 "register_operand" "=f")
+ (unspec:MSA [(match_operand:MSA 1 "register_operand" "0")
+ (match_operand:MSA 2 "register_operand" "f")
+ (match_operand 3 "const_<indeximm>_operand" "")]
+ UNSPEC_MSA_SLDI))]
+ "ISA_HAS_MSA"
+ "sldi.<msafmt>\t%w0,%w2[%3]"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic_l")])
+
+(define_insn "msa_splat_<msafmt_f>"
+ [(set (match_operand:MSA 0 "register_operand" "=f")
+ (unspec:MSA [(match_operand:MSA 1 "register_operand" "f")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")]
+ UNSPEC_MSA_SPLAT))]
+ "ISA_HAS_MSA"
+ "splat.<msafmt>\t%w0,%w1[%z2]"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+(define_insn "msa_splati_<msafmt_f>"
+ [(set (match_operand:MSA 0 "register_operand" "=f")
+ (unspec:MSA [(match_operand:MSA 1 "register_operand" "f")
+ (match_operand 2 "const_<indeximm>_operand" "")]
+ UNSPEC_MSA_SPLATI))]
+ "ISA_HAS_MSA"
+ "splati.<msafmt>\t%w0,%w1[%2]"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
+;; Like msa_splati_<msafmt_f>, but operand 1 is a scalar.
+(define_insn "msa_splati_<msafmt_f>_s"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unspec:<MODE> [(match_operand:<UNITMODE> 1 "register_operand" "f")
+ (match_operand 2 "const_<indeximm>_operand" "")]
+ UNSPEC_MSA_SPLATI))]
+ "ISA_HAS_MSA"
+ "splati.<msafmt>\t%w0,%w1[%2]"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_logic")])
+
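+;; Read MSA control register number operand 1 into GPR operand 0 (cfcmsa).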
+(define_insn "msa_cfcmsa"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec_volatile:SI [(match_operand 1 "const_uimm5_operand" "")]
+ UNSPEC_MSA_CFCMSA))]
+ "ISA_HAS_MSA"
+ "cfcmsa\t%0,$%1"
+ [(set_attr "type" "mfc")
+ (set_attr "mode" "SI")
+ (set_attr "msa_execunit" "msa_eu_store4")])
+
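+;; Write GPR operand 1 to MSA control register number operand 0 (ctcmsa).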
+(define_insn "msa_ctcmsa"
+ [(unspec_volatile [(match_operand 0 "const_uimm5_operand" "")
+ (match_operand:SI 1 "register_operand" "d")]
+ UNSPEC_MSA_CTCMSA)]
+ "ISA_HAS_MSA"
+ "ctcmsa\t$%0,%1"
+ [(set_attr "type" "mtc")
+ (set_attr "mode" "SI")
+ (set_attr "msa_execunit" "msa_eu_store4")])
+
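+;; fexdo packs the elements of the two wider-format source vectors into a
+;; single vector of the next narrower floating-point format.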
+(define_insn "msa_fexdo_h"
+ [(set (match_operand:V8HI 0 "register_operand" "=f")
+ (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "f")
+ (match_operand:V4SF 2 "register_operand" "f")]
+ UNSPEC_MSA_FEXDO))]
+ "ISA_HAS_MSA"
+ "fexdo.h\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "msa_fexdo_w"
+ [(set (match_operand:V4SF 0 "register_operand" "=f")
+ (unspec:V4SF [(match_operand:V2DF 1 "register_operand" "f")
+ (match_operand:V2DF 2 "register_operand" "f")]
+ UNSPEC_MSA_FEXDO))]
+ "ISA_HAS_MSA"
+ "fexdo.w\t%w0,%w1,%w2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
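+;; fexupl converts the left-half elements of the source vector to the next
+;; wider floating-point format.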
+(define_insn "msa_fexupl_w"
+ [(set (match_operand:V4SF 0 "register_operand" "=f")
+ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "f")]
+ UNSPEC_MSA_FEXUPL))]
+ "ISA_HAS_MSA"
+ "fexupl.w\t%w0,%w1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "msa_fexupl_d"
+ [(set (match_operand:V2DF 0 "register_operand" "=f")
+ (unspec:V2DF [(match_operand:V4SF 1 "register_operand" "f")]
+ UNSPEC_MSA_FEXUPL))]
+ "ISA_HAS_MSA"
+ "fexupl.d\t%w0,%w1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
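+;; fexupr converts the right-half elements of the source vector to the next
+;; wider floating-point format.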
+(define_insn "msa_fexupr_w"
+ [(set (match_operand:V4SF 0 "register_operand" "=f")
+ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "f")]
+ UNSPEC_MSA_FEXUPR))]
+ "ISA_HAS_MSA"
+ "fexupr.w\t%w0,%w1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
+(define_insn "msa_fexupr_d"
+ [(set (match_operand:V2DF 0 "register_operand" "=f")
+ (unspec:V2DF [(match_operand:V4SF 1 "register_operand" "f")]
+ UNSPEC_MSA_FEXUPR))]
+ "ISA_HAS_MSA"
+ "fexupr.d\t%w0,%w1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_float4")])
+
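+;; Branch on the whole vector: bnz.v is taken when any bit of operand 1 is
+;; set, bz.v when all bits are zero.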
+(define_insn "msa_branch_nz_v_<msafmt_f>"
+ [(set (pc) (if_then_else
+ (ne (unspec:SI [(match_operand:MSA 1 "register_operand" "f")]
+ UNSPEC_MSA_BNZ_V)
+ (match_operand:SI 2 "const_0_operand"))
+ (label_ref (match_operand 0))
+ (pc)))]
+ "ISA_HAS_MSA"
+ {
+ return mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("bnz.v", "%w1,%0"),
+ MIPS_BRANCH ("bz.v", "%w1,%0"));
+ }
+ [(set_attr "type" "branch")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_store4")])
+
+(define_expand "msa_bnz_v_<msafmt_f>"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:MSA 1 "register_operand" "f")]
+ UNSPEC_MSA_TSTNZ_V))]
+ "ISA_HAS_MSA"
+ {
+ mips_expand_msa_branch (operands, gen_msa_branch_nz_v_<MSA:msafmt_f>);
+ DONE;
+ })
+
+(define_insn "msa_branchz_v_<msafmt_f>"
+ [(set (pc) (if_then_else
+ (eq (unspec:SI [(match_operand:MSA 1 "register_operand" "f")]
+ UNSPEC_MSA_BZ_V)
+ (match_operand:SI 2 "const_0_operand"))
+ (label_ref (match_operand 0))
+ (pc)))]
+ "ISA_HAS_MSA"
+ {
+ return mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("bz.v", "%w1,%0"),
+ MIPS_BRANCH ("bnz.v", "%w1,%0"));
+ }
+ [(set_attr "type" "branch")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_store4")])
+
+(define_expand "msa_bz_v_<msafmt_f>"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:MSA 1 "register_operand" "f")]
+ UNSPEC_MSA_TSTZ_V))]
+ "ISA_HAS_MSA"
+ {
+ mips_expand_msa_branch (operands, gen_msa_branchz_v_<MSA:msafmt_f>);
+ DONE;
+ })
+
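+;; Per-element branch: bnz.<msafmt> is taken when every element of operand 1
+;; is non-zero, bz.<msafmt> when at least one element is zero.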
+(define_insn "msa_branchnz_<msafmt_f>"
+ [(set (pc) (if_then_else
+ (ne (unspec:SI [(match_operand:MSA 1 "register_operand" "f")]
+ UNSPEC_MSA_BNZ)
+ (match_operand:SI 2 "const_0_operand"))
+ (label_ref (match_operand 0))
+ (pc)))]
+ "ISA_HAS_MSA"
+ {
+ return mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("bnz.<msafmt>", "%w1,%0"),
+ MIPS_BRANCH ("bz.<msafmt>", "%w1,%0"));
+ }
+ [(set_attr "type" "branch")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_store4")])
+
+(define_expand "msa_bnz_<msafmt>"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:IMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_TSTNZ))]
+ "ISA_HAS_MSA"
+ {
+ mips_expand_msa_branch (operands, gen_msa_branchnz_<IMSA:msafmt>);
+ DONE;
+ })
+
+(define_insn "msa_branchz_<msafmt>"
+ [(set (pc) (if_then_else
+ (eq (unspec:SI [(match_operand:IMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_BZ)
+ (match_operand:IMSA 2 "const_0_operand"))
+ (label_ref (match_operand 0))
+ (pc)))]
+ "ISA_HAS_MSA"
+ {
+ return mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("bz.<msafmt>", "%w1,%0"),
+ MIPS_BRANCH ("bnz.<msafmt>","%w1,%0"));
+ }
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")
+ (set_attr "msa_execunit" "msa_eu_store4")])
+
+(define_expand "msa_bz_<msafmt>"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:IMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_TSTZ))]
+ "ISA_HAS_MSA"
+ {
+ mips_expand_msa_branch (operands, gen_msa_branchz_<IMSA:msafmt>);
+ DONE;
+ })
+
+;; Note that this instruction freely treats a scalar register as a vector register.
+(define_insn "msa_cast_to_vector_<msafmt_f>"
+ [(set (match_operand:FMSA 0 "register_operand" "=f")
+ (unspec:FMSA [(match_operand:<UNITMODE> 1 "register_operand" "f")]
+ UNSPEC_MSA_CAST_TO_VECTOR))]
+ "ISA_HAS_MSA"
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ return "nop\t# Cast %1 to %w0";
+ else
+ return "mov.<unitfmt>\t%0,%1\t# Cast %1 to %w0";
+}
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")])
+
+;; Note that this instruction freely treats a vector register as a scalar register.
+(define_insn "msa_cast_to_scalar_<msafmt_f>"
+ [(set (match_operand:<UNITMODE> 0 "register_operand" "=f")
+ (unspec:<UNITMODE> [(match_operand:FMSA 1 "register_operand" "f")]
+ UNSPEC_MSA_CAST_TO_SCALAR))]
+ "ISA_HAS_MSA"
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ return "nop\t# Cast %w1 to %0";
+ else
+ return "mov.<unitfmt>\t%0,%1\t# Cast %w1 to %0";
+}
+ [(set_attr "type" "arith")
+ (set_attr "mode" "TI")])
diff --git a/gcc-4.9/gcc/config/mips/mips-opts.h b/gcc-4.9/gcc/config/mips/mips-opts.h
index be288d64c..51375ff3a 100644
--- a/gcc-4.9/gcc/config/mips/mips-opts.h
+++ b/gcc-4.9/gcc/config/mips/mips-opts.h
@@ -47,4 +47,11 @@ enum mips_r10k_cache_barrier_setting {
#define MIPS_ARCH_OPTION_FROM_ABI -1
#define MIPS_ARCH_OPTION_NATIVE -2
+/* Enumerates the setting of the -mclib= option. */
+enum mips_lib_setting {
+ MIPS_LIB_NEWLIB,
+ MIPS_LIB_SMALL,
+ MIPS_LIB_TINY
+};
+
#endif
diff --git a/gcc-4.9/gcc/config/mips/mips-protos.h b/gcc-4.9/gcc/config/mips/mips-protos.h
index 3d59b7b51..73d627719 100644
--- a/gcc-4.9/gcc/config/mips/mips-protos.h
+++ b/gcc-4.9/gcc/config/mips/mips-protos.h
@@ -197,8 +197,10 @@ extern bool mips_stack_address_p (rtx, enum machine_mode);
extern int mips_address_insns (rtx, enum machine_mode, bool);
extern int mips_const_insns (rtx);
extern int mips_split_const_insns (rtx);
+extern int mips_split_128bit_const_insns (rtx);
extern int mips_load_store_insns (rtx, rtx);
extern int mips_idiv_insns (void);
+extern int mips_msa_idiv_insns (void);
extern rtx mips_emit_move (rtx, rtx);
#ifdef RTX_CODE
extern void mips_emit_binary (enum rtx_code, rtx, rtx, rtx);
@@ -212,27 +214,33 @@ extern void mips_move_integer (rtx, rtx, unsigned HOST_WIDE_INT);
extern bool mips_legitimize_move (enum machine_mode, rtx, rtx);
extern rtx mips_subword (rtx, bool);
+extern rtx mips_subword_at_byte (rtx, unsigned int);
extern bool mips_split_move_p (rtx, rtx, enum mips_split_type);
-extern void mips_split_move (rtx, rtx, enum mips_split_type);
+extern bool mips_split_128bit_move_p (rtx, rtx);
extern bool mips_split_move_insn_p (rtx, rtx, rtx);
+extern void mips_split_move (rtx, rtx, enum mips_split_type);
extern void mips_split_move_insn (rtx, rtx, rtx);
+extern void mips_split_128bit_move (rtx, rtx);
+extern void mips_split_msa_copy_d (rtx, rtx, rtx, rtx (*)(rtx, rtx, rtx));
+extern void mips_split_msa_insert_d (rtx, rtx, rtx, rtx);
+extern void mips_split_msa_fill_d (rtx, rtx);
extern const char *mips_output_move (rtx, rtx);
extern bool mips_cfun_has_cprestore_slot_p (void);
extern bool mips_cprestore_address_p (rtx, bool);
extern void mips_save_gp_to_cprestore_slot (rtx, rtx, rtx, rtx);
extern void mips_restore_gp_from_cprestore_slot (rtx);
-#ifdef RTX_CODE
extern void mips_expand_scc (rtx *);
extern void mips_expand_conditional_branch (rtx *);
+#ifdef RTX_CODE
extern void mips_expand_vcondv2sf (rtx, rtx, rtx, enum rtx_code, rtx, rtx);
+#endif
extern void mips_expand_conditional_move (rtx *);
extern void mips_expand_conditional_trap (rtx);
-#endif
+extern void mips_expand_msa_branch (rtx *operands, rtx (*gen_fn)(rtx, rtx, rtx));
extern bool mips_use_pic_fn_addr_reg_p (const_rtx);
extern rtx mips_expand_call (enum mips_call_type, rtx, rtx, rtx, rtx, bool);
extern void mips_split_call (rtx, rtx);
extern bool mips_get_pic_call_symbol (rtx *, int);
-extern void mips_expand_fcc_reload (rtx, rtx, rtx);
extern void mips_set_return_address (rtx, rtx);
extern bool mips_move_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int);
extern bool mips_store_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int);
@@ -279,6 +287,13 @@ extern void mips_expand_before_return (void);
extern void mips_expand_epilogue (bool);
extern bool mips_can_use_return_insn (void);
+extern bool mips_secondary_memory_needed (enum reg_class, enum reg_class,
+ enum machine_mode);
+extern bool mips_const_vector_same_val_p (rtx, enum machine_mode);
+extern bool mips_const_vector_same_byte_p (rtx, enum machine_mode);
+extern bool mips_const_vector_same_int_p (rtx, enum machine_mode, HOST_WIDE_INT, HOST_WIDE_INT);
+extern bool mips_const_vector_bitimm_set_p (rtx, enum machine_mode);
+extern bool mips_const_vector_bitimm_clr_p (rtx, enum machine_mode);
extern bool mips_cannot_change_mode_class (enum machine_mode,
enum machine_mode, enum reg_class);
extern bool mips_dangerous_for_la25_p (rtx);
@@ -287,7 +302,9 @@ extern enum reg_class mips_secondary_reload_class (enum reg_class,
enum machine_mode,
rtx, bool);
extern int mips_class_max_nregs (enum reg_class, enum machine_mode);
-
+extern enum machine_mode mips_hard_regno_caller_save_mode (unsigned int,
+ unsigned int,
+ enum machine_mode);
extern int mips_adjust_insn_length (rtx, int);
extern void mips_output_load_label (rtx);
extern const char *mips_output_conditional_branch (rtx, rtx *, const char *,
@@ -297,6 +314,7 @@ extern const char *mips_output_sync (void);
extern const char *mips_output_sync_loop (rtx, rtx *);
extern unsigned int mips_sync_loop_insns (rtx, rtx *);
extern const char *mips_output_division (const char *, rtx *);
+extern const char *mips_msa_output_division (const char *, rtx *);
extern const char *mips_output_probe_stack_range (rtx, rtx);
extern unsigned int mips_hard_regno_nregs (int, enum machine_mode);
extern bool mips_linked_madd_p (rtx, rtx);
@@ -315,6 +333,7 @@ extern bool mips16e_save_restore_pattern_p (rtx, HOST_WIDE_INT,
extern bool mask_low_and_shift_p (enum machine_mode, rtx, rtx, int);
extern int mask_low_and_shift_len (enum machine_mode, rtx, rtx);
extern bool and_operands_ok (enum machine_mode, rtx, rtx);
+extern bool mips_fmadd_bypass (rtx, rtx);
union mips_gen_fn_ptrs
{
@@ -333,6 +352,7 @@ extern void mips_expand_vec_reduc (rtx, rtx, rtx (*)(rtx, rtx, rtx));
extern void mips_expand_vec_minmax (rtx, rtx, rtx,
rtx (*) (rtx, rtx, rtx), bool);
+extern int mips_ldst_scaled_shift (enum machine_mode);
extern bool mips_signed_immediate_p (unsigned HOST_WIDE_INT, int, int);
extern bool mips_unsigned_immediate_p (unsigned HOST_WIDE_INT, int, int);
extern const char *umips_output_save_restore (bool, rtx);
@@ -341,9 +361,10 @@ extern bool umips_load_store_pair_p (bool, rtx *);
extern void umips_output_load_store_pair (bool, rtx *);
extern bool umips_movep_target_p (rtx, rtx);
extern bool umips_12bit_offset_address_p (rtx, enum machine_mode);
+extern bool mips_9bit_offset_address_p (rtx, enum machine_mode);
extern bool lwsp_swsp_address_p (rtx, enum machine_mode);
extern bool m16_based_address_p (rtx, enum machine_mode,
- int (*)(rtx_def*, machine_mode));
+ int (*)(rtx_def*, machine_mode));
extern rtx mips_expand_thread_pointer (rtx);
extern void mips16_expand_get_fcsr (rtx);
extern void mips16_expand_set_fcsr (rtx);
@@ -359,4 +380,11 @@ typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx);
extern mulsidi3_gen_fn mips_mulsidi3_gen_fn (enum rtx_code);
#endif
+extern void mips_expand_vec_cond_expr (enum machine_mode,
+ enum machine_mode,
+ rtx *,
+ rtx (*)(rtx, rtx, rtx),
+ rtx (*)(rtx, rtx, rtx),
+ rtx (*)(rtx, rtx, rtx));
+
#endif /* ! GCC_MIPS_PROTOS_H */
diff --git a/gcc-4.9/gcc/config/mips/mips-tables.opt b/gcc-4.9/gcc/config/mips/mips-tables.opt
index 760b764e3..f963c48ff 100644
--- a/gcc-4.9/gcc/config/mips/mips-tables.opt
+++ b/gcc-4.9/gcc/config/mips/mips-tables.opt
@@ -70,575 +70,614 @@ EnumValue
Enum(mips_mips_opt_value) String(32r2) Value(5)
EnumValue
-Enum(mips_arch_opt_value) String(mips64) Value(6) Canonical
+Enum(mips_arch_opt_value) String(mips32r3) Value(6) Canonical
EnumValue
-Enum(mips_mips_opt_value) String(64) Value(6)
+Enum(mips_mips_opt_value) String(32r3) Value(6)
EnumValue
-Enum(mips_arch_opt_value) String(mips64r2) Value(7) Canonical
+Enum(mips_arch_opt_value) String(mips32r5) Value(7) Canonical
EnumValue
-Enum(mips_mips_opt_value) String(64r2) Value(7)
+Enum(mips_mips_opt_value) String(32r5) Value(7)
EnumValue
-Enum(mips_arch_opt_value) String(r3000) Value(8) Canonical
+Enum(mips_arch_opt_value) String(mips32r6) Value(8) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r3k) Value(8)
+Enum(mips_mips_opt_value) String(32r6) Value(8)
EnumValue
-Enum(mips_arch_opt_value) String(3000) Value(8)
+Enum(mips_arch_opt_value) String(mips64) Value(9) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(3k) Value(8)
+Enum(mips_mips_opt_value) String(64) Value(9)
EnumValue
-Enum(mips_arch_opt_value) String(r2000) Value(9) Canonical
+Enum(mips_arch_opt_value) String(mips64r2) Value(10) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r2k) Value(9)
+Enum(mips_mips_opt_value) String(64r2) Value(10)
EnumValue
-Enum(mips_arch_opt_value) String(2000) Value(9)
+Enum(mips_arch_opt_value) String(mips64r3) Value(11) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(2k) Value(9)
+Enum(mips_mips_opt_value) String(64r3) Value(11)
EnumValue
-Enum(mips_arch_opt_value) String(r3900) Value(10) Canonical
+Enum(mips_arch_opt_value) String(mips64r5) Value(12) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(3900) Value(10)
+Enum(mips_mips_opt_value) String(64r5) Value(12)
EnumValue
-Enum(mips_arch_opt_value) String(r6000) Value(11) Canonical
+Enum(mips_arch_opt_value) String(mips64r6) Value(13) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r6k) Value(11)
+Enum(mips_mips_opt_value) String(64r6) Value(13)
EnumValue
-Enum(mips_arch_opt_value) String(6000) Value(11)
+Enum(mips_arch_opt_value) String(r3000) Value(14) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(6k) Value(11)
+Enum(mips_arch_opt_value) String(r3k) Value(14)
EnumValue
-Enum(mips_arch_opt_value) String(r4000) Value(12) Canonical
+Enum(mips_arch_opt_value) String(3000) Value(14)
EnumValue
-Enum(mips_arch_opt_value) String(r4k) Value(12)
+Enum(mips_arch_opt_value) String(3k) Value(14)
EnumValue
-Enum(mips_arch_opt_value) String(4000) Value(12)
+Enum(mips_arch_opt_value) String(r2000) Value(15) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(4k) Value(12)
+Enum(mips_arch_opt_value) String(r2k) Value(15)
EnumValue
-Enum(mips_arch_opt_value) String(vr4100) Value(13) Canonical
+Enum(mips_arch_opt_value) String(2000) Value(15)
EnumValue
-Enum(mips_arch_opt_value) String(4100) Value(13)
+Enum(mips_arch_opt_value) String(2k) Value(15)
EnumValue
-Enum(mips_arch_opt_value) String(r4100) Value(13)
+Enum(mips_arch_opt_value) String(r3900) Value(16) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(vr4111) Value(14) Canonical
+Enum(mips_arch_opt_value) String(3900) Value(16)
EnumValue
-Enum(mips_arch_opt_value) String(4111) Value(14)
+Enum(mips_arch_opt_value) String(r6000) Value(17) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r4111) Value(14)
+Enum(mips_arch_opt_value) String(r6k) Value(17)
EnumValue
-Enum(mips_arch_opt_value) String(vr4120) Value(15) Canonical
+Enum(mips_arch_opt_value) String(6000) Value(17)
EnumValue
-Enum(mips_arch_opt_value) String(4120) Value(15)
+Enum(mips_arch_opt_value) String(6k) Value(17)
EnumValue
-Enum(mips_arch_opt_value) String(r4120) Value(15)
+Enum(mips_arch_opt_value) String(r4000) Value(18) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(vr4130) Value(16) Canonical
+Enum(mips_arch_opt_value) String(r4k) Value(18)
EnumValue
-Enum(mips_arch_opt_value) String(4130) Value(16)
+Enum(mips_arch_opt_value) String(4000) Value(18)
EnumValue
-Enum(mips_arch_opt_value) String(r4130) Value(16)
+Enum(mips_arch_opt_value) String(4k) Value(18)
EnumValue
-Enum(mips_arch_opt_value) String(vr4300) Value(17) Canonical
+Enum(mips_arch_opt_value) String(vr4100) Value(19) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(4300) Value(17)
+Enum(mips_arch_opt_value) String(4100) Value(19)
EnumValue
-Enum(mips_arch_opt_value) String(r4300) Value(17)
+Enum(mips_arch_opt_value) String(r4100) Value(19)
EnumValue
-Enum(mips_arch_opt_value) String(r4400) Value(18) Canonical
+Enum(mips_arch_opt_value) String(vr4111) Value(20) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(4400) Value(18)
+Enum(mips_arch_opt_value) String(4111) Value(20)
EnumValue
-Enum(mips_arch_opt_value) String(r4600) Value(19) Canonical
+Enum(mips_arch_opt_value) String(r4111) Value(20)
EnumValue
-Enum(mips_arch_opt_value) String(4600) Value(19)
+Enum(mips_arch_opt_value) String(vr4120) Value(21) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(orion) Value(20) Canonical
+Enum(mips_arch_opt_value) String(4120) Value(21)
EnumValue
-Enum(mips_arch_opt_value) String(r4650) Value(21) Canonical
+Enum(mips_arch_opt_value) String(r4120) Value(21)
EnumValue
-Enum(mips_arch_opt_value) String(4650) Value(21)
+Enum(mips_arch_opt_value) String(vr4130) Value(22) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r4700) Value(22) Canonical
+Enum(mips_arch_opt_value) String(4130) Value(22)
EnumValue
-Enum(mips_arch_opt_value) String(4700) Value(22)
+Enum(mips_arch_opt_value) String(r4130) Value(22)
EnumValue
-Enum(mips_arch_opt_value) String(r5900) Value(23) Canonical
+Enum(mips_arch_opt_value) String(vr4300) Value(23) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(5900) Value(23)
+Enum(mips_arch_opt_value) String(4300) Value(23)
EnumValue
-Enum(mips_arch_opt_value) String(loongson2e) Value(24) Canonical
+Enum(mips_arch_opt_value) String(r4300) Value(23)
EnumValue
-Enum(mips_arch_opt_value) String(loongson2f) Value(25) Canonical
+Enum(mips_arch_opt_value) String(r4400) Value(24) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r8000) Value(26) Canonical
+Enum(mips_arch_opt_value) String(4400) Value(24)
EnumValue
-Enum(mips_arch_opt_value) String(r8k) Value(26)
+Enum(mips_arch_opt_value) String(r4600) Value(25) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(8000) Value(26)
+Enum(mips_arch_opt_value) String(4600) Value(25)
EnumValue
-Enum(mips_arch_opt_value) String(8k) Value(26)
+Enum(mips_arch_opt_value) String(orion) Value(26) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r10000) Value(27) Canonical
+Enum(mips_arch_opt_value) String(r4650) Value(27) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r10k) Value(27)
+Enum(mips_arch_opt_value) String(4650) Value(27)
EnumValue
-Enum(mips_arch_opt_value) String(10000) Value(27)
+Enum(mips_arch_opt_value) String(r4700) Value(28) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(10k) Value(27)
+Enum(mips_arch_opt_value) String(4700) Value(28)
EnumValue
-Enum(mips_arch_opt_value) String(r12000) Value(28) Canonical
+Enum(mips_arch_opt_value) String(r5900) Value(29) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r12k) Value(28)
+Enum(mips_arch_opt_value) String(5900) Value(29)
EnumValue
-Enum(mips_arch_opt_value) String(12000) Value(28)
+Enum(mips_arch_opt_value) String(loongson2e) Value(30) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(12k) Value(28)
+Enum(mips_arch_opt_value) String(loongson2f) Value(31) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r14000) Value(29) Canonical
+Enum(mips_arch_opt_value) String(r8000) Value(32) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r14k) Value(29)
+Enum(mips_arch_opt_value) String(r8k) Value(32)
EnumValue
-Enum(mips_arch_opt_value) String(14000) Value(29)
+Enum(mips_arch_opt_value) String(8000) Value(32)
EnumValue
-Enum(mips_arch_opt_value) String(14k) Value(29)
+Enum(mips_arch_opt_value) String(8k) Value(32)
EnumValue
-Enum(mips_arch_opt_value) String(r16000) Value(30) Canonical
+Enum(mips_arch_opt_value) String(r10000) Value(33) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r16k) Value(30)
+Enum(mips_arch_opt_value) String(r10k) Value(33)
EnumValue
-Enum(mips_arch_opt_value) String(16000) Value(30)
+Enum(mips_arch_opt_value) String(10000) Value(33)
EnumValue
-Enum(mips_arch_opt_value) String(16k) Value(30)
+Enum(mips_arch_opt_value) String(10k) Value(33)
EnumValue
-Enum(mips_arch_opt_value) String(vr5000) Value(31) Canonical
+Enum(mips_arch_opt_value) String(r12000) Value(34) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(vr5k) Value(31)
+Enum(mips_arch_opt_value) String(r12k) Value(34)
EnumValue
-Enum(mips_arch_opt_value) String(5000) Value(31)
+Enum(mips_arch_opt_value) String(12000) Value(34)
EnumValue
-Enum(mips_arch_opt_value) String(5k) Value(31)
+Enum(mips_arch_opt_value) String(12k) Value(34)
EnumValue
-Enum(mips_arch_opt_value) String(r5000) Value(31)
+Enum(mips_arch_opt_value) String(r14000) Value(35) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r5k) Value(31)
+Enum(mips_arch_opt_value) String(r14k) Value(35)
EnumValue
-Enum(mips_arch_opt_value) String(vr5400) Value(32) Canonical
+Enum(mips_arch_opt_value) String(14000) Value(35)
EnumValue
-Enum(mips_arch_opt_value) String(5400) Value(32)
+Enum(mips_arch_opt_value) String(14k) Value(35)
EnumValue
-Enum(mips_arch_opt_value) String(r5400) Value(32)
+Enum(mips_arch_opt_value) String(r16000) Value(36) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(vr5500) Value(33) Canonical
+Enum(mips_arch_opt_value) String(r16k) Value(36)
EnumValue
-Enum(mips_arch_opt_value) String(5500) Value(33)
+Enum(mips_arch_opt_value) String(16000) Value(36)
EnumValue
-Enum(mips_arch_opt_value) String(r5500) Value(33)
+Enum(mips_arch_opt_value) String(16k) Value(36)
EnumValue
-Enum(mips_arch_opt_value) String(rm7000) Value(34) Canonical
+Enum(mips_arch_opt_value) String(vr5000) Value(37) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(rm7k) Value(34)
+Enum(mips_arch_opt_value) String(vr5k) Value(37)
EnumValue
-Enum(mips_arch_opt_value) String(7000) Value(34)
+Enum(mips_arch_opt_value) String(5000) Value(37)
EnumValue
-Enum(mips_arch_opt_value) String(7k) Value(34)
+Enum(mips_arch_opt_value) String(5k) Value(37)
EnumValue
-Enum(mips_arch_opt_value) String(r7000) Value(34)
+Enum(mips_arch_opt_value) String(r5000) Value(37)
EnumValue
-Enum(mips_arch_opt_value) String(r7k) Value(34)
+Enum(mips_arch_opt_value) String(r5k) Value(37)
EnumValue
-Enum(mips_arch_opt_value) String(rm9000) Value(35) Canonical
+Enum(mips_arch_opt_value) String(vr5400) Value(38) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(rm9k) Value(35)
+Enum(mips_arch_opt_value) String(5400) Value(38)
EnumValue
-Enum(mips_arch_opt_value) String(9000) Value(35)
+Enum(mips_arch_opt_value) String(r5400) Value(38)
EnumValue
-Enum(mips_arch_opt_value) String(9k) Value(35)
+Enum(mips_arch_opt_value) String(vr5500) Value(39) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r9000) Value(35)
+Enum(mips_arch_opt_value) String(5500) Value(39)
EnumValue
-Enum(mips_arch_opt_value) String(r9k) Value(35)
+Enum(mips_arch_opt_value) String(r5500) Value(39)
EnumValue
-Enum(mips_arch_opt_value) String(4kc) Value(36) Canonical
+Enum(mips_arch_opt_value) String(rm7000) Value(40) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r4kc) Value(36)
+Enum(mips_arch_opt_value) String(rm7k) Value(40)
EnumValue
-Enum(mips_arch_opt_value) String(4km) Value(37) Canonical
+Enum(mips_arch_opt_value) String(7000) Value(40)
EnumValue
-Enum(mips_arch_opt_value) String(r4km) Value(37)
+Enum(mips_arch_opt_value) String(7k) Value(40)
EnumValue
-Enum(mips_arch_opt_value) String(4kp) Value(38) Canonical
+Enum(mips_arch_opt_value) String(r7000) Value(40)
EnumValue
-Enum(mips_arch_opt_value) String(r4kp) Value(38)
+Enum(mips_arch_opt_value) String(r7k) Value(40)
EnumValue
-Enum(mips_arch_opt_value) String(4ksc) Value(39) Canonical
+Enum(mips_arch_opt_value) String(rm9000) Value(41) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r4ksc) Value(39)
+Enum(mips_arch_opt_value) String(rm9k) Value(41)
EnumValue
-Enum(mips_arch_opt_value) String(m4k) Value(40) Canonical
+Enum(mips_arch_opt_value) String(9000) Value(41)
EnumValue
-Enum(mips_arch_opt_value) String(m14kc) Value(41) Canonical
+Enum(mips_arch_opt_value) String(9k) Value(41)
EnumValue
-Enum(mips_arch_opt_value) String(m14k) Value(42) Canonical
+Enum(mips_arch_opt_value) String(r9000) Value(41)
EnumValue
-Enum(mips_arch_opt_value) String(m14ke) Value(43) Canonical
+Enum(mips_arch_opt_value) String(r9k) Value(41)
EnumValue
-Enum(mips_arch_opt_value) String(m14kec) Value(44) Canonical
+Enum(mips_arch_opt_value) String(4kc) Value(42) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(4kec) Value(45) Canonical
+Enum(mips_arch_opt_value) String(r4kc) Value(42)
EnumValue
-Enum(mips_arch_opt_value) String(r4kec) Value(45)
+Enum(mips_arch_opt_value) String(4km) Value(43) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(4kem) Value(46) Canonical
+Enum(mips_arch_opt_value) String(r4km) Value(43)
EnumValue
-Enum(mips_arch_opt_value) String(r4kem) Value(46)
+Enum(mips_arch_opt_value) String(4kp) Value(44) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(4kep) Value(47) Canonical
+Enum(mips_arch_opt_value) String(r4kp) Value(44)
EnumValue
-Enum(mips_arch_opt_value) String(r4kep) Value(47)
+Enum(mips_arch_opt_value) String(4ksc) Value(45) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(4ksd) Value(48) Canonical
+Enum(mips_arch_opt_value) String(r4ksc) Value(45)
EnumValue
-Enum(mips_arch_opt_value) String(r4ksd) Value(48)
+Enum(mips_arch_opt_value) String(m4k) Value(46) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(24kc) Value(49) Canonical
+Enum(mips_arch_opt_value) String(m14kc) Value(47) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kc) Value(49)
+Enum(mips_arch_opt_value) String(m14k) Value(48) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(24kf2_1) Value(50) Canonical
+Enum(mips_arch_opt_value) String(m14ke) Value(49) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kf2_1) Value(50)
+Enum(mips_arch_opt_value) String(m14kec) Value(50) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(24kf) Value(51) Canonical
+Enum(mips_arch_opt_value) String(4kec) Value(51) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kf) Value(51)
+Enum(mips_arch_opt_value) String(r4kec) Value(51)
EnumValue
-Enum(mips_arch_opt_value) String(24kf1_1) Value(52) Canonical
+Enum(mips_arch_opt_value) String(4kem) Value(52) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kf1_1) Value(52)
+Enum(mips_arch_opt_value) String(r4kem) Value(52)
EnumValue
-Enum(mips_arch_opt_value) String(24kfx) Value(53) Canonical
+Enum(mips_arch_opt_value) String(4kep) Value(53) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kfx) Value(53)
+Enum(mips_arch_opt_value) String(r4kep) Value(53)
EnumValue
-Enum(mips_arch_opt_value) String(24kx) Value(54) Canonical
+Enum(mips_arch_opt_value) String(4ksd) Value(54) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kx) Value(54)
+Enum(mips_arch_opt_value) String(r4ksd) Value(54)
EnumValue
-Enum(mips_arch_opt_value) String(24kec) Value(55) Canonical
+Enum(mips_arch_opt_value) String(24kc) Value(55) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kec) Value(55)
+Enum(mips_arch_opt_value) String(r24kc) Value(55)
EnumValue
-Enum(mips_arch_opt_value) String(24kef2_1) Value(56) Canonical
+Enum(mips_arch_opt_value) String(24kf2_1) Value(56) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kef2_1) Value(56)
+Enum(mips_arch_opt_value) String(r24kf2_1) Value(56)
EnumValue
-Enum(mips_arch_opt_value) String(24kef) Value(57) Canonical
+Enum(mips_arch_opt_value) String(24kf) Value(57) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kef) Value(57)
+Enum(mips_arch_opt_value) String(r24kf) Value(57)
EnumValue
-Enum(mips_arch_opt_value) String(24kef1_1) Value(58) Canonical
+Enum(mips_arch_opt_value) String(24kf1_1) Value(58) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kef1_1) Value(58)
+Enum(mips_arch_opt_value) String(r24kf1_1) Value(58)
EnumValue
-Enum(mips_arch_opt_value) String(24kefx) Value(59) Canonical
+Enum(mips_arch_opt_value) String(24kfx) Value(59) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kefx) Value(59)
+Enum(mips_arch_opt_value) String(r24kfx) Value(59)
EnumValue
-Enum(mips_arch_opt_value) String(24kex) Value(60) Canonical
+Enum(mips_arch_opt_value) String(24kx) Value(60) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r24kex) Value(60)
+Enum(mips_arch_opt_value) String(r24kx) Value(60)
EnumValue
-Enum(mips_arch_opt_value) String(34kc) Value(61) Canonical
+Enum(mips_arch_opt_value) String(24kec) Value(61) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r34kc) Value(61)
+Enum(mips_arch_opt_value) String(r24kec) Value(61)
EnumValue
-Enum(mips_arch_opt_value) String(34kf2_1) Value(62) Canonical
+Enum(mips_arch_opt_value) String(24kef2_1) Value(62) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r34kf2_1) Value(62)
+Enum(mips_arch_opt_value) String(r24kef2_1) Value(62)
EnumValue
-Enum(mips_arch_opt_value) String(34kf) Value(63) Canonical
+Enum(mips_arch_opt_value) String(24kef) Value(63) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r34kf) Value(63)
+Enum(mips_arch_opt_value) String(r24kef) Value(63)
EnumValue
-Enum(mips_arch_opt_value) String(34kf1_1) Value(64) Canonical
+Enum(mips_arch_opt_value) String(24kef1_1) Value(64) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r34kf1_1) Value(64)
+Enum(mips_arch_opt_value) String(r24kef1_1) Value(64)
EnumValue
-Enum(mips_arch_opt_value) String(34kfx) Value(65) Canonical
+Enum(mips_arch_opt_value) String(24kefx) Value(65) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r34kfx) Value(65)
+Enum(mips_arch_opt_value) String(r24kefx) Value(65)
EnumValue
-Enum(mips_arch_opt_value) String(34kx) Value(66) Canonical
+Enum(mips_arch_opt_value) String(24kex) Value(66) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r34kx) Value(66)
+Enum(mips_arch_opt_value) String(r24kex) Value(66)
EnumValue
-Enum(mips_arch_opt_value) String(34kn) Value(67) Canonical
+Enum(mips_arch_opt_value) String(34kc) Value(67) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r34kn) Value(67)
+Enum(mips_arch_opt_value) String(r34kc) Value(67)
EnumValue
-Enum(mips_arch_opt_value) String(74kc) Value(68) Canonical
+Enum(mips_arch_opt_value) String(34kf2_1) Value(68) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r74kc) Value(68)
+Enum(mips_arch_opt_value) String(r34kf2_1) Value(68)
EnumValue
-Enum(mips_arch_opt_value) String(74kf2_1) Value(69) Canonical
+Enum(mips_arch_opt_value) String(34kf) Value(69) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r74kf2_1) Value(69)
+Enum(mips_arch_opt_value) String(r34kf) Value(69)
EnumValue
-Enum(mips_arch_opt_value) String(74kf) Value(70) Canonical
+Enum(mips_arch_opt_value) String(34kf1_1) Value(70) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r74kf) Value(70)
+Enum(mips_arch_opt_value) String(r34kf1_1) Value(70)
EnumValue
-Enum(mips_arch_opt_value) String(74kf1_1) Value(71) Canonical
+Enum(mips_arch_opt_value) String(34kfx) Value(71) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r74kf1_1) Value(71)
+Enum(mips_arch_opt_value) String(r34kfx) Value(71)
EnumValue
-Enum(mips_arch_opt_value) String(74kfx) Value(72) Canonical
+Enum(mips_arch_opt_value) String(34kx) Value(72) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r74kfx) Value(72)
+Enum(mips_arch_opt_value) String(r34kx) Value(72)
EnumValue
-Enum(mips_arch_opt_value) String(74kx) Value(73) Canonical
+Enum(mips_arch_opt_value) String(34kn) Value(73) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r74kx) Value(73)
+Enum(mips_arch_opt_value) String(r34kn) Value(73)
EnumValue
-Enum(mips_arch_opt_value) String(74kf3_2) Value(74) Canonical
+Enum(mips_arch_opt_value) String(74kc) Value(74) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r74kf3_2) Value(74)
+Enum(mips_arch_opt_value) String(r74kc) Value(74)
EnumValue
-Enum(mips_arch_opt_value) String(1004kc) Value(75) Canonical
+Enum(mips_arch_opt_value) String(74kf2_1) Value(75) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r1004kc) Value(75)
+Enum(mips_arch_opt_value) String(r74kf2_1) Value(75)
EnumValue
-Enum(mips_arch_opt_value) String(1004kf2_1) Value(76) Canonical
+Enum(mips_arch_opt_value) String(74kf) Value(76) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r1004kf2_1) Value(76)
+Enum(mips_arch_opt_value) String(r74kf) Value(76)
EnumValue
-Enum(mips_arch_opt_value) String(1004kf) Value(77) Canonical
+Enum(mips_arch_opt_value) String(74kf1_1) Value(77) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r1004kf) Value(77)
+Enum(mips_arch_opt_value) String(r74kf1_1) Value(77)
EnumValue
-Enum(mips_arch_opt_value) String(1004kf1_1) Value(78) Canonical
+Enum(mips_arch_opt_value) String(74kfx) Value(78) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r1004kf1_1) Value(78)
+Enum(mips_arch_opt_value) String(r74kfx) Value(78)
EnumValue
-Enum(mips_arch_opt_value) String(5kc) Value(79) Canonical
+Enum(mips_arch_opt_value) String(74kx) Value(79) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r5kc) Value(79)
+Enum(mips_arch_opt_value) String(r74kx) Value(79)
EnumValue
-Enum(mips_arch_opt_value) String(5kf) Value(80) Canonical
+Enum(mips_arch_opt_value) String(74kf3_2) Value(80) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r5kf) Value(80)
+Enum(mips_arch_opt_value) String(r74kf3_2) Value(80)
EnumValue
-Enum(mips_arch_opt_value) String(20kc) Value(81) Canonical
+Enum(mips_arch_opt_value) String(1004kc) Value(81) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(r20kc) Value(81)
+Enum(mips_arch_opt_value) String(r1004kc) Value(81)
EnumValue
-Enum(mips_arch_opt_value) String(sb1) Value(82) Canonical
+Enum(mips_arch_opt_value) String(1004kf2_1) Value(82) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(sb1a) Value(83) Canonical
+Enum(mips_arch_opt_value) String(r1004kf2_1) Value(82)
EnumValue
-Enum(mips_arch_opt_value) String(sr71000) Value(84) Canonical
+Enum(mips_arch_opt_value) String(1004kf) Value(83) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(sr71k) Value(84)
+Enum(mips_arch_opt_value) String(r1004kf) Value(83)
EnumValue
-Enum(mips_arch_opt_value) String(xlr) Value(85) Canonical
+Enum(mips_arch_opt_value) String(1004kf1_1) Value(84) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(loongson3a) Value(86) Canonical
+Enum(mips_arch_opt_value) String(r1004kf1_1) Value(84)
EnumValue
-Enum(mips_arch_opt_value) String(octeon) Value(87) Canonical
+Enum(mips_arch_opt_value) String(p5600) Value(85) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(octeon+) Value(88) Canonical
+Enum(mips_arch_opt_value) String(5kc) Value(86) Canonical
EnumValue
-Enum(mips_arch_opt_value) String(octeon2) Value(89) Canonical
+Enum(mips_arch_opt_value) String(r5kc) Value(86)
EnumValue
-Enum(mips_arch_opt_value) String(xlp) Value(90) Canonical
+Enum(mips_arch_opt_value) String(5kf) Value(87) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r5kf) Value(87)
+
+EnumValue
+Enum(mips_arch_opt_value) String(20kc) Value(88) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r20kc) Value(88)
+
+EnumValue
+Enum(mips_arch_opt_value) String(sb1) Value(89) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(sb1a) Value(90) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(sr71000) Value(91) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(sr71k) Value(91)
+
+EnumValue
+Enum(mips_arch_opt_value) String(xlr) Value(92) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(loongson3a) Value(93) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(octeon) Value(94) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(octeon+) Value(95) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(octeon2) Value(96) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(xlp) Value(97) Canonical
diff --git a/gcc-4.9/gcc/config/mips/mips.c b/gcc-4.9/gcc/config/mips/mips.c
index 143169bc1..cf3894a8e 100644
--- a/gcc-4.9/gcc/config/mips/mips.c
+++ b/gcc-4.9/gcc/config/mips/mips.c
@@ -72,6 +72,39 @@ along with GCC; see the file COPYING3. If not see
#include "tree-pass.h"
#include "context.h"
+/* Definitions used in ready queue reordering for first scheduling pass. */
+
+/* Register types. */
+#define GPREG 0
+#define VECREG 1
+
+/* Information about GP and vector register weights
+ indexed by instruction UID. */
+struct msched_weight_info {
+ int reg_weight_gp;
+ int reg_weight_vec;
+};
+
+static msched_weight_info *regtype_weight = NULL;
+
+/* Current vector and GP register weights of scheduled instructions. */
+static int curr_regtype_pressure[2];
+
+/* Return GP register weight for INSN. */
+#define INSN_GPREG_WEIGHT(INSN) \
+ regtype_weight[INSN_UID ((INSN))].reg_weight_gp
+
+/* Return vector register weight for INSN. */
+#define INSN_VECREG_WEIGHT(INSN) \
+ regtype_weight[INSN_UID ((INSN))].reg_weight_vec
+
+/* Return current register pressure for REG_TYPE. */
+#define CURR_REGTYPE_PRESSURE(REG_TYPE) \
+ curr_regtype_pressure[(REG_TYPE)]
+
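+/* Register-pressure thresholds used by the ready queue reordering
+ heuristics. */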
+#define PROMOTE_HIGH_PRIORITY_PRESSURE 25
+#define PROMOTE_MAX_DEP_PRESSURE 15
+
/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
#define UNSPEC_ADDRESS_P(X) \
(GET_CODE (X) == UNSPEC \
@@ -162,9 +195,10 @@ along with GCC; see the file COPYING3. If not see
#define MIPS_LUI(DEST, VALUE) \
((0xf << 26) | ((DEST) << 16) | (VALUE))
-/* Return the opcode to jump to register DEST. */
+/* Return the opcode to jump to register DEST. When the JR opcode is not
+ available, use JALR $0, DEST. */
#define MIPS_JR(DEST) \
- (((DEST) << 21) | 0x8)
+ (((DEST) << 21) | (ISA_HAS_JR ? 0x8 : 0x9))
/* Return the opcode for:
@@ -585,6 +619,10 @@ const struct mips_cpu_info *mips_tune_info;
/* The ISA level associated with mips_arch. */
int mips_isa;
+/* The ISA revision level. This is 0 for MIPS I to V and N for
+ MIPS{32,64}rN. */
+int mips_isa_rev;
+
/* The architecture selected by -mipsN, or null if -mipsN wasn't used. */
static const struct mips_cpu_info *mips_isa_option_info;
@@ -648,14 +686,15 @@ static mips_one_only_stub *mips16_set_fcsr_stub;
/* Index R is the smallest register class that contains register R. */
const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
- LEA_REGS, LEA_REGS, M16_REGS, V1_REG,
- M16_REGS, M16_REGS, M16_REGS, M16_REGS,
- LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
- LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
- M16_REGS, M16_REGS, LEA_REGS, LEA_REGS,
- LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
- T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
- LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
+ LEA_REGS, LEA_REGS, M16_STORE_REGS, V1_REG,
+ M16_STORE_REGS, M16_STORE_REGS, M16_STORE_REGS, M16_STORE_REGS,
+ LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
+ LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
+ M16_REGS, M16_STORE_REGS, LEA_REGS, LEA_REGS,
+ LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
+ T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
+ LEA_REGS, M16_SP_REGS, LEA_REGS, LEA_REGS,
+
FP_REGS, FP_REGS, FP_REGS, FP_REGS,
FP_REGS, FP_REGS, FP_REGS, FP_REGS,
FP_REGS, FP_REGS, FP_REGS, FP_REGS,
@@ -763,8 +802,8 @@ static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
COSTS_N_INSNS (1), /* int_mult_di */
COSTS_N_INSNS (1), /* int_div_si */
COSTS_N_INSNS (1), /* int_div_di */
- 2, /* branch_cost */
- 4 /* memory_latency */
+ 2, /* branch_cost */
+ 4 /* memory_latency */
};
/* Costs to use when optimizing for speed, indexed by processor. */
@@ -1173,6 +1212,45 @@ static const struct mips_rtx_cost_data
COSTS_N_INSNS (68), /* int_div_di */
1, /* branch_cost */
4 /* memory_latency */
+ },
+ { /* P5600 */
+ COSTS_N_INSNS (4), /* fp_add */
+ COSTS_N_INSNS (5), /* fp_mult_sf */
+ COSTS_N_INSNS (5), /* fp_mult_df */
+ COSTS_N_INSNS (17), /* fp_div_sf */
+ COSTS_N_INSNS (17), /* fp_div_df */
+ COSTS_N_INSNS (5), /* int_mult_si */
+ COSTS_N_INSNS (5), /* int_mult_di */
+ COSTS_N_INSNS (8), /* int_div_si */
+ COSTS_N_INSNS (8), /* int_div_di */
+ 2, /* branch_cost */
+ 10 /* memory_latency */
+ },
+ { /* W32 */
+ COSTS_N_INSNS (4), /* fp_add */
+ COSTS_N_INSNS (4), /* fp_mult_sf */
+ COSTS_N_INSNS (5), /* fp_mult_df */
+ COSTS_N_INSNS (17), /* fp_div_sf */
+ COSTS_N_INSNS (32), /* fp_div_df */
+ COSTS_N_INSNS (5), /* int_mult_si */
+ COSTS_N_INSNS (5), /* int_mult_di */
+ COSTS_N_INSNS (41), /* int_div_si */
+ COSTS_N_INSNS (41), /* int_div_di */
+ 1, /* branch_cost */
+ 4 /* memory_latency */
+ },
+ { /* W64 */
+ COSTS_N_INSNS (4), /* fp_add */
+ COSTS_N_INSNS (4), /* fp_mult_sf */
+ COSTS_N_INSNS (5), /* fp_mult_df */
+ COSTS_N_INSNS (17), /* fp_div_sf */
+ COSTS_N_INSNS (32), /* fp_div_df */
+ COSTS_N_INSNS (5), /* int_mult_si */
+ COSTS_N_INSNS (5), /* int_mult_di */
+ COSTS_N_INSNS (41), /* int_div_si */
+ COSTS_N_INSNS (41), /* int_div_di */
+ 1, /* branch_cost */
+ 4 /* memory_latency */
}
};
@@ -1180,6 +1258,7 @@ static rtx mips_find_pic_call_symbol (rtx, rtx, bool);
static int mips_register_move_cost (enum machine_mode, reg_class_t,
reg_class_t);
static unsigned int mips_function_arg_boundary (enum machine_mode, const_tree);
+static enum machine_mode mips_get_reg_raw_mode (int regno);
/* This hash table keeps track of implicit "mips16" and "nomips16" attributes
for -mflip_mips16. It maps decl names onto a boolean mode setting. */
@@ -1777,6 +1856,114 @@ mips_symbol_binds_local_p (const_rtx x)
: SYMBOL_REF_LOCAL_P (x));
}
+bool
+mips_const_vector_bitimm_set_p (rtx op, enum machine_mode mode)
+{
+ if (GET_CODE (op) == CONST_VECTOR && op != const0_rtx)
+ {
+ rtx elt0 = CONST_VECTOR_ELT (op, 0);
+ HOST_WIDE_INT val = INTVAL (elt0);
+ int vlog2 = exact_log2 (val);
+
+ if (vlog2 != -1)
+ {
+ gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
+ if (!(0 <= vlog2 && vlog2 <= GET_MODE_UNIT_SIZE (mode) - 1))
+ return false;
+
+ return mips_const_vector_same_int_p (op, mode, 0, val);
+ }
+ }
+
+ return false;
+}
+
+bool
+mips_const_vector_bitimm_clr_p (rtx op, enum machine_mode mode)
+{
+ if (GET_CODE (op) == CONST_VECTOR && op != constm1_rtx)
+ {
+ rtx elt0 = CONST_VECTOR_ELT (op, 0);
+ HOST_WIDE_INT val = INTVAL (elt0);
+ int vlog2 = exact_log2 (~val);
+
+ if (vlog2 != -1)
+ {
+ gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
+ if (!(0 <= vlog2 && vlog2 <= GET_MODE_UNIT_SIZE (mode) - 1))
+ return false;
+
+ return mips_const_vector_same_val_p (op, mode);
+ }
+ }
+
+ return false;
+}
+
+/* Return true if OP is a constant vector with the number of units in MODE,
+ and each unit has the same value. */
+
+bool
+mips_const_vector_same_val_p (rtx op, enum machine_mode mode)
+{
+ int i, nunits = GET_MODE_NUNITS (mode);
+ rtx first;
+
+ if (GET_CODE (op) != CONST_VECTOR || GET_MODE (op) != mode)
+ return false;
+
+ first = CONST_VECTOR_ELT (op, 0);
+ for (i = 1; i < nunits; i++)
+ if (!rtx_equal_p (first, CONST_VECTOR_ELT (op, i)))
+ return false;
+
+ return true;
+}
+
+/* Return true if OP is a constant vector in which every byte element has
+ the same value. The mode must be V16QImode. */
+
+bool
+mips_const_vector_same_byte_p (rtx op, enum machine_mode mode)
+{
+ int i, nunits = GET_MODE_NUNITS (mode);
+ rtx first;
+
+ gcc_assert (mode == V16QImode);
+
+ if (GET_CODE (op) != CONST_VECTOR || GET_MODE (op) != mode)
+ return false;
+
+ first = CONST_VECTOR_ELT (op, 0);
+ for (i = 1; i < nunits; i++)
+ if (!rtx_equal_p (first, CONST_VECTOR_ELT (op, i)))
+ return false;
+
+ /* It is an 8-bit mode, so it does not matter whether the value is
+ signed or unsigned. */
+ return true;
+}
+
+/* Return true if OP is a constant vector with the number of units in MODE,
+ and each unit has the same integer value in the range [LOW, HIGH]. */
+
+bool
+mips_const_vector_same_int_p (rtx op, enum machine_mode mode, HOST_WIDE_INT low,
+ HOST_WIDE_INT high)
+{
+ HOST_WIDE_INT value;
+ rtx elem0;
+
+ if (!mips_const_vector_same_val_p (op, mode))
+ return false;
+
+ elem0 = CONST_VECTOR_ELT (op, 0);
+ if (!CONST_INT_P (elem0))
+ return false;
+
+ value = INTVAL (elem0);
+ return (value >= low && value <= high);
+}
+
/* Return true if rtx constants of mode MODE should be put into a small
data section. */
@@ -2148,6 +2335,11 @@ mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
static int
mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
{
+ /* MSA LD.* and ST.* cannot support loading symbols via an immediate
+ operand. */
+ if (MSA_SUPPORTED_MODE_P (mode))
+ return 0;
+
return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
}
@@ -2240,22 +2432,9 @@ mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
return true;
/* In MIPS16 mode, the stack pointer can only address word and doubleword
- values, nothing smaller. There are two problems here:
-
- (a) Instantiating virtual registers can introduce new uses of the
- stack pointer. If these virtual registers are valid addresses,
- the stack pointer should be too.
-
- (b) Most uses of the stack pointer are not made explicit until
- FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
- We don't know until that stage whether we'll be eliminating to the
- stack pointer (which needs the restriction) or the hard frame
- pointer (which doesn't).
-
- All in all, it seems more consistent to only enforce this restriction
- during and after reload. */
+ values, nothing smaller. */
if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
- return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
+ return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
}
@@ -2289,6 +2468,12 @@ mips_valid_offset_p (rtx x, enum machine_mode mode)
&& !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
return false;
+ /* MSA LD.* and ST.* support 10-bit signed offsets. */
+ if (MSA_SUPPORTED_VECTOR_MODE_P (mode)
+ && !mips_signed_immediate_p (INTVAL (x), 10,
+ mips_ldst_scaled_shift (mode)))
+ return false;
+
return true;
}
@@ -2315,6 +2500,10 @@ mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
&& GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
return false;
+ /* MSA LD.* and ST.* cannot support loading symbols via %lo($base). */
+ if (MSA_SUPPORTED_MODE_P (mode))
+ return false;
+
return true;
}
@@ -2444,6 +2633,8 @@ mips_lx_address_p (rtx addr, enum machine_mode mode)
return true;
if (ISA_HAS_LDX && mode == DImode)
return true;
+ if (ISA_HAS_MSA && MSA_SUPPORTED_MODE_P (mode))
+ return true;
return false;
}
@@ -2481,6 +2672,7 @@ mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
{
struct mips_address_info addr;
int factor;
+ bool msa_p = (TARGET_MSA && !might_split_p && MSA_SUPPORTED_MODE_P (mode));
/* BLKmode is used for single unaligned loads and stores and should
not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
@@ -2495,6 +2687,16 @@ mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
switch (addr.type)
{
case ADDRESS_REG:
+ if (msa_p)
+ {
+ /* MSA LD.* and ST.* support 10-bit signed offsets. */
+ if (MSA_SUPPORTED_VECTOR_MODE_P (mode)
+ && mips_signed_immediate_p (INTVAL (addr.offset), 10,
+ mips_ldst_scaled_shift (mode)))
+ return 1;
+ else
+ return 0;
+ }
if (TARGET_MIPS16
&& !mips16_unextended_reference_p (mode, addr.reg,
UINTVAL (addr.offset)))
@@ -2502,13 +2704,13 @@ mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
return factor;
case ADDRESS_LO_SUM:
- return TARGET_MIPS16 ? factor * 2 : factor;
+ return msa_p ? 0 : TARGET_MIPS16 ? factor * 2 : factor;
case ADDRESS_CONST_INT:
- return factor;
+ return msa_p ? 0 : factor;
case ADDRESS_SYMBOLIC:
- return factor * mips_symbol_insns (addr.symbol_type, mode);
+ return msa_p ? 0 : factor * mips_symbol_insns (addr.symbol_type, mode);
}
return 0;
}
@@ -2532,6 +2734,19 @@ mips_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0)
return mips_unsigned_immediate_p (x, bits, shift);
}
+/* Return the shift amount by which MSA LD/ST address offsets are scaled
+ for MODE. */
+
+int
+mips_ldst_scaled_shift (enum machine_mode mode)
+{
+ int shift = exact_log2 (GET_MODE_UNIT_SIZE (mode));
+
+ if (shift < 0 || shift > 8)
+ gcc_unreachable ();
+
+ return shift;
+}
+
/* Return true if X is legitimate for accessing values of mode MODE,
if it is based on a MIPS16 register, and if the offset satisfies
OFFSET_PREDICATE. */
@@ -2576,6 +2791,20 @@ umips_12bit_offset_address_p (rtx x, enum machine_mode mode)
&& UMIPS_12BIT_OFFSET_P (INTVAL (addr.offset)));
}
+/* Return true if X is a legitimate address with a 9-bit offset.
+ MODE is the mode of the value being accessed. */
+
+bool
+mips_9bit_offset_address_p (rtx x, enum machine_mode mode)
+{
+ struct mips_address_info addr;
+
+ return (mips_classify_address (&addr, x, mode, false)
+ && addr.type == ADDRESS_REG
+ && CONST_INT_P (addr.offset)
+ && MIPS_9BIT_OFFSET_P (INTVAL (addr.offset)));
+}
+
/* Return the number of instructions needed to load constant X,
assuming that BASE_INSN_LENGTH is the length of one instruction.
Return 0 if X isn't a valid constant. */
@@ -2613,8 +2842,12 @@ mips_const_insns (rtx x)
return mips_build_integer (codes, INTVAL (x));
- case CONST_DOUBLE:
case CONST_VECTOR:
+ if (TARGET_MSA
+ && mips_const_vector_same_int_p (x, GET_MODE (x), -512, 511))
+ return 1;
+ /* fall through. */
+ case CONST_DOUBLE:
/* Allow zeros for normal mode, where we can use $0. */
return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
@@ -2674,6 +2907,25 @@ mips_split_const_insns (rtx x)
return low + high;
}
+/* X is a 128-bit constant that can be handled by splitting it into
+ two or four words and loading each word separately. Return the number of
+ instructions required to do this. */
+
+int
+mips_split_128bit_const_insns (rtx x)
+{
+ int byte;
+ unsigned int elem, total = 0;
+
+ for (byte = 0; byte < GET_MODE_SIZE (TImode); byte += UNITS_PER_WORD)
+ {
+ elem = mips_const_insns (mips_subword_at_byte (x, byte));
+ gcc_assert (elem > 0);
+ total += elem;
+ }
+ return total;
+}
+
/* Return the number of instructions needed to implement INSN,
given that it loads from or stores to MEM. Assume that
BASE_INSN_LENGTH is the length of one instruction. */
@@ -2696,6 +2948,12 @@ mips_load_store_insns (rtx mem, rtx insn)
if (set && !mips_split_move_insn_p (SET_DEST (set), SET_SRC (set), insn))
might_split_p = false;
}
+ else if (GET_MODE_BITSIZE (mode) == 128)
+ {
+ set = single_set (insn);
+ if (set && !mips_split_128bit_move_p (SET_DEST (set), SET_SRC (set)))
+ might_split_p = false;
+ }
return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
}
@@ -2721,6 +2979,17 @@ mips_idiv_insns (void)
count++;
return count;
}
+
+/* Return the number of instructions needed for an MSA integer division. */
+
+int
+mips_msa_idiv_insns (void)
+{
+ if (TARGET_CHECK_ZERO_DIV)
+ return 3;
+ else
+ return 1;
+}
/* Emit a move from SRC to DEST. Assume that the move expanders can
handle all moves if !can_create_pseudo_p (). The distinction is
@@ -3980,6 +4249,10 @@ mips_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
case NE:
case UNORDERED:
case LTGT:
+ case UNGE:
+ case UNGT:
+ case UNLE:
+ case UNLT:
/* Branch comparisons have VOIDmode, so use the first operand's
mode instead. */
mode = GET_MODE (XEXP (x, 0));
@@ -4036,6 +4309,21 @@ mips_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
return false;
}
+ /* If this is an add + mult (which is equivalent to a shift left) and
+ its immediate operand satisfies the const_immlsa_operand predicate,
+ the combination can be done with a single LSA instruction. */
+ if (ISA_HAS_LSA
+ && mode == SImode
+ && GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ rtx op2 = XEXP (XEXP (x, 0), 1);
+ if (const_immlsa_operand (op2, mode))
+ {
+ *total = 0;
+ return true;
+ }
+ }
+
/* Double-word operations require three single-word operations and
an SLTU. The MIPS16 version then needs to move the result of
the SLTU from $24 to a MIPS16 register. */
@@ -4075,13 +4363,14 @@ mips_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
if (float_mode_p)
*total = mips_fp_mult_cost (mode);
else if (mode == DImode && !TARGET_64BIT)
+ /* R6 impact ??? */
/* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
where the mulsidi3 always includes an MFHI and an MFLO. */
*total = (speed
? mips_cost->int_mult_si * 3 + 6
: COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9));
else if (!speed)
- *total = COSTS_N_INSNS (ISA_HAS_MUL3 ? 1 : 2) + 1;
+ *total = COSTS_N_INSNS ((ISA_HAS_MUL3 || ISA_HAS_R6MUL) ? 1 : 2) + 1;
else if (mode == DImode)
*total = mips_cost->int_mult_di;
else
@@ -4133,6 +4422,10 @@ mips_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
return true;
}
*total = COSTS_N_INSNS (mips_idiv_insns ());
+ if (MSA_SUPPORTED_MODE_P (mode))
+ *total = COSTS_N_INSNS (mips_msa_idiv_insns ());
+ else
+ *total = COSTS_N_INSNS (mips_idiv_insns ());
}
else if (mode == DImode)
*total = mips_cost->int_div_di;
@@ -4157,6 +4450,52 @@ mips_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
}
*total = mips_zero_extend_cost (mode, XEXP (x, 0));
return false;
+ case TRUNCATE:
+ /* Costings for highpart multiplies. Matching patterns of the form:
+
+ (lshiftrt:DI (mult:DI (sign_extend:DI (...))
+ (sign_extend:DI (...)))
+ (const_int 32))
+ */
+ if (ISA_HAS_R6MUL
+ && (GET_CODE (XEXP (x, 0)) == ASHIFTRT
+ || GET_CODE (XEXP (x, 0)) == LSHIFTRT)
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+ && ((INTVAL (XEXP (XEXP (x, 0), 1)) == 32
+ && GET_MODE (XEXP (x, 0)) == DImode)
+ || (ISA_HAS_R6DMUL
+ && INTVAL (XEXP (XEXP (x, 0), 1)) == 64
+ && GET_MODE (XEXP (x, 0)) == TImode))
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
+ && ((GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SIGN_EXTEND)
+ || (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1))
+ == ZERO_EXTEND))))
+ {
+ if (!speed)
+ *total = COSTS_N_INSNS (1) + 1;
+ else if (mode == DImode)
+ *total = mips_cost->int_mult_di;
+ else
+ *total = mips_cost->int_mult_si;
+
+ /* Sign extension is free; zero extension of DImode has a cost
+ on a 64-bit core / when DMUL is present. */
+ for (int i = 0; i < 2; ++i)
+ {
+ rtx op = XEXP (XEXP (XEXP (x, 0), 0), i);
+ if (ISA_HAS_R6DMUL
+ && GET_CODE (op) == ZERO_EXTEND
+ && GET_MODE (op) == DImode)
+ *total += rtx_cost (op, MULT, i, speed);
+ else
+ *total += rtx_cost (XEXP (op, 0), GET_CODE (op), 0, speed);
+ }
+
+ return true;
+ }
+ return false;
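The pattern above is what the usual C idiom for a high-part multiply combines into. As a minimal illustration (not part of the patch), source of the following shape produces that RTL and, on R6, can be emitted as a single MUH instruction:

static int
mulhi_s32 (int a, int b)
{
  /* 32x32->64 multiply keeping only the high 32 bits; after combine this
     matches the (lshiftrt (mult (sign_extend) (sign_extend)) 32) form
     whose cost is computed above.  */
  return (int) (((long long) a * (long long) b) >> 32);
}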
case FLOAT:
case UNSIGNED_FLOAT:
@@ -4342,6 +4681,26 @@ mips_subword (rtx op, bool high_p)
return simplify_gen_subreg (word_mode, op, mode, byte);
}
+/* Return one word of 128-bit value OP, taking into account the fixed
+ endianness of certain registers. BYTE selects from the byte address. */
+
+rtx
+mips_subword_at_byte (rtx op, unsigned int byte)
+{
+ enum machine_mode mode;
+
+ mode = GET_MODE (op);
+ if (mode == VOIDmode)
+ mode = TImode;
+
+ gcc_assert (!FP_REG_RTX_P (op));
+
+ if (MEM_P (op))
+ return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
+
+ return simplify_gen_subreg (word_mode, op, mode, byte);
+}
+
/* Return true if SRC should be moved into DEST using "MULT $0, $0".
SPLIT_TYPE is the condition under which moves should be split. */
@@ -4383,10 +4742,19 @@ mips_split_move_p (rtx dest, rtx src, enum mips_split_type split_type)
return false;
}
+ /* Check if MSA moves need splitting. */
+ if (MSA_SUPPORTED_MODE_P (GET_MODE (dest))
+ || MSA_SUPPORTED_MODE_P (GET_MODE (src)))
+ return mips_split_128bit_move_p (dest, src);
+
/* Otherwise split all multiword moves. */
return size > UNITS_PER_WORD;
}
+/* Determine if the DEST,SRC move insn applies to MSA. */
+#define MSA_SPLIT_P(DEST, SRC) \
+ (MSA_SUPPORTED_MODE_P (GET_MODE (DEST)) && MSA_SUPPORTED_MODE_P (GET_MODE (SRC)))
+
/* Split a move from SRC to DEST, given that mips_split_move_p holds.
SPLIT_TYPE describes the split condition. */
@@ -4396,7 +4764,8 @@ mips_split_move (rtx dest, rtx src, enum mips_split_type split_type)
rtx low_dest;
gcc_checking_assert (mips_split_move_p (dest, src, split_type));
- if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
+ if (!MSA_SPLIT_P (dest, src)
+ && (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src)))
{
if (!TARGET_64BIT && GET_MODE (dest) == DImode)
emit_insn (gen_move_doubleword_fprdi (dest, src));
@@ -4432,6 +4801,13 @@ mips_split_move (rtx dest, rtx src, enum mips_split_type split_type)
else
emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
}
+ else if (MSA_SPLIT_P (dest, src))
+ {
+ /* Temporary sanity check: we should only get here if
+ a 128-bit move needed splitting. */
+ gcc_assert (mips_split_128bit_move_p (dest, src));
+ mips_split_128bit_move (dest, src);
+ }
else
{
/* The operation can be split into two normal moves. Decide in
@@ -4469,6 +4845,227 @@ mips_insn_split_type (rtx insn)
return SPLIT_IF_NECESSARY;
}
+/* Return true if a 128-bit move from SRC to DEST should be split into two
+ or four. */
+bool
+mips_split_128bit_move_p (rtx dest, rtx src)
+{
+ /* MSA-to-MSA moves can be done in a single instruction. */
+ if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
+ return false;
+
+ /* Check for MSA loads and stores. */
+ if (FP_REG_RTX_P (dest) && MEM_P (src))
+ return false;
+ if (FP_REG_RTX_P (src) && MEM_P (dest))
+ return false;
+
+ /* Check for MSA set to an immediate const vector with valid replicated element. */
+ if (FP_REG_RTX_P (dest)
+ && mips_const_vector_same_int_p (src, GET_MODE (src), -512, 511))
+ return false;
+
+ return true;
+}
+
+/* Split a 128-bit move from SRC to DEST. */
+
+void
+mips_split_128bit_move (rtx dest, rtx src)
+{
+ int byte, index;
+ rtx low_dest, low_src, d, s, last_d, last_s;
+
+ if (FP_REG_RTX_P (dest))
+ {
+ gcc_assert (!MEM_P (src));
+
+ rtx new_dest = dest;
+ if (!TARGET_64BIT)
+ {
+ if (GET_MODE (dest) != V4SImode)
+ new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
+ }
+ else
+ {
+ if (GET_MODE (dest) != V2DImode)
+ new_dest = simplify_gen_subreg (V2DImode, dest, GET_MODE (dest), 0);
+ }
+
+ for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode);
+ byte += UNITS_PER_WORD, index++)
+ {
+ s = mips_subword_at_byte (src, byte);
+ if (!TARGET_64BIT)
+ emit_insn (gen_msa_insert_w (new_dest, new_dest, GEN_INT (index),
+ s));
+ else
+ emit_insn (gen_msa_insert_d (new_dest, new_dest, GEN_INT (index),
+ s));
+ }
+ }
+ else if (FP_REG_RTX_P (src))
+ {
+ gcc_assert (!MEM_P (dest));
+
+ rtx new_src = src;
+ if (!TARGET_64BIT)
+ {
+ if (GET_MODE (src) != V4SImode)
+ new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0);
+ }
+ else
+ {
+ if (GET_MODE (src) != V2DImode)
+ new_src = simplify_gen_subreg (V2DImode, src, GET_MODE (src), 0);
+ }
+
+ for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode);
+ byte += UNITS_PER_WORD, index++)
+ {
+ d = mips_subword_at_byte (dest, byte);
+ if (!TARGET_64BIT)
+ emit_insn (gen_msa_copy_s_w (d, new_src, GEN_INT (index)));
+ else
+ emit_insn (gen_msa_copy_s_d (d, new_src, GEN_INT (index)));
+ }
+ }
+ else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
+ {
+ gcc_assert (TARGET_64BIT);
+
+ low_dest = mips_subword (dest, false);
+ mips_emit_move (low_dest, mips_subword (src, false));
+ emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
+ }
+ else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
+ {
+ gcc_assert (TARGET_64BIT);
+
+ mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
+ emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
+ }
+ else
+ {
+ low_dest = mips_subword_at_byte (dest, false);
+ low_src = mips_subword_at_byte (src, false);
+ last_d = NULL;
+ last_s = NULL;
+ if (REG_P (low_dest) && REG_P (low_src))
+ {
+ /* Make sure the source register is not written before reading. */
+ if (REGNO (low_dest) <= REGNO (low_src))
+ {
+ for (byte = 0; byte < GET_MODE_SIZE (TImode);
+ byte += UNITS_PER_WORD)
+ {
+ d = mips_subword_at_byte (dest, byte);
+ s = mips_subword_at_byte (src, byte);
+ mips_emit_move (d, s);
+ }
+ }
+ else
+ {
+ for (byte = GET_MODE_SIZE (TImode) - UNITS_PER_WORD; byte >= 0;
+ byte -= UNITS_PER_WORD)
+ {
+ d = mips_subword_at_byte (dest, byte);
+ s = mips_subword_at_byte (src, byte);
+ mips_emit_move (d, s);
+ }
+ }
+ }
+ else
+ {
+ for (byte = 0; byte < GET_MODE_SIZE (TImode); byte += UNITS_PER_WORD)
+ {
+ d = mips_subword_at_byte (dest, byte);
+ s = mips_subword_at_byte (src, byte);
+ if (REG_P (low_dest) && reg_overlap_mentioned_p (d, src))
+ {
+ gcc_assert (last_d == NULL && last_s == NULL);
+ last_d = d;
+ last_s = s;
+ }
+ else
+ mips_emit_move (d, s);
+ }
+ if (last_d != NULL && last_s != NULL)
+ mips_emit_move (last_d, last_s);
+ }
+ }
+}
+
+/* Split a COPY_S.D with operands DEST, SRC and INDEX. GEN_FN is the
+ function used to generate the copy of each 32-bit subword. */
+
+void
+mips_split_msa_copy_d (rtx dest, rtx src, rtx index,
+ rtx (*gen_fn)(rtx, rtx, rtx))
+{
+ gcc_assert ((GET_MODE (src) == V2DImode && GET_MODE (dest) == DImode)
+ || (GET_MODE (src) == V2DFmode && GET_MODE (dest) == DFmode));
+
+ /* Note that low is always from the lower index, and high is always
+ from the higher index. */
+ rtx low = mips_subword (dest, false);
+ rtx high = mips_subword (dest, true);
+ rtx new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0);
+
+ emit_insn (gen_fn (low, new_src, GEN_INT (INTVAL (index) * 2)));
+ emit_insn (gen_fn (high, new_src, GEN_INT (INTVAL (index) * 2 + 1)));
+}
+
+/* Split an INSERT.D with operands DEST, SRC1, INDEX and SRC2. */
+
+void
+mips_split_msa_insert_d (rtx dest, rtx src1, rtx index, rtx src2)
+{
+ gcc_assert (GET_MODE (dest) == GET_MODE (src1));
+ gcc_assert ((GET_MODE (dest) == V2DImode
+ && (GET_MODE (src2) == DImode || src2 == const0_rtx))
+ || (GET_MODE (dest) == V2DFmode && GET_MODE (src2) == DFmode));
+
+ /* Note that low is always from the lower index, and high is always
+ from the higher index. */
+ rtx low = mips_subword (src2, false);
+ rtx high = mips_subword (src2, true);
+ rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
+ rtx new_src1 = simplify_gen_subreg (V4SImode, src1, GET_MODE (src1), 0);
+ emit_insn (gen_msa_insert_w (new_dest, new_src1,
+ GEN_INT (INTVAL (index) * 2), low));
+ emit_insn (gen_msa_insert_w (new_dest, new_dest,
+ GEN_INT (INTVAL (index) * 2 + 1), high));
+}
+
+/* Split a FILL.D with operands DEST and SRC. */
+
+void
+mips_split_msa_fill_d (rtx dest, rtx src)
+{
+ gcc_assert ((GET_MODE (dest) == V2DImode
+ && (GET_MODE (src) == DImode || src == const0_rtx))
+ || (GET_MODE (dest) == V2DFmode && GET_MODE (src) == DFmode));
+
+ /* Note that low is always from the lower index, and high is always
+ from the higher index. */
+ rtx low, high;
+ if (src == const0_rtx)
+ {
+ low = src;
+ high = src;
+ }
+ else
+ {
+ low = mips_subword (src, false);
+ high = mips_subword (src, true);
+ }
+ rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
+ emit_insn (gen_msa_fill_w (new_dest, low));
+ emit_insn (gen_msa_insert_w (new_dest, new_dest, const1_rtx, high));
+ emit_insn (gen_msa_insert_w (new_dest, new_dest, GEN_INT (3), high));
+}
+
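As a rough scalar model of the split above (an illustrative sketch only; the lane numbering assumes a little-endian layout in which the low half of the doubleword occupies the even 32-bit lanes):

#include <stdint.h>

static void
fill_d_as_words (uint32_t lanes[4], uint64_t value)
{
  uint32_t low = (uint32_t) value;
  uint32_t high = (uint32_t) (value >> 32);
  int i;

  for (i = 0; i < 4; i++)
    lanes[i] = low;          /* fill.w dest, low: replicate the low word  */
  lanes[1] = high;           /* insert.w dest[1], high                    */
  lanes[3] = high;           /* insert.w dest[3], high                    */
}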
/* Return true if a move from SRC to DEST in INSN should be split. */
bool
@@ -4495,12 +5092,13 @@ mips_output_move (rtx dest, rtx src)
enum rtx_code dest_code, src_code;
enum machine_mode mode;
enum mips_symbol_type symbol_type;
- bool dbl_p;
+ bool dbl_p, msa_p;
dest_code = GET_CODE (dest);
src_code = GET_CODE (src);
mode = GET_MODE (dest);
dbl_p = (GET_MODE_SIZE (mode) == 8);
+ msa_p = MSA_SUPPORTED_MODE_P (mode);
if (mips_split_move_p (dest, src, SPLIT_IF_NECESSARY))
return "#";
@@ -4535,7 +5133,24 @@ mips_output_move (rtx dest, rtx src)
}
if (FP_REG_P (REGNO (dest)))
- return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
+ {
+ if (msa_p)
+ {
+ enum machine_mode dstmode = GET_MODE (dest);
+
+ gcc_assert (src == CONST0_RTX (GET_MODE (src)));
+
+ if (MSA_SUPPORTED_MODE_P (dstmode))
+ {
+ if (dstmode == TImode)
+ return "ldi.b\t%w0,0";
+ else
+ return "ldi.%v0\t%w0,0";
+ }
+ }
+
+ return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
+ }
if (ALL_COP_REG_P (REGNO (dest)))
{
@@ -4552,6 +5167,7 @@ mips_output_move (rtx dest, rtx src)
case 2: return "sh\t%z1,%0";
case 4: return "sw\t%z1,%0";
case 8: return "sd\t%z1,%0";
+ default: gcc_unreachable ();
}
}
if (dest_code == REG && GP_REG_P (REGNO (dest)))
@@ -4580,7 +5196,10 @@ mips_output_move (rtx dest, rtx src)
}
if (FP_REG_P (REGNO (src)))
- return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
+ {
+ gcc_assert (!msa_p);
+ return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
+ }
if (ALL_COP_REG_P (REGNO (src)))
{
@@ -4598,6 +5217,7 @@ mips_output_move (rtx dest, rtx src)
case 2: return "lhu\t%0,%1";
case 4: return "lw\t%0,%1";
case 8: return "ld\t%0,%1";
+ default: gcc_unreachable ();
}
if (src_code == CONST_INT)
@@ -4644,17 +5264,45 @@ mips_output_move (rtx dest, rtx src)
{
if (GET_MODE (dest) == V2SFmode)
return "mov.ps\t%0,%1";
+ else if (msa_p)
+ return "move.v\t%w0,%w1";
else
return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
}
if (dest_code == MEM)
- return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
+ {
+ if (MSA_SUPPORTED_MODE_P (mode))
+ {
+ if (mode == TImode)
+ {
+ /* Just use st.d/st.w to store. */
+ return TARGET_64BIT ? "st.d\t%w1,%0" : "st.w\t%w1,%0";
+ }
+ else
+ return "st.%v1\t%w1,%0";
+ }
+
+ return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
+ }
}
if (dest_code == REG && FP_REG_P (REGNO (dest)))
{
if (src_code == MEM)
- return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
+ {
+ if (MSA_SUPPORTED_MODE_P (mode))
+ {
+ if (mode == TImode)
+ {
+ /* Just use ld.d/ld.w to load. */
+ return TARGET_64BIT ? "ld.d\t%w0,%1" : "ld.w\t%w0,%1";
+ }
+ else
+ return "ld.%v0\t%w0,%1";
+ }
+
+ return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
+ }
}
if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
{
@@ -4940,17 +5588,32 @@ mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
{
enum rtx_code cmp_code;
- /* Floating-point tests use a separate C.cond.fmt comparison to
- set a condition code register. The branch or conditional move
- will then compare that register against zero.
+ /* Floating-point tests use a separate C.cond.fmt or CMP.cond.fmt
+ comparison to set a register. The branch or conditional move will
+ then compare that register against zero.
Set CMP_CODE to the code of the comparison instruction and
*CODE to the code that the branch or move should use. */
cmp_code = *code;
- *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
- *op0 = (ISA_HAS_8CC
- ? mips_allocate_fcc (CCmode)
- : gen_rtx_REG (CCmode, FPSW_REGNUM));
+ if (ISA_HAS_CCF)
+ {
+ /* All FP conditions can be implemented directly with CMP.cond.fmt
+ or by reversing the operands. */
+ *code = NE;
+ *op0 = gen_reg_rtx (CCFmode);
+ }
+ else
+ {
+ /* Three FP conditions cannot be implemented by reversing the
+ operands for C.cond.fmt, instead a reversed condition code is
+ required and a test for false. */
+ *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
+ if (ISA_HAS_8CC)
+ *op0 = mips_allocate_fcc (CCmode);
+ else
+ *op0 = gen_rtx_REG (CCmode, FPSW_REGNUM);
+ }
+
*op1 = const0_rtx;
mips_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1);
}
@@ -5003,6 +5666,30 @@ mips_expand_conditional_branch (rtx *operands)
emit_jump_insn (gen_condjump (condition, operands[3]));
}
+/* Generate RTL to test OPERANDS[1] using the branch generated by GEN_FN,
+ then set OPERANDS[0] to 1 or 0 according to whether the test is true
+ or false respectively. */
+
+void
+mips_expand_msa_branch (rtx *operands, rtx (*gen_fn)(rtx, rtx, rtx))
+{
+ rtx labelT = gen_label_rtx ();
+ rtx labelE = gen_label_rtx ();
+ rtx tmp = gen_fn (labelT, operands[1], const0_rtx);
+
+ tmp = emit_jump_insn (tmp);
+ JUMP_LABEL (tmp) = labelT;
+ emit_move_insn (operands[0], const0_rtx);
+ tmp = emit_jump_insn (gen_jump (labelE));
+ emit_barrier ();
+ JUMP_LABEL (tmp) = labelE;
+ emit_label (labelT);
+ LABEL_NUSES (labelT) = 1;
+ emit_move_insn (operands[0], const1_rtx);
+ emit_label (labelE);
+ LABEL_NUSES (labelE) = 1;
+}
+
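In scalar terms the label/branch sequence built above behaves like the sketch below (illustration only; gen_fn supplies the actual vector branch instruction):

static int
msa_test_to_bool (int vector_test_holds)
{
  int result;

  if (vector_test_holds)  /* branch emitted by gen_fn, targeting labelT  */
    result = 1;           /* code placed after labelT                    */
  else
    result = 0;           /* fall-through path, then jump to labelE      */
  return result;          /* labelE                                      */
}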
/* Implement:
(set temp (COND:CCV2 CMP_OP0 CMP_OP1))
@@ -5040,9 +5727,45 @@ mips_expand_conditional_move (rtx *operands)
mips_emit_compare (&code, &op0, &op1, true);
cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
- emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
- operands[2], operands[3])));
+
+ /* There is no direct support for general conditional GP move involving
+ two registers using SEL. */
+ if (ISA_HAS_SEL
+ && INTEGRAL_MODE_P (GET_MODE (operands[2]))
+ && register_operand (operands[2], VOIDmode)
+ && register_operand (operands[3], VOIDmode))
+ {
+ enum machine_mode mode = GET_MODE (operands[0]);
+ rtx temp = gen_reg_rtx (mode);
+ rtx temp2 = gen_reg_rtx (mode);
+
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_IF_THEN_ELSE (mode, cond,
+ operands[2], const0_rtx)));
+
+ /* Flip the test for the second operand. */
+ cond = gen_rtx_fmt_ee ((code == EQ) ? NE : EQ, GET_MODE (op0), op0, op1);
+
+ emit_insn (gen_rtx_SET (VOIDmode, temp2,
+ gen_rtx_IF_THEN_ELSE (mode, cond,
+ operands[3], const0_rtx)));
+
+ /* Merge the two results, at least one is guaranteed to be zero. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IOR (mode, temp, temp2)));
+ }
+ else
+ {
+ if (FLOAT_MODE_P (GET_MODE (operands[2])) && !ISA_HAS_SEL)
+ {
+ operands[2] = force_reg (GET_MODE (operands[0]), operands[2]);
+ operands[3] = force_reg (GET_MODE (operands[0]), operands[3]);
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
+ operands[2], operands[3])));
+ }
}
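The two-select expansion above relies on exactly one of the temporaries being zero, so an OR recovers the chosen value. A minimal scalar sketch (illustration only, mimicking SELNEZ/SELEQZ-style selects):

static unsigned int
sel_style_cmov (int cond, unsigned int a, unsigned int b)
{
  unsigned int t1 = cond ? a : 0;  /* first IF_THEN_ELSE: a when cond holds   */
  unsigned int t2 = cond ? 0 : b;  /* flipped test: b when cond does not hold */
  return t1 | t2;                  /* merge; at least one operand is zero     */
}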
/* Perform the comparison in COMPARISON, then trap if the condition holds. */
@@ -5076,7 +5799,9 @@ mips_expand_conditional_trap (rtx comparison)
mode = GET_MODE (XEXP (comparison, 0));
op0 = force_reg (mode, op0);
- if (!arith_operand (op1, mode))
+ if (!(ISA_HAS_COND_TRAPI
+ ? arith_operand (op1, mode)
+ : reg_or_0_operand (op1, mode)))
op1 = force_reg (mode, op1);
emit_insn (gen_rtx_TRAP_IF (VOIDmode,
@@ -5130,6 +5855,7 @@ mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
/* Only leading floating-point scalars are passed in
floating-point registers. We also handle vector floats the same
 way, which is OK because they are not covered by the standard ABI. */
+ gcc_assert (TARGET_PAIRED_SINGLE_FLOAT || mode != V2SFmode);
info->fpr_p = (!cum->gp_reg_found
&& cum->arg_number < 2
&& (type == 0
@@ -5145,7 +5871,9 @@ mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
/* Scalar, complex and vector floating-point types are passed in
floating-point registers, as long as this is a named rather
than a variable argument. */
+ gcc_assert (TARGET_PAIRED_SINGLE_FLOAT || mode != V2SFmode);
info->fpr_p = (named
+ && named
&& (type == 0 || FLOAT_TYPE_P (type))
&& (GET_MODE_CLASS (mode) == MODE_FLOAT
|| GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
@@ -5428,6 +6156,17 @@ mips_function_arg_boundary (enum machine_mode mode, const_tree type)
return alignment;
}
+/* Implement TARGET_GET_RAW_*_MODE. */
+
+static enum machine_mode
+mips_get_reg_raw_mode (int regno)
+{
+ if ((mips_abi == ABI_32 && !TARGET_FLOAT32)
+ && FP_REG_P (regno))
+ return DFmode;
+ return default_get_reg_raw_mode(regno);
+}
+
/* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
upward rather than downward. In other words, return true if the
first byte of the stack slot has useful data, false if the last
@@ -5585,6 +6324,7 @@ mips_return_in_msb (const_tree valtype)
static bool
mips_return_mode_in_fpr_p (enum machine_mode mode)
{
+ gcc_assert (TARGET_PAIRED_SINGLE_FLOAT || mode != V2SFmode);
return ((GET_MODE_CLASS (mode) == MODE_FLOAT
|| mode == V2SFmode
|| GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
@@ -5631,7 +6371,7 @@ mips_return_fpr_pair (enum machine_mode mode,
{
int inc;
- inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
+ inc = (TARGET_NEWABI || mips_abi == ABI_32 ? 2 : MAX_FPRS_PER_FMT);
return gen_rtx_PARALLEL
(mode,
gen_rtvec (2,
@@ -5747,19 +6487,26 @@ mips_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
/* Implement TARGET_FUNCTION_VALUE_REGNO_P.
- On the MIPS, R2 R3 and F0 F2 are the only register thus used.
- Currently, R2 and F0 are only implemented here (C has no complex type). */
+ On the MIPS, R2 R3 and F0 F2 are the only registers thus used. */
static bool
mips_function_value_regno_p (const unsigned int regno)
{
if (regno == GP_RETURN
|| regno == FP_RETURN
+ || regno == FP_RETURN + 2
|| (LONG_DOUBLE_TYPE_SIZE == 128
&& FP_RETURN != GP_RETURN
&& regno == FP_RETURN + 2))
return true;
+ if ((regno == FP_RETURN + 1
+ || regno == FP_RETURN + 3)
+ && FP_RETURN != GP_RETURN
+ && (mips_abi == ABI_32 && TARGET_FLOAT32)
+ && FP_REG_P (regno))
+ return true;
+
return false;
}
@@ -6479,7 +7226,10 @@ mips16_call_stub_mode_suffix (enum machine_mode mode)
else if (mode == DCmode)
return "dc";
else if (mode == V2SFmode)
- return "df";
+ {
+ gcc_assert (TARGET_PAIRED_SINGLE_FLOAT);
+ return "df";
+ }
else
gcc_unreachable ();
}
@@ -6503,13 +7253,27 @@ mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
if (TARGET_64BIT)
fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
reg_names[gpreg], reg_names[fpreg]);
- else if (TARGET_FLOAT64)
+ else if (ISA_HAS_MXHC1)
{
fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
}
+ else if (TARGET_FLOATXX && direction == 't')
+ {
+ /* Use the argument save area to move via memory. */
+ fprintf (asm_out_file, "\tsw\t%s,0($sp)\n", reg_names[gpreg]);
+ fprintf (asm_out_file, "\tsw\t%s,4($sp)\n", reg_names[gpreg + 1]);
+ fprintf (asm_out_file, "\tldc1\t%s,0($sp)\n", reg_names[fpreg]);
+ }
+ else if (TARGET_FLOATXX && direction == 'f')
+ {
+ /* Use the argument save area to move via memory. */
+ fprintf (asm_out_file, "\tsdc1\t%s,0($sp)\n", reg_names[fpreg]);
+ fprintf (asm_out_file, "\tlw\t%s,0($sp)\n", reg_names[gpreg]);
+ fprintf (asm_out_file, "\tlw\t%s,4($sp)\n", reg_names[gpreg + 1]);
+ }
else
{
/* Move the least-significant word. */
@@ -6915,11 +7679,11 @@ mips16_build_call_stub (rtx retval, rtx *fn_ptr, rtx args_size, int fp_code)
case SCmode:
mips_output_32bit_xfer ('f', GP_RETURN + TARGET_BIG_ENDIAN,
TARGET_BIG_ENDIAN
- ? FP_REG_FIRST + MAX_FPRS_PER_FMT
+ ? FP_REG_FIRST + 2
: FP_REG_FIRST);
mips_output_32bit_xfer ('f', GP_RETURN + TARGET_LITTLE_ENDIAN,
TARGET_LITTLE_ENDIAN
- ? FP_REG_FIRST + MAX_FPRS_PER_FMT
+ ? FP_REG_FIRST + 2
: FP_REG_FIRST);
if (GET_MODE (retval) == SCmode && TARGET_64BIT)
{
@@ -6948,10 +7712,12 @@ mips16_build_call_stub (rtx retval, rtx *fn_ptr, rtx args_size, int fp_code)
case DCmode:
mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
- FP_REG_FIRST + MAX_FPRS_PER_FMT);
+ FP_REG_FIRST + 2);
 /* Fall through. */
case DFmode:
case V2SFmode:
+ gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
+ || GET_MODE (retval) != V2SFmode);
mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
break;
@@ -7168,35 +7934,6 @@ mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
return true;
}
-/* Emit code to move general operand SRC into condition-code
- register DEST given that SCRATCH is a scratch TFmode FPR.
- The sequence is:
-
- FP1 = SRC
- FP2 = 0.0f
- DEST = FP2 < FP1
-
- where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
-
-void
-mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
-{
- rtx fp1, fp2;
-
- /* Change the source to SFmode. */
- if (MEM_P (src))
- src = adjust_address (src, SFmode, 0);
- else if (REG_P (src) || GET_CODE (src) == SUBREG)
- src = gen_rtx_REG (SFmode, true_regnum (src));
-
- fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
- fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
-
- mips_emit_move (copy_rtx (fp1), src);
- mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
- emit_insn (gen_slt_sf (dest, fp2, fp1));
-}
-
/* Implement MOVE_BY_PIECES_P. */
bool
@@ -7422,6 +8159,10 @@ mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
bool
mips_expand_block_move (rtx dest, rtx src, rtx length)
{
+ /* Disable entirely for R6 initially. */
+ if (!ISA_HAS_LWL_LWR)
+ return false;
+
if (CONST_INT_P (length))
{
if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
@@ -8183,11 +8924,17 @@ mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
switch (code)
{
case EQ:
- fputs ("c1f", file);
+ if (ISA_HAS_CCF)
+ fputs ("c1eqz", file);
+ else
+ fputs ("c1f", file);
break;
case NE:
- fputs ("c1t", file);
+ if (ISA_HAS_CCF)
+ fputs ("c1nez", file);
+ else
+ fputs ("c1t", file);
break;
default:
@@ -8209,6 +8956,7 @@ mips_print_operand_punct_valid_p (unsigned char code)
'X' Print CONST_INT OP in hexadecimal format.
'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
'd' Print CONST_INT OP in decimal.
+ 'B' Print CONST_INT as an unsigned byte [0..255].
'm' Print one less than CONST_INT OP in decimal.
'h' Print the high-part relocation associated with OP, after stripping
any outermost HIGH.
@@ -8217,6 +8965,7 @@ mips_print_operand_punct_valid_p (unsigned char code)
'N' Print the inverse of the integer branch condition for comparison OP.
'F' Print the FPU branch condition for comparison OP.
'W' Print the inverse of the FPU branch condition for comparison OP.
+ 'w' Print a MSA register.
'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
'z' for (eq:?I ...), 'n' for (ne:?I ...).
't' Like 'T', but with the EQ/NE cases reversed
@@ -8227,7 +8976,10 @@ mips_print_operand_punct_valid_p (unsigned char code)
'L' Print the low-order register in a double-word register operand.
'M' Print high-order register in a double-word register operand.
'z' Print $0 if OP is zero, otherwise print OP normally.
- 'b' Print the address of a memory operand, without offset. */
+ 'y' Print exact log2 of CONST_INT OP in decimal.
+ 'b' Print the address of a memory operand, without offset.
+ 'v' Print the insn size suffix b, h, w or d for the vector modes V16QI,
+ V8HI, V4SI, V2DI, and w or d for the vector modes V4SF and V2DF. */
static void
mips_print_operand (FILE *file, rtx op, int letter)
@@ -8266,6 +9018,26 @@ mips_print_operand (FILE *file, rtx op, int letter)
output_operand_lossage ("invalid use of '%%%c'", letter);
break;
+ case 'B':
+ if (CONST_INT_P (op))
+ {
+ HOST_WIDE_INT val = INTVAL (op);
+ if (val < 0)
+ {
+ gcc_assert (val >= -128);
+ val += 256;
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, val);
+ }
+ else
+ {
+ gcc_assert (val <= 255);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, val);
+ }
+ }
+ else
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ break;
+
case 'm':
if (CONST_INT_P (op))
fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
@@ -8273,6 +9045,19 @@ mips_print_operand (FILE *file, rtx op, int letter)
output_operand_lossage ("invalid use of '%%%c'", letter);
break;
+ case 'y':
+ if (CONST_INT_P (op))
+ {
+ int val = exact_log2 (INTVAL (op));
+ if (val != -1)
+ fprintf (file, "%d", val);
+ else
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ }
+ else
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ break;
+
case 'h':
if (code == HIGH)
op = XEXP (op, 0);
@@ -8317,7 +9102,7 @@ mips_print_operand (FILE *file, rtx op, int letter)
break;
case 'Z':
- if (ISA_HAS_8CC)
+ if (ISA_HAS_8CC || ISA_HAS_CCF)
{
mips_print_operand (file, op, 0);
fputc (',', file);
@@ -8333,6 +9118,39 @@ mips_print_operand (FILE *file, rtx op, int letter)
output_operand_lossage ("invalid use of '%%%c'", letter);
break;
+ case 'w':
+ if (code == REG && MSA_REG_P (REGNO (op)))
+ fprintf (file, "$w%s", &reg_names[REGNO (op)][2]);
+ else
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ break;
+
+ case 'v':
+ switch (GET_MODE (op))
+ {
+ case V16QImode:
+ fprintf (file, "b");
+ break;
+ case V8HImode:
+ fprintf (file, "h");
+ break;
+ case V4SImode:
+ fprintf (file, "w");
+ break;
+ case V2DImode:
+ fprintf (file, "d");
+ break;
+ case V4SFmode:
+ fprintf (file, "w");
+ break;
+ case V2DFmode:
+ fprintf (file, "d");
+ break;
+ default:
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ }
+ break;
+
default:
switch (code)
{
@@ -8686,15 +9504,30 @@ mips_dwarf_register_span (rtx reg)
rtx high, low;
enum machine_mode mode;
- /* By default, GCC maps increasing register numbers to increasing
- memory locations, but paired FPRs are always little-endian,
- regardless of the prevailing endianness. */
+ /* TARGET_FLOATXX is implemented as 32-bit floating-point registers but
+ ensures that double precision registers are treated as if they were
+ 64-bit physical registers. The code will run correctly with 32-bit or
+ 64-bit registers which means that dwarf information cannot be precisely
+ correct for all scenarios. We choose to state that the 64-bit values
+ are stored in a single 64-bit 'piece'. This slightly unusual
+ construct can then be interpreted as either a pair of registers if the
+ registers are 32-bit or a single 64-bit register depending on
+ hardware. */
mode = GET_MODE (reg);
if (FP_REG_P (REGNO (reg))
- && TARGET_BIG_ENDIAN
- && MAX_FPRS_PER_FMT > 1
+ && TARGET_FLOATXX
&& GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
{
+ return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, reg));
+ }
+ /* By default, GCC maps increasing register numbers to increasing
+ memory locations, but paired FPRs are always little-endian,
+ regardless of the prevailing endianness. */
+ else if (FP_REG_P (REGNO (reg))
+ && TARGET_BIG_ENDIAN
+ && MAX_FPRS_PER_FMT > 1
+ && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
+ {
gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
high = mips_subword (reg, true);
low = mips_subword (reg, false);
@@ -8983,6 +9816,31 @@ mips_file_start (void)
fprintf (asm_out_file, "\t.nan\t%s\n",
mips_nan == MIPS_IEEE_754_2008 ? "2008" : "legacy");
+#ifdef HAVE_AS_MODULE
+ /* Record the FP ABI. See below for comments. */
+ if (TARGET_NO_FLOAT)
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+ fputs ("\t.gnu_attribute 4, 0\n", asm_out_file);
+#else
+ ;
+#endif
+ else if (!TARGET_HARD_FLOAT_ABI)
+ fputs ("\t.module\tsoftfloat\n", asm_out_file);
+ else if (!TARGET_DOUBLE_FLOAT)
+ fputs ("\t.module\tsinglefloat\n", asm_out_file);
+ else if (TARGET_FLOATXX)
+ fputs ("\t.module\tfp=xx\n", asm_out_file);
+ else if (TARGET_FLOAT64)
+ fputs ("\t.module\tfp=64\n", asm_out_file);
+ else
+ fputs ("\t.module\tfp=32\n", asm_out_file);
+
+ if (TARGET_ODD_SPREG)
+ fputs ("\t.module\toddspreg\n", asm_out_file);
+ else
+ fputs ("\t.module\tnooddspreg\n", asm_out_file);
+
+#else
#ifdef HAVE_AS_GNU_ATTRIBUTE
{
int attr;
@@ -8996,16 +9854,31 @@ mips_file_start (void)
/* Single-float code, -msingle-float. */
else if (!TARGET_DOUBLE_FLOAT)
attr = 2;
- /* 64-bit FP registers on a 32-bit target, -mips32r2 -mfp64. */
- else if (!TARGET_64BIT && TARGET_FLOAT64)
- attr = 4;
+ /* 64-bit FP registers on a 32-bit target, -mips32r2 -mfp64.
+ Reserved attr=4.
+ This case used 12 callee save double precision registers
+ and is deprecated. */
+ /* 64-bit or 32-bit FP registers on a 32-bit target, -mfpxx. */
+ else if (TARGET_FLOATXX)
+ attr = 5;
+ /* 64-bit FP registers on a 32-bit target, -mfp64 -modd-spreg. */
+ else if (mips_abi == ABI_32 && TARGET_FLOAT64 && TARGET_ODD_SPREG)
+ attr = 6;
+ /* 64-bit FP registers on a 32-bit target, -mfp64 -mno-odd-spreg. */
+ else if (mips_abi == ABI_32 && TARGET_FLOAT64)
+ attr = 7;
/* Regular FP code, FP regs same size as GP regs, -mdouble-float. */
else
attr = 1;
fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", attr);
+
+ /* 128-bit MSA. */
+ if (TARGET_MSA)
+ fprintf (asm_out_file, "\t.gnu_attribute 8, 1\n");
}
#endif
+#endif
/* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
if (TARGET_ABICALLS)
@@ -9805,7 +10678,8 @@ mips_must_initialize_gp_p (void)
static bool
mips_interrupt_extra_call_saved_reg_p (unsigned int regno)
{
- if (MD_REG_P (regno))
+ if ((ISA_HAS_HILO || TARGET_DSP)
+ && MD_REG_P (regno))
return true;
if (TARGET_DSP && DSP_ACC_REG_P (regno))
@@ -10016,10 +10890,8 @@ mips_compute_frame_info (void)
/* Set this function's interrupt properties. */
if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
{
- if (!ISA_MIPS32R2)
- error ("the %<interrupt%> attribute requires a MIPS32r2 processor");
- else if (TARGET_HARD_FLOAT)
- error ("the %<interrupt%> attribute requires %<-msoft-float%>");
+ if (mips_isa_rev < 2)
+ error ("the %<interrupt%> attribute requires a MIPS32r2 processor or greater");
else if (TARGET_MIPS16)
error ("interrupt handlers cannot be MIPS16 functions");
else
@@ -10494,7 +11366,9 @@ mips_for_each_saved_acc (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
static void
mips_save_reg (rtx reg, rtx mem)
{
- if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
+ if (GET_MODE (reg) == DFmode
+ && (!TARGET_FLOAT64
+ || mips_abi == ABI_32))
{
rtx x1, x2;
@@ -11247,6 +12121,14 @@ mips_expand_prologue (void)
GEN_INT (5),
GEN_INT (SR_IE),
gen_rtx_REG (SImode, GP_REG_FIRST)));
+
+ if (TARGET_HARD_FLOAT)
+ /* Disable COP1 for hard-float. This will lead to an exception
+ if floating-point code is executed in an ISR. */
+ emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
+ GEN_INT (1),
+ GEN_INT (SR_COP1),
+ gen_rtx_REG (SImode, GP_REG_FIRST)));
}
else
{
@@ -11419,7 +12301,9 @@ mips_restore_reg (rtx reg, rtx mem)
$7 instead and adjust the return insn appropriately. */
if (TARGET_MIPS16 && REGNO (reg) == RETURN_ADDR_REGNUM)
reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
- else if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
+ else if (GET_MODE (reg) == DFmode
+ && (!TARGET_FLOAT64
+ || mips_abi == ABI_32))
{
mips_add_cfa_restore (mips_subword (reg, true));
mips_add_cfa_restore (mips_subword (reg, false));
@@ -11761,13 +12645,32 @@ mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
size = GET_MODE_SIZE (mode);
mclass = GET_MODE_CLASS (mode);
- if (GP_REG_P (regno))
+ if (GP_REG_P (regno) && mode != CCFmode)
return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
+ /* For MSA, allow TImode and 128-bit vector modes in all FPR. */
+ if (FP_REG_P (regno) && MSA_SUPPORTED_MODE_P (mode))
+ return true;
+
if (FP_REG_P (regno)
&& (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
|| (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
{
+ /* Deny use of odd-numbered registers for 32-bit data for
+ the O32 FP64A ABI. */
+ if (mips_abi == ABI_32 && TARGET_FLOAT64 && !TARGET_ODD_SPREG
+ && size <= 4 && (regno & 1) != 0)
+ return false;
+
+ /* Prevent the use of odd-numbered registers for CCFmode with the
+ O32-FPXX ABI, otherwise allow them.
+ The FPXX ABI does not permit double-precision data to be placed
+ in odd-numbered registers and double-precision compares write
+ them as 64-bit values. Without this restriction the R6 FPXX
+ ABI would not be able to execute in FR=1 FRE=1 mode. */
+ if (mode == CCFmode && ISA_HAS_CCF)
+ return !(TARGET_FLOATXX && (regno & 1) != 0);
+
/* Allow 64-bit vector modes for Loongson-2E/2F. */
if (TARGET_LOONGSON_VECTORS
&& (mode == V2SImode
@@ -11789,7 +12692,9 @@ mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
}
+ /* Don't allow MSA vector modes in accumulators. */
if (ACC_REG_P (regno)
+ && !MSA_SUPPORTED_VECTOR_MODE_P (mode)
&& (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
{
if (MD_REG_P (regno))
@@ -11838,7 +12743,12 @@ mips_hard_regno_nregs (int regno, enum machine_mode mode)
return (GET_MODE_SIZE (mode) + 3) / 4;
if (FP_REG_P (regno))
- return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
+ {
+ if (MSA_SUPPORTED_MODE_P (mode))
+ return 1;
+
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
+ }
/* All other registers are word-sized. */
return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
@@ -11858,13 +12768,25 @@ mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
{
if (HARD_REGNO_MODE_OK (ST_REG_FIRST, mode))
- size = MIN (size, 4);
+ {
+ if (MSA_SUPPORTED_MODE_P (mode))
+ size = MIN (size, UNITS_PER_MSA_REG);
+ else
+ size = MIN (size, UNITS_PER_FPREG);
+ }
+
AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
}
if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
{
if (HARD_REGNO_MODE_OK (FP_REG_FIRST, mode))
- size = MIN (size, UNITS_PER_FPREG);
+ {
+ if (MSA_SUPPORTED_MODE_P (mode))
+ size = MIN (size, UNITS_PER_MSA_REG);
+ else
+ size = MIN (size, UNITS_PER_FPREG);
+ }
+
AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
}
if (!hard_reg_set_empty_p (left))
@@ -11885,6 +12807,10 @@ mips_cannot_change_mode_class (enum machine_mode from,
&& INTEGRAL_MODE_P (from) && INTEGRAL_MODE_P (to))
return false;
+ /* Allow conversions between different MSA vector modes and TImode. */
+ if (MSA_SUPPORTED_MODE_P (from) && MSA_SUPPORTED_MODE_P (to))
+ return false;
+
/* Otherwise, there are several problems with changing the modes of
values in floating-point registers:
@@ -11920,13 +12846,15 @@ mips_small_register_classes_for_mode_p (enum machine_mode mode
return TARGET_MIPS16;
}
-/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
+/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction,
+ or use the MSA's move.v instruction. */
static bool
mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
{
switch (mode)
{
+ case CCFmode:
case SFmode:
return TARGET_HARD_FLOAT;
@@ -11934,10 +12862,10 @@ mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
case V2SFmode:
- return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
+ return TARGET_HARD_FLOAT;
default:
- return false;
+ return MSA_SUPPORTED_MODE_P (mode);
}
}
@@ -11999,13 +12927,16 @@ mips_canonicalize_move_class (reg_class_t rclass)
classes handled by this function. */
static int
-mips_move_to_gpr_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
- reg_class_t from)
+mips_move_to_gpr_cost (enum machine_mode mode, reg_class_t from)
{
switch (from)
{
case M16_REGS:
case GENERAL_REGS:
+ /* Two or four move. */
+ if (MSA_SUPPORTED_MODE_P (mode))
+ return TARGET_64BIT ? 4 : 8;
+
/* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
return 2;
@@ -12014,12 +12945,16 @@ mips_move_to_gpr_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
return 6;
case FP_REGS:
+ /* Two or four copy_s.*. */
+ if (MSA_SUPPORTED_MODE_P (mode))
+ return TARGET_64BIT ? 8 : 16;
+
/* MFC1, etc. */
return 4;
case ST_REGS:
- /* LUI followed by MOVF. */
- return 4;
+ /* Not possible. ST_REGS are never moved. */
+ return 0;
case COP0_REGS:
case COP2_REGS:
@@ -12043,6 +12978,10 @@ mips_move_from_gpr_cost (enum machine_mode mode, reg_class_t to)
{
case M16_REGS:
case GENERAL_REGS:
+ /* Two or four move. */
+ if (MSA_SUPPORTED_MODE_P (mode))
+ return TARGET_64BIT ? 4: 8;
+
/* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
return 2;
@@ -12051,13 +12990,16 @@ mips_move_from_gpr_cost (enum machine_mode mode, reg_class_t to)
return 6;
case FP_REGS:
+ /* Two or four insv.*. */
+ if (MSA_SUPPORTED_MODE_P (mode))
+ return TARGET_64BIT ? 8: 16;
+
/* MTC1, etc. */
return 4;
case ST_REGS:
- /* A secondary reload through an FPR scratch. */
- return (mips_register_move_cost (mode, GENERAL_REGS, FP_REGS)
- + mips_register_move_cost (mode, FP_REGS, ST_REGS));
+ /* Not possible. ST_REGS are never moved. */
+ return 0;
case COP0_REGS:
case COP2_REGS:
@@ -12090,9 +13032,6 @@ mips_register_move_cost (enum machine_mode mode,
if (to == FP_REGS && mips_mode_ok_for_mov_fmt_p (mode))
/* MOV.FMT. */
return 4;
- if (to == ST_REGS)
- /* The sequence generated by mips_expand_fcc_reload. */
- return 8;
}
/* Handle cases in which only one class deviates from the ideal. */
@@ -12114,14 +13053,51 @@ mips_register_move_cost (enum machine_mode mode,
return 0;
}
+/* Implement TARGET_REGISTER_PRIORITY. */
+
+static int
+mips_register_priority (int hard_regno)
+{
+ /* Treat MIPS16 registers with higher priority than other regs. */
+ if (TARGET_MIPS16
+ && TEST_HARD_REG_BIT (reg_class_contents[M16_REGS], hard_regno))
+ return 1;
+ return 0;
+}
+
/* Implement TARGET_MEMORY_MOVE_COST. */
static int
mips_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
{
- return (mips_cost->memory_latency
+ int multiplier = 1;
+ /* Account for the number of loads and stores that are needed to
+ handle an MSA type in GPRs. */
+ if (MSA_SUPPORTED_MODE_P (mode) && rclass != FP_REGS)
+ multiplier = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
+
+ return (mips_cost->memory_latency * multiplier
+ memory_move_secondary_cost (mode, rclass, in));
-}
+}
+
+/* Implement SECONDARY_MEMORY_NEEDED. */
+
+bool
+mips_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
+ enum machine_mode mode)
+{
+ /* Ignore spilled pseudos. */
+ if (lra_in_progress && (class1 == NO_REGS || class2 == NO_REGS))
+ return false;
+
+ if (((class1 == FP_REGS) != (class2 == FP_REGS))
+ && ((TARGET_FLOATXX && !ISA_HAS_MXHC1)
+ || (mips_abi == ABI_32 && TARGET_FLOAT64 && !TARGET_ODD_SPREG))
+ && GET_MODE_SIZE (mode) >= 8)
+ return true;
+
+ return false;
+}
/* Return the register class required for a secondary register when
copying between one of the registers in RCLASS and value X, which
@@ -12176,12 +13152,17 @@ mips_secondary_reload_class (enum reg_class rclass,
if (reg_class_subset_p (rclass, FP_REGS))
{
- if (MEM_P (x)
+ /* We don't need a reload if the pseudo is in memory. */
+ if ((MEM_P (x) || regno == -1)
&& (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
/* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
return NO_REGS;
+ if (MEM_P (x) && MSA_SUPPORTED_MODE_P (mode))
+ /* In this case we can use MSA LD.* and ST.*. */
+ return NO_REGS;
+
if (GP_REG_P (regno) || x == CONST0_RTX (mode))
/* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
return NO_REGS;
@@ -12197,7 +13178,8 @@ mips_secondary_reload_class (enum reg_class rclass,
return NO_REGS;
/* Otherwise, we need to reload through an integer register. */
- return GR_REGS;
+ if (regno >= 0)
+ return GR_REGS;
}
if (FP_REG_P (regno))
return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
@@ -12251,7 +13233,7 @@ mips_vector_mode_supported_p (enum machine_mode mode)
return TARGET_LOONGSON_VECTORS;
default:
- return false;
+ return TARGET_MSA && MSA_SUPPORTED_VECTOR_MODE_P (mode);
}
}
@@ -12270,14 +13252,44 @@ mips_scalar_mode_supported_p (enum machine_mode mode)
/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
static enum machine_mode
-mips_preferred_simd_mode (enum machine_mode mode ATTRIBUTE_UNUSED)
+mips_preferred_simd_mode (enum machine_mode mode)
{
if (TARGET_PAIRED_SINGLE_FLOAT
&& mode == SFmode)
return V2SFmode;
+
+ if (! TARGET_MSA)
+ return word_mode;
+
+ switch (mode)
+ {
+ case QImode:
+ return V16QImode;
+ case HImode:
+ return V8HImode;
+ case SImode:
+ return V4SImode;
+ case DImode:
+ return V2DImode;
+
+ case SFmode:
+ return V4SFmode;
+
+ case DFmode:
+ return V2DFmode;
+
+ default:
+ break;
+ }
return word_mode;
}
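+
+/* Implement TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES. */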
+static unsigned int
+mips_autovectorize_vector_sizes (void)
+{
+ return TARGET_MSA ? 16 : 0;
+}
+
/* Implement TARGET_INIT_LIBFUNCS. */
static void
@@ -12731,7 +13743,7 @@ mips_process_sync_loop (rtx insn, rtx *operands)
is specified. */
#define READ_OPERAND(WHAT, DEFAULT) \
WHAT = mips_get_sync_operand (operands, (int) get_attr_sync_##WHAT (insn), \
- DEFAULT)
+ DEFAULT)
/* Read the memory. */
READ_OPERAND (mem, 0);
@@ -13050,15 +14062,55 @@ mips_output_division (const char *division, rtx *operands)
}
}
else
- {
+ {
output_asm_insn ("%(bne\t%2,%.,1f", operands);
output_asm_insn (s, operands);
s = "break\t7%)\n1:";
- }
+ }
+ }
+ return s;
+}
+
+const char *
+mips_msa_output_division (const char *division, rtx *operands)
+{
+ const char *s;
+
+ s = division;
+ if (TARGET_CHECK_ZERO_DIV)
+ {
+ output_asm_insn ("%(bnz.%v0\t%w2,1f", operands);
+ output_asm_insn (s, operands);
+ s = "break\t7%)\n1:";
}
return s;
}
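A scalar model of the checked-division sequence emitted above (illustration only; __builtin_trap stands in for the "break 7" reached when the bnz guard is not taken):

static long long
checked_div (long long num, long long den)
{
  if (den == 0)          /* inverse of the bnz.<fmt> all-elements-nonzero guard */
    __builtin_trap ();   /* corresponds to "break 7"                            */
  return num / den;      /* the division instruction proper                     */
}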
+/* Return true if destination of IN_INSN is used as add source in
+ OUT_INSN. Both IN_INSN and OUT_INSN are of type fmadd. Example:
+ madd.s dst, x, y, z
+ madd.s a, dst, b, c */
+
+bool
+mips_fmadd_bypass (rtx out_insn, rtx in_insn)
+{
+ int dst_reg, src_reg;
+
+ gcc_assert (get_attr_type (in_insn) == TYPE_FMADD);
+ gcc_assert (get_attr_type (out_insn) == TYPE_FMADD);
+
+ extract_insn (in_insn);
+ dst_reg = REG_P (recog_data.operand[0]);
+
+ extract_insn (out_insn);
+ src_reg = REG_P (recog_data.operand[1]);
+
+ if (dst_reg == src_reg)
+ return true;
+
+ return false;
+}
+
/* Return true if IN_INSN is a multiply-add or multiply-subtract
instruction and if OUT_INSN assigns to the accumulator operand. */
@@ -13194,6 +14246,7 @@ mips_issue_rate (void)
case PROCESSOR_LOONGSON_2E:
case PROCESSOR_LOONGSON_2F:
case PROCESSOR_LOONGSON_3A:
+ case PROCESSOR_P5600:
return 4;
case PROCESSOR_XLP:
@@ -13329,6 +14382,9 @@ mips_multipass_dfa_lookahead (void)
if (TUNE_OCTEON)
return 2;
+ if (TUNE_P5600)
+ return 4;
+
return 0;
}
@@ -13579,6 +14635,194 @@ mips_74k_agen_reorder (rtx *ready, int nready)
break;
}
}
+
+/* These functions are called when -msched-weight is set.
+ They calculate register weight for given register type. */
+
+/* Find GP and vector register weight for given X. */
+
+static void
+find_regtype_weight (rtx x, int insn_uid)
+{
+ if (GET_CODE (x) == CLOBBER)
+ {
+ if (GET_MODE_SIZE (GET_MODE (SET_DEST (x))) <= GET_MODE_SIZE (DImode))
+ regtype_weight[insn_uid].reg_weight_gp++;
+ else
+ regtype_weight[insn_uid].reg_weight_vec++;
+ }
+
+ if (GET_CODE (x) == SET)
+ {
+ if (REG_P (SET_DEST (x)) && reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
+ return;
+
+ if (GET_MODE_SIZE (GET_MODE (SET_DEST (x))) <= GET_MODE_SIZE (DImode))
+ regtype_weight[insn_uid].reg_weight_gp++;
+ else
+ regtype_weight[insn_uid].reg_weight_vec++;
+ }
+}
+
+/* Calculate register weights for all instructions and modes
+ of all basic blocks. */
+
+static void
+mips_weight_init_global (int old_max_uid)
+{
+ rtx x, insn;
+ basic_block b;
+
+ regtype_weight = XCNEWVEC (struct msched_weight_info, old_max_uid);
+
+ FOR_EACH_BB_REVERSE_FN (b, cfun)
+ FOR_BB_INSNS (b, insn)
+ if (NONDEBUG_INSN_P (insn))
+ {
+ /* Increment weight for each register born here. */
+ x = PATTERN (insn);
+ find_regtype_weight (x, INSN_UID (insn));
+
+ if (GET_CODE (x) == PARALLEL)
+ {
+ int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ x = XVECEXP (PATTERN (insn), 0, i);
+ find_regtype_weight (x, INSN_UID (insn));
+ }
+ }
+
+ /* Decrement weight for each register that dies here. */
+ for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
+ if (REG_NOTE_KIND (x) == REG_DEAD
+ || REG_NOTE_KIND (x) == REG_UNUSED)
+ {
+ rtx note = XEXP (x, 0);
+ if (REG_P (note))
+ {
+ if (GET_MODE_SIZE (GET_MODE (note))
+ <= GET_MODE_SIZE (DImode))
+ regtype_weight[INSN_UID (insn)].reg_weight_gp--;
+ else
+ regtype_weight[INSN_UID (insn)].reg_weight_vec--;
+ }
+ }
+ }
+
+ CURR_REGTYPE_PRESSURE (GPREG) = 0;
+ CURR_REGTYPE_PRESSURE (VECREG) = 0;
+}
+
+/* Implement TARGET_SCHED_INIT_GLOBAL. */
+
+static void
+mips_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
+ int verbose ATTRIBUTE_UNUSED,
+ int old_max_uid)
+{
+ if (TARGET_SCHED_WEIGHT)
+ mips_weight_init_global (old_max_uid);
+}
+
+static void
+mips_weight_finish_global ()
+{
+ if (regtype_weight != NULL)
+ XDELETEVEC (regtype_weight);
+}
+
+/* Implement TARGET_SCHED_FINISH_GLOBAL. */
+
+static void
+mips_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
+ int verbose ATTRIBUTE_UNUSED)
+{
+ if (TARGET_SCHED_WEIGHT)
+ mips_weight_finish_global ();
+}
+
+/* This is a TARGET_SCHED_WEIGHT (option -msched-weight) helper
+ function which is called during reordering of instructions in
+ the first pass of the scheduler. The function swaps the instruction
+ at the bottom (NREADY - 1) of the READY list with another instruction
+ in READY list as per following algorithm. The scheduler then picks the
+ instruction at READY[NREADY - 1] and schedules it.
+
+ When an instruction is scheduled its register weight is accumulated
+ in CURR_REGTYPE_PRESSURE (mode). Which is number of live registers
+ at that instruction.
+
+ If the current register pressure (CURR_REGTYPE_PRESSURE) is
+ more than PROMOTE_HIGH_PRIORITY_PRESSURE (25 registers) and if the
+ priority of the consumer of the instruction in question (INSN) is
+ more than the priority of READY[NREADY - 1] then INSN is swapped
+ with READY[NREADY - 1].
+
+ If the current register pressure (CURR_REGTYPE_PRESSURE) is
+ more than PROMOTE_MAX_DEP_PRESSURE (15 registers) then INSN
+ with maximum forward dependencies is swapped with the
+ READY[NREADY - 1]. */
+
+static void
+mips_sched_weight (rtx *ready, int nready)
+{
+ int mode, toswap, i;
+ int max_forw_dependency = 0;
+
+ toswap = nready - 1;
+
+#define INSN_TICK(INSN) (HID (INSN)->tick)
+
+ mode = CURR_REGTYPE_PRESSURE (GPREG) > CURR_REGTYPE_PRESSURE (VECREG)
+ ? GPREG : VECREG;
+
+ for (i = nready - 1; i >= 0; i--)
+ {
+ rtx insn = ready[i], consumer_insn = NULL_RTX;
+ sd_iterator_def sd_it;
+ dep_t dep;
+ int forw_dependency_count = 0;
+
+ FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
+ {
+ if (! DEBUG_INSN_P (DEP_CON (dep)))
+ forw_dependency_count++;
+ consumer_insn = DEP_CON (dep);
+ }
+
+ if (CURR_REGTYPE_PRESSURE (mode) > PROMOTE_HIGH_PRIORITY_PRESSURE)
+ {
+ if (consumer_insn != NULL_RTX
+ && INSN_PRIORITY_KNOWN (consumer_insn)
+ && (INSN_PRIORITY (consumer_insn)
+ > INSN_PRIORITY (ready[toswap])))
+ {
+ max_forw_dependency = forw_dependency_count;
+ toswap = i;
+ }
+ }
+ else if (CURR_REGTYPE_PRESSURE (mode) > PROMOTE_MAX_DEP_PRESSURE)
+ {
+ if (forw_dependency_count > max_forw_dependency
+ || ((forw_dependency_count == max_forw_dependency)
+ && (INSN_TICK (insn) >= INSN_TICK (ready[toswap]))
+ && (INSN_UID (insn) < INSN_UID (ready[toswap]))))
+ {
+ max_forw_dependency = forw_dependency_count;
+ toswap = i;
+ }
+ }
+ }
+
+ if (toswap != (nready-1))
+ {
+ rtx temp = ready[nready-1];
+ ready[nready-1] = ready[toswap];
+ ready[toswap] = temp;
+ }
+#undef INSN_TICK
+}
/* Implement TARGET_SCHED_INIT. */
@@ -13595,6 +14839,12 @@ mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
pointed to ALU2. */
mips_ls2.alu1_turn_p = false;
mips_ls2.falu1_turn_p = true;
+
+ if (TARGET_SCHED_WEIGHT)
+ {
+ CURR_REGTYPE_PRESSURE (GPREG) = 0;
+ CURR_REGTYPE_PRESSURE (VECREG) = 0;
+ }
}
/* Subroutine used by TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
@@ -13616,6 +14866,11 @@ mips_sched_reorder_1 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
if (TUNE_74K)
mips_74k_agen_reorder (ready, *nreadyp);
+
+ if (! reload_completed
+ && TARGET_SCHED_WEIGHT
+ && *nreadyp > 1)
+ mips_sched_weight (ready, *nreadyp);
}
/* Implement TARGET_SCHED_REORDER. */
@@ -13687,6 +14942,16 @@ mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
mips_74k_agen_init (insn);
else if (TUNE_LOONGSON_2EF)
mips_ls2_variable_issue (insn);
+ else if (TARGET_SCHED_WEIGHT)
+ {
+ if (regtype_weight != NULL)
+ {
+ CURR_REGTYPE_PRESSURE (GPREG)
+ += INSN_GPREG_WEIGHT (insn);
+ CURR_REGTYPE_PRESSURE (VECREG)
+ += INSN_VECREG_WEIGHT (insn);
+ }
+ }
}
/* Instructions of type 'multi' should all be split before
@@ -13780,6 +15045,7 @@ AVAIL_NON_MIPS16 (dsp_64, TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
+AVAIL_NON_MIPS16 (msa, TARGET_MSA)
/* Construct a mips_builtin_description from the given arguments.
@@ -13896,6 +15162,22 @@ AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
#define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE) \
LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
+/* Define a MSA MIPS_BUILTIN_DIRECT function __builtin_msa_<INSN>
+ for instruction CODE_FOR_msa_<INSN>. FUNCTION_TYPE is a
+ builtin_description field. */
+#define MSA_BUILTIN(INSN, FUNCTION_TYPE) \
+ { CODE_FOR_msa_ ## INSN, MIPS_FP_COND_f, \
+ "__builtin_msa_" #INSN, MIPS_BUILTIN_DIRECT, \
+ FUNCTION_TYPE, mips_builtin_avail_msa }
+
+/* Define a MSA MIPS_BUILTIN_DIRECT_NO_TARGET function __builtin_msa_<INSN>
+ for instruction CODE_FOR_msa_<INSN>. FUNCTION_TYPE is a
+ builtin_description field. */
+#define MSA_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \
+ { CODE_FOR_msa_ ## INSN, MIPS_FP_COND_f, \
+ "__builtin_msa_" #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
+ FUNCTION_TYPE, mips_builtin_avail_msa }
+
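For reference, a user-level sketch of what these tables enable (illustration only; it assumes the v4i32 vector typedef from the MSA intrinsics header msa.h and compilation with -mmsa):

#include <msa.h>

v4i32
add_vectors (v4i32 a, v4i32 b)
{
  /* Resolves through CODE_FOR_msa_addv_w, i.e. the addv4si3 pattern,
     and is emitted as a single addv.w instruction.  */
  return __builtin_msa_addv_w (a, b);
}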
#define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
#define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
@@ -13936,6 +15218,119 @@ AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
#define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
#define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
+#define CODE_FOR_msa_adds_s_b CODE_FOR_ssaddv16qi3
+#define CODE_FOR_msa_adds_s_h CODE_FOR_ssaddv8hi3
+#define CODE_FOR_msa_adds_s_w CODE_FOR_ssaddv4si3
+#define CODE_FOR_msa_adds_s_d CODE_FOR_ssaddv2di3
+#define CODE_FOR_msa_adds_u_b CODE_FOR_usaddv16qi3
+#define CODE_FOR_msa_adds_u_h CODE_FOR_usaddv8hi3
+#define CODE_FOR_msa_adds_u_w CODE_FOR_usaddv4si3
+#define CODE_FOR_msa_adds_u_d CODE_FOR_usaddv2di3
+#define CODE_FOR_msa_addv_b CODE_FOR_addv16qi3
+#define CODE_FOR_msa_addv_h CODE_FOR_addv8hi3
+#define CODE_FOR_msa_addv_w CODE_FOR_addv4si3
+#define CODE_FOR_msa_addv_d CODE_FOR_addv2di3
+#define CODE_FOR_msa_and_v CODE_FOR_andv16qi3
+#define CODE_FOR_msa_bmnz_v CODE_FOR_msa_bmnz_v_b
+#define CODE_FOR_msa_bmz_v CODE_FOR_msa_bmz_v_b
+#define CODE_FOR_msa_bnz_v CODE_FOR_msa_bnz_v_b
+#define CODE_FOR_msa_bz_v CODE_FOR_msa_bz_v_b
+#define CODE_FOR_msa_bsel_v CODE_FOR_msa_bsel_v_b
+#define CODE_FOR_msa_div_s_b CODE_FOR_divv16qi3
+#define CODE_FOR_msa_div_s_h CODE_FOR_divv8hi3
+#define CODE_FOR_msa_div_s_w CODE_FOR_divv4si3
+#define CODE_FOR_msa_div_s_d CODE_FOR_divv2di3
+#define CODE_FOR_msa_div_u_b CODE_FOR_udivv16qi3
+#define CODE_FOR_msa_div_u_h CODE_FOR_udivv8hi3
+#define CODE_FOR_msa_div_u_w CODE_FOR_udivv4si3
+#define CODE_FOR_msa_div_u_d CODE_FOR_udivv2di3
+#define CODE_FOR_msa_fadd_w CODE_FOR_addv4sf3
+#define CODE_FOR_msa_fadd_d CODE_FOR_addv2df3
+#define CODE_FOR_msa_ffint_s_w CODE_FOR_floatv4sfv4si2
+#define CODE_FOR_msa_ffint_s_d CODE_FOR_floatv2dfv2di2
+#define CODE_FOR_msa_ffint_u_w CODE_FOR_floatunsv4sfv4si2
+#define CODE_FOR_msa_ffint_u_d CODE_FOR_floatunsv2dfv2di2
+#define CODE_FOR_msa_fsub_w CODE_FOR_subv4sf3
+#define CODE_FOR_msa_fsub_d CODE_FOR_subv2df3
+#define CODE_FOR_msa_fmul_w CODE_FOR_mulv4sf3
+#define CODE_FOR_msa_fmul_d CODE_FOR_mulv2df3
+#define CODE_FOR_msa_fdiv_w CODE_FOR_divv4sf3
+#define CODE_FOR_msa_fdiv_d CODE_FOR_divv2df3
+#define CODE_FOR_msa_fmax_w CODE_FOR_smaxv4sf3
+#define CODE_FOR_msa_fmax_d CODE_FOR_smaxv2df3
+#define CODE_FOR_msa_fmax_a_w CODE_FOR_smaxv4sf3
+#define CODE_FOR_msa_fmax_a_d CODE_FOR_smaxv2df3
+#define CODE_FOR_msa_fmin_w CODE_FOR_sminv4sf3
+#define CODE_FOR_msa_fmin_d CODE_FOR_sminv2df3
+#define CODE_FOR_msa_fmin_a_w CODE_FOR_sminv4sf3
+#define CODE_FOR_msa_fmin_a_d CODE_FOR_sminv2df3
+#define CODE_FOR_msa_fsqrt_w CODE_FOR_sqrtv4sf2
+#define CODE_FOR_msa_fsqrt_d CODE_FOR_sqrtv2df2
+#define CODE_FOR_msa_mod_s_b CODE_FOR_modv16qi3
+#define CODE_FOR_msa_mod_s_h CODE_FOR_modv8hi3
+#define CODE_FOR_msa_mod_s_w CODE_FOR_modv4si3
+#define CODE_FOR_msa_mod_s_d CODE_FOR_modv2di3
+#define CODE_FOR_msa_mod_u_b CODE_FOR_umodv16qi3
+#define CODE_FOR_msa_mod_u_h CODE_FOR_umodv8hi3
+#define CODE_FOR_msa_mod_u_w CODE_FOR_umodv4si3
+#define CODE_FOR_msa_mod_u_d CODE_FOR_umodv2di3
+#define CODE_FOR_msa_mulv_b CODE_FOR_mulv16qi3
+#define CODE_FOR_msa_mulv_h CODE_FOR_mulv8hi3
+#define CODE_FOR_msa_mulv_w CODE_FOR_mulv4si3
+#define CODE_FOR_msa_mulv_d CODE_FOR_mulv2di3
+#define CODE_FOR_msa_nlzc_b CODE_FOR_clzv16qi2
+#define CODE_FOR_msa_nlzc_h CODE_FOR_clzv8hi2
+#define CODE_FOR_msa_nlzc_w CODE_FOR_clzv4si2
+#define CODE_FOR_msa_nlzc_d CODE_FOR_clzv2di2
+#define CODE_FOR_msa_nor_v CODE_FOR_msa_nor_v_b
+#define CODE_FOR_msa_or_v CODE_FOR_iorv16qi3
+#define CODE_FOR_msa_pcnt_b CODE_FOR_popcountv16qi2
+#define CODE_FOR_msa_pcnt_h CODE_FOR_popcountv8hi2
+#define CODE_FOR_msa_pcnt_w CODE_FOR_popcountv4si2
+#define CODE_FOR_msa_pcnt_d CODE_FOR_popcountv2di2
+#define CODE_FOR_msa_xor_v CODE_FOR_xorv16qi3
+#define CODE_FOR_msa_sll_b CODE_FOR_vashlv16qi3
+#define CODE_FOR_msa_sll_h CODE_FOR_vashlv8hi3
+#define CODE_FOR_msa_sll_w CODE_FOR_vashlv4si3
+#define CODE_FOR_msa_sll_d CODE_FOR_vashlv2di3
+#define CODE_FOR_msa_sra_b CODE_FOR_vashrv16qi3
+#define CODE_FOR_msa_sra_h CODE_FOR_vashrv8hi3
+#define CODE_FOR_msa_sra_w CODE_FOR_vashrv4si3
+#define CODE_FOR_msa_sra_d CODE_FOR_vashrv2di3
+#define CODE_FOR_msa_srl_b CODE_FOR_vlshrv16qi3
+#define CODE_FOR_msa_srl_h CODE_FOR_vlshrv8hi3
+#define CODE_FOR_msa_srl_w CODE_FOR_vlshrv4si3
+#define CODE_FOR_msa_srl_d CODE_FOR_vlshrv2di3
+#define CODE_FOR_msa_subv_b CODE_FOR_subv16qi3
+#define CODE_FOR_msa_subv_h CODE_FOR_subv8hi3
+#define CODE_FOR_msa_subv_w CODE_FOR_subv4si3
+#define CODE_FOR_msa_subv_d CODE_FOR_subv2di3
+
+#define CODE_FOR_msa_move_v CODE_FOR_movv16qi
+
+#define CODE_FOR_msa_vshf_b CODE_FOR_msa_vshfv16qi
+#define CODE_FOR_msa_vshf_h CODE_FOR_msa_vshfv8hi
+#define CODE_FOR_msa_vshf_w CODE_FOR_msa_vshfv4si
+#define CODE_FOR_msa_vshf_d CODE_FOR_msa_vshfv2di
+
+#define CODE_FOR_msa_ldi_b CODE_FOR_msa_ldiv16qi
+#define CODE_FOR_msa_ldi_h CODE_FOR_msa_ldiv8hi
+#define CODE_FOR_msa_ldi_w CODE_FOR_msa_ldiv4si
+#define CODE_FOR_msa_ldi_d CODE_FOR_msa_ldiv2di
+
+#define CODE_FOR_msa_cast_to_vector_float CODE_FOR_msa_cast_to_vector_w_f
+#define CODE_FOR_msa_cast_to_vector_double CODE_FOR_msa_cast_to_vector_d_f
+#define CODE_FOR_msa_cast_to_scalar_float CODE_FOR_msa_cast_to_scalar_w_f
+#define CODE_FOR_msa_cast_to_scalar_double CODE_FOR_msa_cast_to_scalar_d_f
+
static const struct mips_builtin_description mips_builtins[] = {
#define MIPS_GET_FCSR 0
DIRECT_BUILTIN (get_fcsr, MIPS_USI_FTYPE_VOID, hard_float),
@@ -14224,7 +15619,543 @@ static const struct mips_builtin_description mips_builtins[] = {
LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
/* Sundry other built-in functions. */
- DIRECT_NO_TARGET_BUILTIN (cache, MIPS_VOID_FTYPE_SI_CVPOINTER, cache)
+ DIRECT_NO_TARGET_BUILTIN (cache, MIPS_VOID_FTYPE_SI_CVPOINTER, cache),
+
+ /* Built-in functions for MSA. */
+ MSA_BUILTIN (sll_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (sll_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (sll_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (sll_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (slli_b, MIPS_V16QI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (slli_h, MIPS_V8HI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (slli_w, MIPS_V4SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (slli_d, MIPS_V2DI_FTYPE_V2DI_UQI),
+ MSA_BUILTIN (sra_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (sra_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (sra_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (sra_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (srai_b, MIPS_V16QI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (srai_h, MIPS_V8HI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (srai_w, MIPS_V4SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (srai_d, MIPS_V2DI_FTYPE_V2DI_UQI),
+ MSA_BUILTIN (srar_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (srar_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (srar_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (srar_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (srari_b, MIPS_V16QI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (srari_h, MIPS_V8HI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (srari_w, MIPS_V4SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (srari_d, MIPS_V2DI_FTYPE_V2DI_UQI),
+ MSA_BUILTIN (srl_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (srl_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (srl_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (srl_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (srli_b, MIPS_V16QI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (srli_h, MIPS_V8HI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (srli_w, MIPS_V4SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (srli_d, MIPS_V2DI_FTYPE_V2DI_UQI),
+ MSA_BUILTIN (srlr_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (srlr_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (srlr_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (srlr_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (srlri_b, MIPS_V16QI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (srlri_h, MIPS_V8HI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (srlri_w, MIPS_V4SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (srlri_d, MIPS_V2DI_FTYPE_V2DI_UQI),
+ MSA_BUILTIN (bclr_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (bclr_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (bclr_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (bclr_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (bclri_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (bclri_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
+ MSA_BUILTIN (bclri_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
+ MSA_BUILTIN (bclri_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
+ MSA_BUILTIN (bset_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (bset_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (bset_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (bset_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (bseti_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (bseti_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
+ MSA_BUILTIN (bseti_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
+ MSA_BUILTIN (bseti_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
+ MSA_BUILTIN (bneg_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (bneg_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (bneg_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (bneg_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (bnegi_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (bnegi_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
+ MSA_BUILTIN (bnegi_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
+ MSA_BUILTIN (bnegi_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
+ MSA_BUILTIN (binsl_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI),
+ MSA_BUILTIN (binsl_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UV8HI),
+ MSA_BUILTIN (binsl_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UV4SI),
+ MSA_BUILTIN (binsl_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI),
+ MSA_BUILTIN (binsli_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI),
+ MSA_BUILTIN (binsli_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UQI),
+ MSA_BUILTIN (binsli_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UQI),
+ MSA_BUILTIN (binsli_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UQI),
+ MSA_BUILTIN (binsr_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI),
+ MSA_BUILTIN (binsr_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UV8HI),
+ MSA_BUILTIN (binsr_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UV4SI),
+ MSA_BUILTIN (binsr_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI),
+ MSA_BUILTIN (binsri_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI),
+ MSA_BUILTIN (binsri_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UQI),
+ MSA_BUILTIN (binsri_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UQI),
+ MSA_BUILTIN (binsri_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UQI),
+ MSA_BUILTIN (addv_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (addv_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (addv_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (addv_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (addvi_b, MIPS_V16QI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (addvi_h, MIPS_V8HI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (addvi_w, MIPS_V4SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (addvi_d, MIPS_V2DI_FTYPE_V2DI_UQI),
+ MSA_BUILTIN (subv_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (subv_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (subv_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (subv_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (subvi_b, MIPS_V16QI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (subvi_h, MIPS_V8HI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (subvi_w, MIPS_V4SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (subvi_d, MIPS_V2DI_FTYPE_V2DI_UQI),
+ MSA_BUILTIN (max_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (max_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (max_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (max_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (maxi_s_b, MIPS_V16QI_FTYPE_V16QI_QI),
+ MSA_BUILTIN (maxi_s_h, MIPS_V8HI_FTYPE_V8HI_QI),
+ MSA_BUILTIN (maxi_s_w, MIPS_V4SI_FTYPE_V4SI_QI),
+ MSA_BUILTIN (maxi_s_d, MIPS_V2DI_FTYPE_V2DI_QI),
+ MSA_BUILTIN (max_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (max_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (max_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (max_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (maxi_u_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (maxi_u_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
+ MSA_BUILTIN (maxi_u_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
+ MSA_BUILTIN (maxi_u_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
+ MSA_BUILTIN (min_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (min_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (min_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (min_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (mini_s_b, MIPS_V16QI_FTYPE_V16QI_QI),
+ MSA_BUILTIN (mini_s_h, MIPS_V8HI_FTYPE_V8HI_QI),
+ MSA_BUILTIN (mini_s_w, MIPS_V4SI_FTYPE_V4SI_QI),
+ MSA_BUILTIN (mini_s_d, MIPS_V2DI_FTYPE_V2DI_QI),
+ MSA_BUILTIN (min_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (min_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (min_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (min_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (mini_u_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (mini_u_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
+ MSA_BUILTIN (mini_u_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
+ MSA_BUILTIN (mini_u_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
+ MSA_BUILTIN (max_a_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (max_a_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (max_a_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (max_a_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (min_a_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (min_a_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (min_a_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (min_a_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (ceq_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (ceq_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (ceq_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (ceq_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (ceqi_b, MIPS_V16QI_FTYPE_V16QI_QI),
+ MSA_BUILTIN (ceqi_h, MIPS_V8HI_FTYPE_V8HI_QI),
+ MSA_BUILTIN (ceqi_w, MIPS_V4SI_FTYPE_V4SI_QI),
+ MSA_BUILTIN (ceqi_d, MIPS_V2DI_FTYPE_V2DI_QI),
+ MSA_BUILTIN (clt_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (clt_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (clt_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (clt_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (clti_s_b, MIPS_V16QI_FTYPE_V16QI_QI),
+ MSA_BUILTIN (clti_s_h, MIPS_V8HI_FTYPE_V8HI_QI),
+ MSA_BUILTIN (clti_s_w, MIPS_V4SI_FTYPE_V4SI_QI),
+ MSA_BUILTIN (clti_s_d, MIPS_V2DI_FTYPE_V2DI_QI),
+ MSA_BUILTIN (clt_u_b, MIPS_V16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (clt_u_h, MIPS_V8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (clt_u_w, MIPS_V4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (clt_u_d, MIPS_V2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (clti_u_b, MIPS_V16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (clti_u_h, MIPS_V8HI_FTYPE_UV8HI_UQI),
+ MSA_BUILTIN (clti_u_w, MIPS_V4SI_FTYPE_UV4SI_UQI),
+ MSA_BUILTIN (clti_u_d, MIPS_V2DI_FTYPE_UV2DI_UQI),
+ MSA_BUILTIN (cle_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (cle_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (cle_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (cle_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (clei_s_b, MIPS_V16QI_FTYPE_V16QI_QI),
+ MSA_BUILTIN (clei_s_h, MIPS_V8HI_FTYPE_V8HI_QI),
+ MSA_BUILTIN (clei_s_w, MIPS_V4SI_FTYPE_V4SI_QI),
+ MSA_BUILTIN (clei_s_d, MIPS_V2DI_FTYPE_V2DI_QI),
+ MSA_BUILTIN (cle_u_b, MIPS_V16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (cle_u_h, MIPS_V8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (cle_u_w, MIPS_V4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (cle_u_d, MIPS_V2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (clei_u_b, MIPS_V16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (clei_u_h, MIPS_V8HI_FTYPE_UV8HI_UQI),
+ MSA_BUILTIN (clei_u_w, MIPS_V4SI_FTYPE_UV4SI_UQI),
+ MSA_BUILTIN (clei_u_d, MIPS_V2DI_FTYPE_UV2DI_UQI),
+ MSA_BUILTIN (ld_b, MIPS_V16QI_FTYPE_POINTER_SI),
+ MSA_BUILTIN (ld_h, MIPS_V8HI_FTYPE_POINTER_SI),
+ MSA_BUILTIN (ld_w, MIPS_V4SI_FTYPE_POINTER_SI),
+ MSA_BUILTIN (ld_d, MIPS_V2DI_FTYPE_POINTER_SI),
+ MSA_NO_TARGET_BUILTIN (st_b, MIPS_VOID_FTYPE_V16QI_POINTER_SI),
+ MSA_NO_TARGET_BUILTIN (st_h, MIPS_VOID_FTYPE_V8HI_POINTER_SI),
+ MSA_NO_TARGET_BUILTIN (st_w, MIPS_VOID_FTYPE_V4SI_POINTER_SI),
+ MSA_NO_TARGET_BUILTIN (st_d, MIPS_VOID_FTYPE_V2DI_POINTER_SI),
+ MSA_BUILTIN (sat_s_b, MIPS_V16QI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (sat_s_h, MIPS_V8HI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (sat_s_w, MIPS_V4SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (sat_s_d, MIPS_V2DI_FTYPE_V2DI_UQI),
+ MSA_BUILTIN (sat_u_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (sat_u_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
+ MSA_BUILTIN (sat_u_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
+ MSA_BUILTIN (sat_u_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
+ MSA_BUILTIN (add_a_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (add_a_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (add_a_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (add_a_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (adds_a_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (adds_a_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (adds_a_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (adds_a_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (adds_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (adds_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (adds_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (adds_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (adds_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (adds_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (adds_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (adds_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (ave_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (ave_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (ave_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (ave_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (ave_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (ave_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (ave_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (ave_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (aver_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (aver_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (aver_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (aver_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (aver_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (aver_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (aver_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (aver_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (subs_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (subs_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (subs_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (subs_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (subs_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (subs_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (subs_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (subs_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (subsuu_s_b, MIPS_V16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (subsuu_s_h, MIPS_V8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (subsuu_s_w, MIPS_V4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (subsuu_s_d, MIPS_V2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (subsus_u_b, MIPS_UV16QI_FTYPE_UV16QI_V16QI),
+ MSA_BUILTIN (subsus_u_h, MIPS_UV8HI_FTYPE_UV8HI_V8HI),
+ MSA_BUILTIN (subsus_u_w, MIPS_UV4SI_FTYPE_UV4SI_V4SI),
+ MSA_BUILTIN (subsus_u_d, MIPS_UV2DI_FTYPE_UV2DI_V2DI),
+ MSA_BUILTIN (asub_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (asub_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (asub_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (asub_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (asub_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (asub_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (asub_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (asub_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (mulv_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (mulv_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (mulv_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (mulv_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (maddv_b, MIPS_V16QI_FTYPE_V16QI_V16QI_V16QI),
+ MSA_BUILTIN (maddv_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
+ MSA_BUILTIN (maddv_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
+ MSA_BUILTIN (maddv_d, MIPS_V2DI_FTYPE_V2DI_V2DI_V2DI),
+ MSA_BUILTIN (msubv_b, MIPS_V16QI_FTYPE_V16QI_V16QI_V16QI),
+ MSA_BUILTIN (msubv_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
+ MSA_BUILTIN (msubv_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
+ MSA_BUILTIN (msubv_d, MIPS_V2DI_FTYPE_V2DI_V2DI_V2DI),
+ MSA_BUILTIN (div_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (div_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (div_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (div_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (div_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (div_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (div_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (div_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (hadd_s_h, MIPS_V8HI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (hadd_s_w, MIPS_V4SI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (hadd_s_d, MIPS_V2DI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (hadd_u_h, MIPS_UV8HI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (hadd_u_w, MIPS_UV4SI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (hadd_u_d, MIPS_UV2DI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (hsub_s_h, MIPS_V8HI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (hsub_s_w, MIPS_V4SI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (hsub_s_d, MIPS_V2DI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (hsub_u_h, MIPS_V8HI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (hsub_u_w, MIPS_V4SI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (hsub_u_d, MIPS_V2DI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (mod_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (mod_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (mod_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (mod_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (mod_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (mod_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (mod_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (mod_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
+ MSA_BUILTIN (dotp_s_h, MIPS_V8HI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (dotp_s_w, MIPS_V4SI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (dotp_s_d, MIPS_V2DI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (dotp_u_h, MIPS_UV8HI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (dotp_u_w, MIPS_UV4SI_FTYPE_UV8HI_UV8HI),
+ MSA_BUILTIN (dotp_u_d, MIPS_UV2DI_FTYPE_UV4SI_UV4SI),
+ MSA_BUILTIN (dpadd_s_h, MIPS_V8HI_FTYPE_V8HI_V16QI_V16QI),
+ MSA_BUILTIN (dpadd_s_w, MIPS_V4SI_FTYPE_V4SI_V8HI_V8HI),
+ MSA_BUILTIN (dpadd_s_d, MIPS_V2DI_FTYPE_V2DI_V4SI_V4SI),
+ MSA_BUILTIN (dpadd_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI),
+ MSA_BUILTIN (dpadd_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI),
+ MSA_BUILTIN (dpadd_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI),
+ MSA_BUILTIN (dpsub_s_h, MIPS_V8HI_FTYPE_V8HI_V16QI_V16QI),
+ MSA_BUILTIN (dpsub_s_w, MIPS_V4SI_FTYPE_V4SI_V8HI_V8HI),
+ MSA_BUILTIN (dpsub_s_d, MIPS_V2DI_FTYPE_V2DI_V4SI_V4SI),
+ MSA_BUILTIN (dpsub_u_h, MIPS_V8HI_FTYPE_V8HI_UV16QI_UV16QI),
+ MSA_BUILTIN (dpsub_u_w, MIPS_V4SI_FTYPE_V4SI_UV8HI_UV8HI),
+ MSA_BUILTIN (dpsub_u_d, MIPS_V2DI_FTYPE_V2DI_UV4SI_UV4SI),
+ MSA_BUILTIN (sld_b, MIPS_V16QI_FTYPE_V16QI_V16QI_SI),
+ MSA_BUILTIN (sld_h, MIPS_V8HI_FTYPE_V8HI_V8HI_SI),
+ MSA_BUILTIN (sld_w, MIPS_V4SI_FTYPE_V4SI_V4SI_SI),
+ MSA_BUILTIN (sld_d, MIPS_V2DI_FTYPE_V2DI_V2DI_SI),
+ MSA_BUILTIN (sldi_b, MIPS_V16QI_FTYPE_V16QI_V16QI_UQI),
+ MSA_BUILTIN (sldi_h, MIPS_V8HI_FTYPE_V8HI_V8HI_UQI),
+ MSA_BUILTIN (sldi_w, MIPS_V4SI_FTYPE_V4SI_V4SI_UQI),
+ MSA_BUILTIN (sldi_d, MIPS_V2DI_FTYPE_V2DI_V2DI_UQI),
+ MSA_BUILTIN (splat_b, MIPS_V16QI_FTYPE_V16QI_SI),
+ MSA_BUILTIN (splat_h, MIPS_V8HI_FTYPE_V8HI_SI),
+ MSA_BUILTIN (splat_w, MIPS_V4SI_FTYPE_V4SI_SI),
+ MSA_BUILTIN (splat_d, MIPS_V2DI_FTYPE_V2DI_SI),
+ MSA_BUILTIN (splati_b, MIPS_V16QI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (splati_h, MIPS_V8HI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (splati_w, MIPS_V4SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (splati_d, MIPS_V2DI_FTYPE_V2DI_UQI),
+ MSA_BUILTIN (pckev_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (pckev_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (pckev_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (pckev_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (pckod_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (pckod_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (pckod_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (pckod_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (ilvl_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (ilvl_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (ilvl_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (ilvl_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (ilvr_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (ilvr_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (ilvr_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (ilvr_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (ilvev_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (ilvev_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (ilvev_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (ilvev_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (ilvod_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
+ MSA_BUILTIN (ilvod_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (ilvod_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (ilvod_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
+ MSA_BUILTIN (vshf_b, MIPS_V16QI_FTYPE_V16QI_V16QI_V16QI),
+ MSA_BUILTIN (vshf_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
+ MSA_BUILTIN (vshf_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
+ MSA_BUILTIN (vshf_d, MIPS_V2DI_FTYPE_V2DI_V2DI_V2DI),
+ MSA_BUILTIN (and_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (andi_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (or_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (ori_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (nor_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (nori_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (xor_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
+ MSA_BUILTIN (xori_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
+ MSA_BUILTIN (bmnz_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI),
+ MSA_BUILTIN (bmnzi_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI),
+ MSA_BUILTIN (bmz_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI),
+ MSA_BUILTIN (bmzi_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI),
+ MSA_BUILTIN (bsel_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI),
+ MSA_BUILTIN (bseli_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI),
+ MSA_BUILTIN (shf_b, MIPS_V16QI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (shf_h, MIPS_V8HI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (shf_w, MIPS_V4SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (bnz_v, MIPS_SI_FTYPE_UV16QI),
+ MSA_BUILTIN (bz_v, MIPS_SI_FTYPE_UV16QI),
+ MSA_BUILTIN (fill_b, MIPS_V16QI_FTYPE_SI),
+ MSA_BUILTIN (fill_h, MIPS_V8HI_FTYPE_SI),
+ MSA_BUILTIN (fill_w, MIPS_V4SI_FTYPE_SI),
+ MSA_BUILTIN (fill_d, MIPS_V2DI_FTYPE_DI),
+ MSA_BUILTIN (pcnt_b, MIPS_V16QI_FTYPE_V16QI),
+ MSA_BUILTIN (pcnt_h, MIPS_V8HI_FTYPE_V8HI),
+ MSA_BUILTIN (pcnt_w, MIPS_V4SI_FTYPE_V4SI),
+ MSA_BUILTIN (pcnt_d, MIPS_V2DI_FTYPE_V2DI),
+ MSA_BUILTIN (nloc_b, MIPS_V16QI_FTYPE_V16QI),
+ MSA_BUILTIN (nloc_h, MIPS_V8HI_FTYPE_V8HI),
+ MSA_BUILTIN (nloc_w, MIPS_V4SI_FTYPE_V4SI),
+ MSA_BUILTIN (nloc_d, MIPS_V2DI_FTYPE_V2DI),
+ MSA_BUILTIN (nlzc_b, MIPS_V16QI_FTYPE_V16QI),
+ MSA_BUILTIN (nlzc_h, MIPS_V8HI_FTYPE_V8HI),
+ MSA_BUILTIN (nlzc_w, MIPS_V4SI_FTYPE_V4SI),
+ MSA_BUILTIN (nlzc_d, MIPS_V2DI_FTYPE_V2DI),
+ MSA_BUILTIN (copy_s_b, MIPS_SI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (copy_s_h, MIPS_SI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (copy_s_w, MIPS_SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (copy_s_d, MIPS_DI_FTYPE_V2DI_UQI),
+ MSA_BUILTIN (copy_u_b, MIPS_SI_FTYPE_V16QI_UQI),
+ MSA_BUILTIN (copy_u_h, MIPS_SI_FTYPE_V8HI_UQI),
+ MSA_BUILTIN (copy_u_w, MIPS_SI_FTYPE_V4SI_UQI),
+ MSA_BUILTIN (copy_u_d, MIPS_DI_FTYPE_V2DI_UQI),
+ MSA_BUILTIN (insert_b, MIPS_V16QI_FTYPE_V16QI_UQI_SI),
+ MSA_BUILTIN (insert_h, MIPS_V8HI_FTYPE_V8HI_UQI_SI),
+ MSA_BUILTIN (insert_w, MIPS_V4SI_FTYPE_V4SI_UQI_SI),
+ MSA_BUILTIN (insert_d, MIPS_V2DI_FTYPE_V2DI_UQI_DI),
+ MSA_BUILTIN (insve_b, MIPS_V16QI_FTYPE_V16QI_UQI_V16QI),
+ MSA_BUILTIN (insve_h, MIPS_V8HI_FTYPE_V8HI_UQI_V8HI),
+ MSA_BUILTIN (insve_w, MIPS_V4SI_FTYPE_V4SI_UQI_V4SI),
+ MSA_BUILTIN (insve_d, MIPS_V2DI_FTYPE_V2DI_UQI_V2DI),
+ MSA_BUILTIN (bnz_b, MIPS_SI_FTYPE_UV16QI),
+ MSA_BUILTIN (bnz_h, MIPS_SI_FTYPE_UV8HI),
+ MSA_BUILTIN (bnz_w, MIPS_SI_FTYPE_UV4SI),
+ MSA_BUILTIN (bnz_d, MIPS_SI_FTYPE_UV2DI),
+ MSA_BUILTIN (bz_b, MIPS_SI_FTYPE_UV16QI),
+ MSA_BUILTIN (bz_h, MIPS_SI_FTYPE_UV8HI),
+ MSA_BUILTIN (bz_w, MIPS_SI_FTYPE_UV4SI),
+ MSA_BUILTIN (bz_d, MIPS_SI_FTYPE_UV2DI),
+ MSA_BUILTIN (ldi_b, MIPS_V16QI_FTYPE_HI),
+ MSA_BUILTIN (ldi_h, MIPS_V8HI_FTYPE_HI),
+ MSA_BUILTIN (ldi_w, MIPS_V4SI_FTYPE_HI),
+ MSA_BUILTIN (ldi_d, MIPS_V2DI_FTYPE_HI),
+ MSA_BUILTIN (fcaf_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fcaf_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fcor_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fcor_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fcun_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fcun_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fcune_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fcune_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fcueq_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fcueq_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fceq_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fceq_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fcne_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fcne_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fclt_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fclt_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fcult_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fcult_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fcle_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fcle_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fcule_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fcule_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fsaf_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fsaf_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fsor_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fsor_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fsun_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fsun_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fsune_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fsune_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fsueq_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fsueq_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fseq_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fseq_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fsne_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fsne_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fslt_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fslt_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fsult_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fsult_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fsle_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fsle_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fsule_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fsule_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fadd_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fadd_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fsub_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fsub_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fmul_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fmul_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fdiv_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fdiv_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fmadd_w, MIPS_V4SF_FTYPE_V4SF_V4SF_V4SF),
+ MSA_BUILTIN (fmadd_d, MIPS_V2DF_FTYPE_V2DF_V2DF_V2DF),
+ MSA_BUILTIN (fmsub_w, MIPS_V4SF_FTYPE_V4SF_V4SF_V4SF),
+ MSA_BUILTIN (fmsub_d, MIPS_V2DF_FTYPE_V2DF_V2DF_V2DF),
+ MSA_BUILTIN (fexp2_w, MIPS_V4SF_FTYPE_V4SF_V4SI),
+ MSA_BUILTIN (fexp2_d, MIPS_V2DF_FTYPE_V2DF_V2DI),
+ MSA_BUILTIN (fexdo_h, MIPS_V8HI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fexdo_w, MIPS_V4SF_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (ftq_h, MIPS_V8HI_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (ftq_w, MIPS_V4SI_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fmin_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fmin_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fmin_a_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fmin_a_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fmax_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fmax_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (fmax_a_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
+ MSA_BUILTIN (fmax_a_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
+ MSA_BUILTIN (mul_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (mul_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (mulr_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
+ MSA_BUILTIN (mulr_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
+ MSA_BUILTIN (madd_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
+ MSA_BUILTIN (madd_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
+ MSA_BUILTIN (maddr_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
+ MSA_BUILTIN (maddr_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
+ MSA_BUILTIN (msub_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
+ MSA_BUILTIN (msub_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
+ MSA_BUILTIN (msubr_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
+ MSA_BUILTIN (msubr_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
+ MSA_BUILTIN (fclass_w, MIPS_V4SI_FTYPE_V4SF),
+ MSA_BUILTIN (fclass_d, MIPS_V2DI_FTYPE_V2DF),
+ MSA_BUILTIN (fsqrt_w, MIPS_V4SF_FTYPE_V4SF),
+ MSA_BUILTIN (fsqrt_d, MIPS_V2DF_FTYPE_V2DF),
+ MSA_BUILTIN (frcp_w, MIPS_V4SF_FTYPE_V4SF),
+ MSA_BUILTIN (frcp_d, MIPS_V2DF_FTYPE_V2DF),
+ MSA_BUILTIN (frint_w, MIPS_V4SF_FTYPE_V4SF),
+ MSA_BUILTIN (frint_d, MIPS_V2DF_FTYPE_V2DF),
+ MSA_BUILTIN (frsqrt_w, MIPS_V4SF_FTYPE_V4SF),
+ MSA_BUILTIN (frsqrt_d, MIPS_V2DF_FTYPE_V2DF),
+ MSA_BUILTIN (flog2_w, MIPS_V4SF_FTYPE_V4SF),
+ MSA_BUILTIN (flog2_d, MIPS_V2DF_FTYPE_V2DF),
+ MSA_BUILTIN (fexupl_w, MIPS_V4SF_FTYPE_V8HI),
+ MSA_BUILTIN (fexupl_d, MIPS_V2DF_FTYPE_V4SF),
+ MSA_BUILTIN (fexupr_w, MIPS_V4SF_FTYPE_V8HI),
+ MSA_BUILTIN (fexupr_d, MIPS_V2DF_FTYPE_V4SF),
+ MSA_BUILTIN (ffql_w, MIPS_V4SF_FTYPE_V8HI),
+ MSA_BUILTIN (ffql_d, MIPS_V2DF_FTYPE_V4SI),
+ MSA_BUILTIN (ffqr_w, MIPS_V4SF_FTYPE_V8HI),
+ MSA_BUILTIN (ffqr_d, MIPS_V2DF_FTYPE_V4SI),
+ MSA_BUILTIN (ftint_s_w, MIPS_V4SI_FTYPE_V4SF),
+ MSA_BUILTIN (ftint_s_d, MIPS_V2DI_FTYPE_V2DF),
+ MSA_BUILTIN (ftint_u_w, MIPS_UV4SI_FTYPE_V4SF),
+ MSA_BUILTIN (ftint_u_d, MIPS_UV2DI_FTYPE_V2DF),
+ MSA_BUILTIN (ftrunc_s_w, MIPS_V4SI_FTYPE_V4SF),
+ MSA_BUILTIN (ftrunc_s_d, MIPS_V2DI_FTYPE_V2DF),
+ MSA_BUILTIN (ftrunc_u_w, MIPS_UV4SI_FTYPE_V4SF),
+ MSA_BUILTIN (ftrunc_u_d, MIPS_UV2DI_FTYPE_V2DF),
+ MSA_BUILTIN (ffint_s_w, MIPS_V4SF_FTYPE_V4SI),
+ MSA_BUILTIN (ffint_s_d, MIPS_V2DF_FTYPE_V2DI),
+ MSA_BUILTIN (ffint_u_w, MIPS_V4SF_FTYPE_UV4SI),
+ MSA_BUILTIN (ffint_u_d, MIPS_V2DF_FTYPE_UV2DI),
+ MSA_NO_TARGET_BUILTIN (ctcmsa, MIPS_VOID_FTYPE_UQI_SI),
+ MSA_BUILTIN (cfcmsa, MIPS_SI_FTYPE_UQI),
+ MSA_BUILTIN (move_v, MIPS_V16QI_FTYPE_V16QI),
+ MSA_BUILTIN (cast_to_vector_float, MIPS_V4SF_FTYPE_SF),
+ MSA_BUILTIN (cast_to_vector_double, MIPS_V2DF_FTYPE_DF),
+ MSA_BUILTIN (cast_to_scalar_float, MIPS_SF_FTYPE_V4SF),
+ MSA_BUILTIN (cast_to_scalar_double, MIPS_DF_FTYPE_V2DF)
};
/* Index I is the function declaration for mips_builtins[I], or null if the
@@ -14271,7 +16202,9 @@ mips_build_cvpointer_type (void)
#define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type ()
/* Standard mode-based argument types. */
+#define MIPS_ATYPE_QI intQI_type_node
#define MIPS_ATYPE_UQI unsigned_intQI_type_node
+#define MIPS_ATYPE_HI intHI_type_node
#define MIPS_ATYPE_SI intSI_type_node
#define MIPS_ATYPE_USI unsigned_intSI_type_node
#define MIPS_ATYPE_DI intDI_type_node
@@ -14286,6 +16219,18 @@ mips_build_cvpointer_type (void)
#define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
#define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
#define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
+#define MIPS_ATYPE_V2DI mips_builtin_vector_type (intDI_type_node, V2DImode)
+#define MIPS_ATYPE_V4SI mips_builtin_vector_type (intSI_type_node, V4SImode)
+#define MIPS_ATYPE_V8HI mips_builtin_vector_type (intHI_type_node, V8HImode)
+#define MIPS_ATYPE_V16QI mips_builtin_vector_type (intQI_type_node, V16QImode)
+#define MIPS_ATYPE_V2DF mips_builtin_vector_type (double_type_node, V2DFmode)
+#define MIPS_ATYPE_V4SF mips_builtin_vector_type (float_type_node, V4SFmode)
+
+#define MIPS_ATYPE_UV2DI mips_builtin_vector_type (unsigned_intDI_type_node, V2DImode)
+#define MIPS_ATYPE_UV4SI mips_builtin_vector_type (unsigned_intSI_type_node, V4SImode)
+#define MIPS_ATYPE_UV8HI mips_builtin_vector_type (unsigned_intHI_type_node, V8HImode)
+#define MIPS_ATYPE_UV16QI mips_builtin_vector_type (unsigned_intQI_type_node, V16QImode)
+
#define MIPS_ATYPE_UV2SI \
mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
#define MIPS_ATYPE_UV4HI \
@@ -15766,8 +17711,10 @@ mips_mult_zero_zero_cost (struct mips_sim *state, bool setting)
static void
mips_set_fast_mult_zero_zero_p (struct mips_sim *state)
{
- if (TARGET_MIPS16)
- /* No MTLO or MTHI available. */
+ if (TARGET_MIPS16 || !ISA_HAS_HILO)
+    /* No MTLO or MTHI available for MIPS16.  Also, when there are no HI or LO
+       registers there is no reason to zero them, so arbitrarily say that
+       "MULT $0,$0" would be faster.  */
mips_tuning_info.fast_mult_zero_zero_p = true;
else
{
@@ -16768,6 +18715,9 @@ mips_set_compression_mode (unsigned int compression_mode)
if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
+
+ if (TARGET_MSA)
+ sorry ("MSA MIPS16 code");
}
else
{
@@ -16900,6 +18850,10 @@ mips_set_architecture (const struct mips_cpu_info *info)
mips_arch_info = info;
mips_arch = info->cpu;
mips_isa = info->isa;
+ if (mips_isa < 32)
+ mips_isa_rev = 0;
+ else
+ mips_isa_rev = (mips_isa & 31) + 1;
}
}
@@ -17009,7 +18963,10 @@ mips_option_override (void)
if ((target_flags_explicit & MASK_FLOAT64) != 0)
{
- if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
+ if (mips_isa_rev >= 6 && !TARGET_FLOAT64)
+ error ("the %qs architecture does not support %<-mfp32%>",
+ mips_arch_info->name);
+ else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
error ("unsupported combination: %s", "-mfp64 -msingle-float");
else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
@@ -17025,14 +18982,30 @@ mips_option_override (void)
}
else
{
- /* -msingle-float selects 32-bit float registers. Otherwise the
- float registers should be the same size as the integer ones. */
- if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
+ /* -msingle-float selects 32-bit float registers. On r6 and later,
+ -mdouble-float selects 64-bit float registers, since the old paired
+ register model is not supported. -mmsa selects 64-bit registers for
+ O32. In other cases the float registers should be the same size as
+ the integer ones. */
+ if (mips_isa_rev >= 6 && TARGET_DOUBLE_FLOAT && !TARGET_FLOATXX)
+ target_flags |= MASK_FLOAT64;
+ else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
+ target_flags |= MASK_FLOAT64;
+ else if (mips_abi == ABI_32 && TARGET_MSA && !TARGET_FLOATXX)
target_flags |= MASK_FLOAT64;
else
target_flags &= ~MASK_FLOAT64;
}
+ if (mips_abi != ABI_32 && TARGET_FLOATXX)
+ error ("%<-mfpxx%> can only be used with the o32 ABI");
+ else if (TARGET_FLOAT64 && TARGET_FLOATXX)
+ error ("unsupported combination: %s", "-mfp64 -mfpxx");
+ else if (ISA_MIPS1 && !TARGET_FLOAT32)
+ error ("%<-march=%s%> requires %<-mfp32%>", mips_arch_info->name);
+ else if (TARGET_FLOATXX && !mips_lra_flag)
+ error ("%<-mfpxx%> requires %<-mlra%>");
+
/* End of code shared with GAS. */
/* The R5900 FPU only supports single precision. */
@@ -17120,6 +19093,24 @@ mips_option_override (void)
warning (0, "the %qs architecture does not support madd or msub"
" instructions", mips_arch_info->name);
+ /* If neither -modd-spreg nor -mno-odd-spreg was given on the command
+ line, set MASK_ODD_SPREG based on the ISA. */
+ if ((target_flags_explicit & MASK_ODD_SPREG) == 0)
+ {
+ /* Disable TARGET_ODD_SPREG for generic architectures when using the
+ O32 FPXX ABI to make them compatible with those implementations
+ which are !ISA_HAS_ODD_SPREG. */
+ if (!ISA_HAS_ODD_SPREG
+ || (TARGET_FLOATXX
+ && (strncmp (mips_arch_info->name, "mips", 4) == 0)))
+ target_flags &= ~MASK_ODD_SPREG;
+ else
+ target_flags |= MASK_ODD_SPREG;
+ }
+ else if (TARGET_ODD_SPREG && !ISA_HAS_ODD_SPREG)
+ warning (0, "the %qs architecture does not support odd single-precision"
+ " registers", mips_arch_info->name);
+
/* The effect of -mabicalls isn't defined for the EABI. */
if (mips_abi == ABI_EABI && TARGET_ABICALLS)
{
@@ -17183,6 +19174,27 @@ mips_option_override (void)
}
}
+ /* Set NaN and ABS defaults. */
+ if (mips_nan == MIPS_IEEE_754_DEFAULT && !ISA_HAS_IEEE_754_LEGACY)
+ mips_nan = MIPS_IEEE_754_2008;
+ if (mips_abs == MIPS_IEEE_754_DEFAULT && !ISA_HAS_IEEE_754_LEGACY)
+ mips_abs = MIPS_IEEE_754_2008;
+
+ /* Check for IEEE 754 legacy/2008 support. */
+ if ((mips_nan == MIPS_IEEE_754_LEGACY
+ || mips_abs == MIPS_IEEE_754_LEGACY)
+ && !ISA_HAS_IEEE_754_LEGACY)
+ warning (0, "the %qs architecture does not support %<-m%s=legacy%>",
+ mips_arch_info->name,
+ mips_nan == MIPS_IEEE_754_LEGACY ? "nan" : "abs");
+
+ if ((mips_nan == MIPS_IEEE_754_2008
+ || mips_abs == MIPS_IEEE_754_2008)
+ && !ISA_HAS_IEEE_754_2008)
+ warning (0, "the %qs architecture does not support %<-m%s=2008%>",
+ mips_arch_info->name,
+ mips_nan == MIPS_IEEE_754_2008 ? "nan" : "abs");
+
/* Pre-IEEE 754-2008 MIPS hardware has a quirky almost-IEEE format
for all its floating point. */
if (mips_nan != MIPS_IEEE_754_2008)
@@ -17214,6 +19226,11 @@ mips_option_override (void)
TARGET_MIPS3D = 0;
}
+  /* Make sure that when TARGET_MSA is true, TARGET_FLOAT64 and
+     TARGET_HARD_FLOAT_ABI are both true.  */
+ if (TARGET_MSA && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
+ error ("%<-mmsa%> must be used with %<-mfp64%> and %<-mhard-float%>");
+
/* Make sure that -mpaired-single is only used on ISAs that support it.
We must disable it otherwise since it relies on other ISA properties
like ISA_HAS_8CC having their normal values. */
@@ -17237,6 +19254,14 @@ mips_option_override (void)
if (TARGET_DSPR2)
TARGET_DSP = true;
+ if (TARGET_DSP && mips_isa_rev >= 6)
+ {
+ error ("the %qs architecture does not support DSP instructions",
+ mips_arch_info->name);
+ TARGET_DSP = false;
+ TARGET_DSPR2 = false;
+ }
+
/* .eh_frame addresses should be the same width as a C pointer.
Most MIPS ABIs support only one pointer size, so the assembler
will usually know exactly how big an .eh_frame address is.
@@ -17417,6 +19442,10 @@ mips_conditional_register_usage (void)
AND_COMPL_HARD_REG_SET (accessible_reg_set,
reg_class_contents[(int) DSP_ACC_REGS]);
+ if (!ISA_HAS_HILO)
+ AND_COMPL_HARD_REG_SET (accessible_reg_set,
+ reg_class_contents[(int) MD_REGS]);
+
if (!TARGET_HARD_FLOAT)
{
AND_COMPL_HARD_REG_SET (accessible_reg_set,
@@ -17431,7 +19460,8 @@ mips_conditional_register_usage (void)
RTL that refers directly to ST_REG_FIRST. */
AND_COMPL_HARD_REG_SET (accessible_reg_set,
reg_class_contents[(int) ST_REGS]);
- SET_HARD_REG_BIT (accessible_reg_set, FPSW_REGNUM);
+ if (!ISA_HAS_CCF)
+ SET_HARD_REG_BIT (accessible_reg_set, FPSW_REGNUM);
fixed_regs[FPSW_REGNUM] = call_used_regs[FPSW_REGNUM] = 1;
}
if (TARGET_MIPS16)
@@ -17482,8 +19512,10 @@ mips_conditional_register_usage (void)
call_really_used_regs[regno] = call_used_regs[regno] = 1;
}
/* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
- for n32. */
- if (mips_abi == ABI_N32)
+ for n32 and o32 FP64. */
+ if (mips_abi == ABI_N32
+ || (mips_abi == ABI_32
+ && TARGET_FLOAT64))
{
int regno;
for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
@@ -17632,6 +19664,8 @@ mips_mulsidi3_gen_fn (enum rtx_code ext_code)
the extension is not needed for signed multiplication. In order to
ensure that we always remove the redundant sign-extension in this
case we still expand mulsidi3 for DMUL. */
+ if (ISA_HAS_R6DMUL)
+ return signed_p ? gen_mulsidi3_64bit_r6dmul : NULL;
if (ISA_HAS_DMUL3)
return signed_p ? gen_mulsidi3_64bit_dmul : NULL;
if (TARGET_MIPS16)
@@ -17644,6 +19678,8 @@ mips_mulsidi3_gen_fn (enum rtx_code ext_code)
}
else
{
+ if (ISA_HAS_R6MUL)
+ return (signed_p ? gen_mulsidi3_32bit_r6 : gen_umulsidi3_32bit_r6);
if (TARGET_MIPS16)
return (signed_p
? gen_mulsidi3_32bit_mips16
@@ -18646,8 +20682,9 @@ mips_expand_vi_constant (enum machine_mode vmode, unsigned nelt,
for (i = 0; i < nelt; ++i)
{
- if (!mips_constant_elt_p (RTVEC_ELT (vec, i)))
- RTVEC_ELT (vec, i) = const0_rtx;
+ rtx elem = RTVEC_ELT (vec, i);
+ if (!mips_constant_elt_p (elem))
+ RTVEC_ELT (vec, i) = CONST0_RTX (GET_MODE (elem));
}
emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec));
@@ -18708,6 +20745,123 @@ mips_expand_vector_init (rtx target, rtx vals)
all_same = false;
}
+ if (TARGET_MSA)
+ {
+ if (all_same)
+ {
+ rtx same = XVECEXP (vals, 0, 0);
+ rtx temp;
+ rtx temp2;
+
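+	  /* If all elements are the same signed 10-bit constant, a single
+	     LDI instruction can load the whole vector.  */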
+ if (CONST_INT_P (same) && nvar == 0 && mips_signed_immediate_p (INTVAL (same), 10, 0))
+ {
+ switch (vmode)
+ {
+ case V16QImode:
+ emit_insn (gen_msa_ldiv16qi (target, same));
+ return;
+
+ case V8HImode:
+ emit_insn (gen_msa_ldiv8hi (target, same));
+ return;
+
+ case V4SImode:
+ emit_insn (gen_msa_ldiv4si (target, same));
+ return;
+
+ case V2DImode:
+ emit_insn (gen_msa_ldiv2di (target, same));
+ return;
+
+ default:
+ break;
+ }
+ }
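+	  /* Otherwise move the common element into a scalar register and
+	     replicate it with FILL (integer modes) or SPLATI
+	     (floating-point modes).  */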
+ temp = gen_reg_rtx (imode);
+ if (imode == GET_MODE (same))
+ emit_move_insn (temp, same);
+ else
+ emit_move_insn (temp, simplify_gen_subreg (imode, same, GET_MODE (same), 0));
+ switch (vmode)
+ {
+ case V16QImode:
+ temp2 = simplify_gen_subreg (SImode, temp, imode, 0);
+ emit_insn (gen_msa_fill_b (target, temp2));
+ break;
+
+ case V8HImode:
+ temp2 = simplify_gen_subreg (SImode, temp, imode, 0);
+ emit_insn (gen_msa_fill_h (target, temp2));
+ break;
+
+ case V4SImode:
+ emit_insn (gen_msa_fill_w (target, temp));
+ break;
+
+ case V2DImode:
+ emit_insn (gen_msa_fill_d (target, temp));
+ break;
+
+ case V4SFmode:
+ emit_insn (gen_msa_splati_w_f_s (target, temp, const0_rtx));
+ break;
+
+ case V2DFmode:
+ emit_insn (gen_msa_splati_d_f_s (target, temp, const0_rtx));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return;
+ }
+
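+      /* In the general case, start from an all-zero vector and insert the
+	 elements one at a time.  */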
+ rtvec vec = shallow_copy_rtvec (XVEC (vals, 0));
+
+ for (i = 0; i < nelt; ++i)
+ RTVEC_ELT (vec, i) = CONST0_RTX (imode);
+
+ emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec));
+
+ for (i = 0; i < nelt; ++i)
+ {
+ rtx temp = gen_reg_rtx (imode);
+ emit_move_insn (temp, XVECEXP (vals, 0, i));
+ switch (vmode)
+ {
+ case V16QImode:
+ emit_insn (gen_vec_setv16qi (target, temp, GEN_INT (i)));
+ break;
+
+ case V8HImode:
+ emit_insn (gen_vec_setv8hi (target, temp, GEN_INT (i)));
+ break;
+
+ case V4SImode:
+ emit_insn (gen_vec_setv4si (target, temp, GEN_INT (i)));
+ break;
+
+ case V2DImode:
+ emit_insn (gen_vec_setv2di (target, temp, GEN_INT (i)));
+ break;
+
+ case V4SFmode:
+ emit_insn (gen_vec_setv4sf (target, temp, GEN_INT (i)));
+ break;
+
+ case V2DFmode:
+ emit_insn (gen_vec_setv2df (target, temp, GEN_INT (i)));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ return;
+ }
+
/* Load constants from the pool, or whatever's handy. */
if (nvar == 0)
{
@@ -18843,6 +20997,437 @@ mips_expand_vec_minmax (rtx target, rtx op0, rtx op1,
emit_insn (gen_rtx_SET (VOIDmode, target, x));
}
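+
+/* Store the bitwise inverse of SRC in DEST, implemented as a NOR of SRC
+   with itself.  */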
+static void
+mips_expand_msa_one_cmpl (rtx dest, rtx src)
+{
+ enum machine_mode mode = GET_MODE (dest);
+ switch (mode)
+ {
+ case V16QImode:
+ emit_insn (gen_msa_nor_v_b (dest, src, src));
+ break;
+ case V8HImode:
+ emit_insn (gen_msa_nor_v_h (dest, src, src));
+ break;
+ case V4SImode:
+ emit_insn (gen_msa_nor_v_w (dest, src, src));
+ break;
+ case V2DImode:
+ emit_insn (gen_msa_nor_v_d (dest, src, src));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
+
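+/* Emit the MSA vector comparison COND of OP0 and OP1 into DEST, setting each
+   element of DEST to all ones where the comparison holds and to zero
+   otherwise.  */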
+static void
+mips_expand_msa_cmp (rtx dest, enum rtx_code cond, rtx op0, rtx op1)
+{
+ enum machine_mode cmp_mode = GET_MODE (op0);
+
+ switch (cmp_mode)
+ {
+ case V16QImode:
+ switch (cond)
+ {
+ case EQ:
+ emit_insn (gen_msa_ceq_b (dest, op0, op1));
+ break;
+ case LT:
+ emit_insn (gen_msa_clt_s_b (dest, op0, op1));
+ break;
+ case LE:
+ emit_insn (gen_msa_cle_s_b (dest, op0, op1));
+ break;
+ case LTU:
+ emit_insn (gen_msa_clt_u_b (dest, op0, op1));
+ break;
+ case LEU:
+ emit_insn (gen_msa_cle_u_b (dest, op0, op1));
+ break;
+	case GE: // use cle, swap operands
+	  emit_insn (gen_msa_cle_s_b (dest, op1, op0));
+ break;
+ case GT: // swap
+ emit_insn (gen_msa_clt_s_b (dest, op1, op0));
+ break;
+	case GEU: // use cle_u, swap operands
+	  emit_insn (gen_msa_cle_u_b (dest, op1, op0));
+ break;
+ case GTU: // swap
+ emit_insn (gen_msa_clt_u_b (dest, op1, op0));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case V8HImode:
+ switch (cond)
+ {
+ case EQ:
+ emit_insn (gen_msa_ceq_h (dest, op0, op1));
+ break;
+ case LT:
+ emit_insn (gen_msa_clt_s_h (dest, op0, op1));
+ break;
+ case LE:
+ emit_insn (gen_msa_cle_s_h (dest, op0, op1));
+ break;
+ case LTU:
+ emit_insn (gen_msa_clt_u_h (dest, op0, op1));
+ break;
+ case LEU:
+ emit_insn (gen_msa_cle_u_h (dest, op0, op1));
+ break;
+	case GE: // use cle, swap operands
+	  emit_insn (gen_msa_cle_s_h (dest, op1, op0));
+ break;
+ case GT: // swap
+ emit_insn (gen_msa_clt_s_h (dest, op1, op0));
+ break;
+	case GEU: // use cle_u, swap operands
+	  emit_insn (gen_msa_cle_u_h (dest, op1, op0));
+ break;
+ case GTU: // swap
+ emit_insn (gen_msa_clt_u_h (dest, op1, op0));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case V4SImode:
+ switch (cond)
+ {
+ case EQ:
+ emit_insn (gen_msa_ceq_w (dest, op0, op1));
+ break;
+ case LT:
+ emit_insn (gen_msa_clt_s_w (dest, op0, op1));
+ break;
+ case LE:
+ emit_insn (gen_msa_cle_s_w (dest, op0, op1));
+ break;
+ case LTU:
+ emit_insn (gen_msa_clt_u_w (dest, op0, op1));
+ break;
+ case LEU:
+ emit_insn (gen_msa_cle_u_w (dest, op0, op1));
+ break;
+	case GE: // use cle, swap operands
+	  emit_insn (gen_msa_cle_s_w (dest, op1, op0));
+ break;
+ case GT: // swap
+ emit_insn (gen_msa_clt_s_w (dest, op1, op0));
+ break;
+	case GEU: // use cle_u, swap operands
+	  emit_insn (gen_msa_cle_u_w (dest, op1, op0));
+ break;
+ case GTU: // swap
+ emit_insn (gen_msa_clt_u_w (dest, op1, op0));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case V2DImode:
+ switch (cond)
+ {
+ case EQ:
+ emit_insn (gen_msa_ceq_d (dest, op0, op1));
+ break;
+ case LT:
+ emit_insn (gen_msa_clt_s_d (dest, op0, op1));
+ break;
+ case LE:
+ emit_insn (gen_msa_cle_s_d (dest, op0, op1));
+ break;
+ case LTU:
+ emit_insn (gen_msa_clt_u_d (dest, op0, op1));
+ break;
+ case LEU:
+ emit_insn (gen_msa_cle_u_d (dest, op0, op1));
+ break;
+	case GE: // use cle, swap operands
+	  emit_insn (gen_msa_cle_s_d (dest, op1, op0));
+ break;
+ case GT: // swap
+ emit_insn (gen_msa_clt_s_d (dest, op1, op0));
+ break;
+	case GEU: // use cle_u, swap operands
+	  emit_insn (gen_msa_cle_u_d (dest, op1, op0));
+ break;
+ case GTU: // swap
+ emit_insn (gen_msa_clt_u_d (dest, op1, op0));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case V4SFmode:
+ switch (cond)
+ {
+ case UNORDERED:
+ emit_insn (gen_msa_fcun_w (dest, op0, op1));
+ break;
+ case EQ:
+ emit_insn (gen_msa_fceq_w (dest, op0, op1));
+ break;
+ case LTGT:
+ emit_insn (gen_msa_fcne_w (dest, op0, op1));
+ break;
+ case GT: // use slt, swap op0 and op1
+ emit_insn (gen_msa_fslt_w (dest, op1, op0));
+ break;
+ case GE: // use sle, swap op0 and op1
+ emit_insn (gen_msa_fsle_w (dest, op1, op0));
+ break;
+ case LT: // use slt
+ emit_insn (gen_msa_fslt_w (dest, op0, op1));
+ break;
+ case LE: // use sle
+ emit_insn (gen_msa_fsle_w (dest, op0, op1));
+ break;
+ case UNGE: // use cule, swap op0 and op1
+ emit_insn (gen_msa_fcule_w (dest, op1, op0));
+ break;
+ case UNGT: // use cult, swap op0 and op1
+ emit_insn (gen_msa_fcult_w (dest, op1, op0));
+ break;
+ case UNLE:
+ emit_insn (gen_msa_fcule_w (dest, op0, op1));
+ break;
+ case UNLT:
+ emit_insn (gen_msa_fcult_w (dest, op0, op1));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case V2DFmode:
+ switch (cond)
+ {
+ case UNORDERED:
+ emit_insn (gen_msa_fcun_d (dest, op0, op1));
+ break;
+ case EQ:
+ emit_insn (gen_msa_fceq_d (dest, op0, op1));
+ break;
+ case LTGT:
+ emit_insn (gen_msa_fcne_d (dest, op0, op1));
+ break;
+ case GT: // use slt, swap op0 and op1
+ emit_insn (gen_msa_fslt_d (dest, op1, op0));
+ break;
+ case GE: // use sle, swap op0 and op1
+ emit_insn (gen_msa_fsle_d (dest, op1, op0));
+ break;
+ case LT: // use slt
+ emit_insn (gen_msa_fslt_d (dest, op0, op1));
+ break;
+ case LE: // use sle
+ emit_insn (gen_msa_fsle_d (dest, op0, op1));
+ break;
+ case UNGE: // use cule, swap op0 and op1
+ emit_insn (gen_msa_fcule_d (dest, op1, op0));
+ break;
+	case UNGT: // use cult, swap op0 and op1
+ emit_insn (gen_msa_fcult_d (dest, op1, op0));
+ break;
+ case UNLE:
+ emit_insn (gen_msa_fcule_d (dest, op0, op1));
+ break;
+ case UNLT:
+ emit_insn (gen_msa_fcult_d (dest, op0, op1));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ break;
+ }
+}
+
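+/* If the MSA instruction set has no direct comparison for integer condition
+   *COND, replace *COND with the comparison to emit instead and return true
+   to ask the caller to invert the result.  */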
+static bool
+mips_msa_reversed_int_cond (enum rtx_code *cond)
+{
+ switch (*cond)
+ {
+ case NE:
+ *cond = EQ;
+ return true;
+
+ default:
+ return false;
+ }
+}
+
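+/* Likewise for floating-point condition *CODE.  */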
+static bool
+mips_msa_reversed_fp_cond (enum rtx_code *code)
+{
+ switch (*code)
+ {
+ case NE:
+ case ORDERED:
+ case UNEQ:
+ *code = reverse_condition_maybe_unordered (*code);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
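+/* Expand a vector selection on the result of comparing CMP_OP0 with CMP_OP1
+   under condition COND, storing the resulting element mask in DEST.  Only
+   the canonical case of TRUE_SRC being all ones and FALSE_SRC being zero is
+   handled here; other sources are merged with the mask by the caller.  */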
+static void
+mips_expand_msa_vcond (rtx dest, rtx true_src, rtx false_src,
+ enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
+{
+ enum machine_mode dest_mode = GET_MODE (dest);
+ enum machine_mode cmp_mode = GET_MODE (cmp_op0);
+ bool reversed_p;
+
+ if (FLOAT_MODE_P (cmp_mode))
+ reversed_p = mips_msa_reversed_fp_cond (&cond);
+ else
+ reversed_p = mips_msa_reversed_int_cond (&cond);
+
+ mips_expand_msa_cmp (dest, cond, cmp_op0, cmp_op1);
+ if (reversed_p)
+ mips_expand_msa_one_cmpl (dest, dest);
+
+  /* MSA vcond only produces results -1 and 0 for true and false.  */
+ gcc_assert ((true_src == CONSTM1_RTX (dest_mode))
+ && (false_src == CONST0_RTX (dest_mode)));
+}
+
+/* Expand VEC_COND_EXPR
+ * MODE of result
+ * VIMODE equivalent integer mode
+ * OPERANDS operands of VEC_COND_EXPR
+ * gen_msa_and_fn used to generate a VIMODE vector msa AND
+ * gen_msa_nor_fn used to generate a VIMODE vector msa NOR
+ * gen_msa_ior_fn used to generate a VIMODE vector msa IOR.
+ */
+
+void
+mips_expand_vec_cond_expr (enum machine_mode mode,
+ enum machine_mode vimode,
+ rtx *operands,
+ rtx (*gen_msa_and_fn)(rtx, rtx, rtx),
+ rtx (*gen_msa_nor_fn)(rtx, rtx, rtx),
+ rtx (*gen_msa_ior_fn)(rtx, rtx, rtx))
+{
+ rtx true_val = CONSTM1_RTX (vimode);
+ rtx false_val = CONST0_RTX (vimode);
+
+ if (operands[1] == true_val && operands[2] == false_val)
+ mips_expand_msa_vcond (operands[0], operands[1], operands[2],
+ GET_CODE (operands[3]), operands[4], operands[5]);
+ else
+ {
+ rtx res = gen_reg_rtx (vimode);
+ rtx temp1 = gen_reg_rtx (vimode);
+ rtx temp2 = gen_reg_rtx (vimode);
+ rtx xres = gen_reg_rtx (vimode);
+
+ mips_expand_msa_vcond (res, true_val, false_val,
+ GET_CODE (operands[3]), operands[4], operands[5]);
+
+      // This gives a vector whose true/false elements have the value -1 or 0
+      // respectively.  The result may need adjusting if the required results
+      // in operands[1]/operands[2] are different.
+
+      // Adjust the true elements to be operands[1].
+ emit_move_insn (xres, res);
+ if (operands[1] != true_val)
+ {
+ rtx xop1 = operands[1]; /* Assume we can use operands[1] */
+
+ if (mode != vimode)
+ {
+ xop1 = gen_reg_rtx (vimode);
+ if (GET_CODE (operands[1]) == CONST_VECTOR)
+ {
+ rtx xtemp = gen_reg_rtx (mode);
+ emit_move_insn (xtemp, operands[1]);
+ emit_move_insn (xop1,
+ gen_rtx_SUBREG (vimode, xtemp, 0));
+ }
+ else
+ emit_move_insn (xop1,
+ gen_rtx_SUBREG (vimode, operands[1], 0));
+ }
+ else if (GET_CODE (operands[1]) == CONST_VECTOR)
+ {
+ xop1 = gen_reg_rtx (mode);
+ emit_move_insn (xop1, operands[1]);
+ }
+
+ emit_insn (gen_msa_and_fn (temp1, xres, xop1));
+ }
+ else
+ emit_move_insn (temp1, xres);
+
+      // Adjust the false elements to be operands[2].
+ emit_insn (gen_msa_nor_fn (temp2, xres, xres));
+ if (operands[2] != false_val)
+ {
+	  rtx xop2 = operands[2]; /* Assume we can use operands[2] */
+
+ if (mode != vimode)
+ {
+ xop2 = gen_reg_rtx (vimode);
+ if (GET_CODE (operands[2]) == CONST_VECTOR)
+ {
+ rtx xtemp = gen_reg_rtx (mode);
+ emit_move_insn (xtemp, operands[2]);
+ emit_move_insn (xop2,
+ gen_rtx_SUBREG (vimode, xtemp, 0));
+ }
+ else
+ emit_move_insn (xop2,
+ gen_rtx_SUBREG (vimode, operands[2], 0));
+ }
+ else if (GET_CODE (operands[2]) == CONST_VECTOR)
+ {
+ xop2 = gen_reg_rtx (mode);
+ emit_move_insn (xop2, operands[2]);
+ }
+
+ emit_insn (gen_msa_and_fn (temp2, temp2, xop2));
+ }
+ else
+ emit_insn (gen_msa_and_fn (temp2, temp2, xres));
+
+ // Combine together into result.
+ emit_insn (gen_msa_ior_fn (xres, temp1, temp2));
+ emit_move_insn (operands[0],
+ gen_rtx_SUBREG (mode, xres, 0));
+ }
+}
+
+/* Implement HARD_REGNO_CALLER_SAVE_MODE. */
+
+enum machine_mode
+mips_hard_regno_caller_save_mode (unsigned int regno,
+ unsigned int nregs,
+ enum machine_mode mode)
+{
+ /* For performance, to avoid saving/restoring upper parts of a register,
+ we return MODE as save mode when MODE is not VOIDmode. */
+ if (mode == VOIDmode)
+ return choose_hard_reg_mode (regno, nregs, false);
+ else
+ return mode;
+}
+
/* Implement TARGET_CASE_VALUES_THRESHOLD. */
unsigned int
@@ -18897,6 +21482,25 @@ mips_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
*update = build2 (COMPOUND_EXPR, void_type_node, *update,
atomic_feraiseexcept_call);
}
+
+/* Implement TARGET_SPILL_CLASS. */
+
+static reg_class_t
+mips_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ if (TARGET_MIPS16)
+ return SPILL_REGS;
+ return NO_REGS;
+}
+
+/* Implement TARGET_LRA_P. */
+
+static bool
+mips_lra_p (void)
+{
+ return mips_lra_flag;
+}
/* Initialize the GCC target structure. */
#undef TARGET_ASM_ALIGNED_HI_OP
@@ -18960,6 +21564,8 @@ mips_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mips_register_move_cost
+#undef TARGET_REGISTER_PRIORITY
+#define TARGET_REGISTER_PRIORITY mips_register_priority
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mips_memory_move_cost
#undef TARGET_RTX_COSTS
@@ -19041,6 +21647,10 @@ mips_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
#define TARGET_FUNCTION_ARG_ADVANCE mips_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY mips_function_arg_boundary
+#undef TARGET_GET_RAW_RESULT_MODE
+#define TARGET_GET_RAW_RESULT_MODE mips_get_reg_raw_mode
+#undef TARGET_GET_RAW_ARG_MODE
+#define TARGET_GET_RAW_ARG_MODE mips_get_reg_raw_mode
#undef TARGET_MODE_REP_EXTENDED
#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
@@ -19053,6 +21663,9 @@ mips_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE mips_preferred_simd_mode
+#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
+#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
+ mips_autovectorize_vector_sizes
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS mips_init_builtins
@@ -19134,6 +21747,17 @@ mips_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV mips_atomic_assign_expand_fenv
+#undef TARGET_SPILL_CLASS
+#define TARGET_SPILL_CLASS mips_spill_class
+#undef TARGET_LRA_P
+#define TARGET_LRA_P mips_lra_p
+
+#undef TARGET_SCHED_INIT_GLOBAL
+#define TARGET_SCHED_INIT_GLOBAL mips_sched_init_global
+
+#undef TARGET_SCHED_FINISH_GLOBAL
+#define TARGET_SCHED_FINISH_GLOBAL mips_sched_finish_global
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-mips.h"
diff --git a/gcc-4.9/gcc/config/mips/mips.h b/gcc-4.9/gcc/config/mips/mips.h
index bedc45b54..a4c70480b 100644
--- a/gcc-4.9/gcc/config/mips/mips.h
+++ b/gcc-4.9/gcc/config/mips/mips.h
@@ -208,8 +208,14 @@ struct mips_cpu_info {
#define ISA_MIPS4 (mips_isa == 4)
#define ISA_MIPS32 (mips_isa == 32)
#define ISA_MIPS32R2 (mips_isa == 33)
+#define ISA_MIPS32R3 (mips_isa == 34)
+#define ISA_MIPS32R5 (mips_isa == 36)
+#define ISA_MIPS32R6 (mips_isa == 37)
#define ISA_MIPS64 (mips_isa == 64)
#define ISA_MIPS64R2 (mips_isa == 65)
+#define ISA_MIPS64R3 (mips_isa == 66)
+#define ISA_MIPS64R5 (mips_isa == 68)
+#define ISA_MIPS64R6 (mips_isa == 69)
/* Architecture target defines. */
#define TARGET_LOONGSON_2E (mips_arch == PROCESSOR_LOONGSON_2E)
@@ -260,6 +266,7 @@ struct mips_cpu_info {
|| mips_tune == PROCESSOR_OCTEON2)
#define TUNE_SB1 (mips_tune == PROCESSOR_SB1 \
|| mips_tune == PROCESSOR_SB1A)
+#define TUNE_P5600 (mips_tune == PROCESSOR_P5600)
/* Whether vector modes and intrinsics for ST Microelectronics
Loongson-2E/2F processors should be enabled. In o32 pairs of
@@ -302,7 +309,8 @@ struct mips_cpu_info {
#define TUNE_MACC_CHAINS (TUNE_MIPS5500 \
|| TUNE_MIPS4120 \
|| TUNE_MIPS4130 \
- || TUNE_24K)
+ || TUNE_24K \
+ || TUNE_P5600)
#define TARGET_OLDABI (mips_abi == ABI_32 || mips_abi == ABI_O64)
#define TARGET_NEWABI (mips_abi == ABI_N32 || mips_abi == ABI_64)
@@ -314,6 +322,10 @@ struct mips_cpu_info {
#define TARGET_HARD_FLOAT (TARGET_HARD_FLOAT_ABI && !TARGET_MIPS16)
#define TARGET_SOFT_FLOAT (TARGET_SOFT_FLOAT_ABI || TARGET_MIPS16)
+/* TARGET_FLOAT64 represents -mfp64 and TARGET_FLOATXX represents -mfpxx;
+   derive TARGET_FLOAT32 to represent -mfp32.  */
+#define TARGET_FLOAT32 (!TARGET_FLOAT64 && !TARGET_FLOATXX)
+
/* False if SC acts as a memory barrier with respect to itself,
otherwise a SYNC will be emitted after SC for atomic operations
that require ordering between the SC and following loads and
@@ -382,6 +394,8 @@ struct mips_cpu_info {
\
if (TARGET_FLOAT64) \
builtin_define ("__mips_fpr=64"); \
+ else if (TARGET_FLOATXX) \
+ builtin_define ("__mips_fpr=0"); \
else \
builtin_define ("__mips_fpr=32"); \
\
@@ -415,6 +429,12 @@ struct mips_cpu_info {
builtin_define ("__mips_dsp_rev=1"); \
} \
\
+ if (TARGET_MSA) \
+ { \
+ builtin_define ("__mips_msa"); \
+ builtin_define ("__mips_msa_width=128"); \
+ } \
+ \
MIPS_CPP_SET_PROCESSOR ("_MIPS_ARCH", mips_arch_info); \
MIPS_CPP_SET_PROCESSOR ("_MIPS_TUNE", mips_tune_info); \
\
@@ -438,30 +458,19 @@ struct mips_cpu_info {
builtin_define ("__mips=4"); \
builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS4"); \
} \
- else if (ISA_MIPS32) \
- { \
- builtin_define ("__mips=32"); \
- builtin_define ("__mips_isa_rev=1"); \
- builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS32"); \
- } \
- else if (ISA_MIPS32R2) \
+ else if (mips_isa >= 32 && mips_isa < 64) \
{ \
builtin_define ("__mips=32"); \
- builtin_define ("__mips_isa_rev=2"); \
builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS32"); \
} \
- else if (ISA_MIPS64) \
+ else if (mips_isa >= 64) \
{ \
builtin_define ("__mips=64"); \
- builtin_define ("__mips_isa_rev=1"); \
- builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS64"); \
- } \
- else if (ISA_MIPS64R2) \
- { \
- builtin_define ("__mips=64"); \
- builtin_define ("__mips_isa_rev=2"); \
builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS64"); \
} \
+ if (mips_isa_rev > 0) \
+ builtin_define_with_int_value ("__mips_isa_rev", \
+ mips_isa_rev); \
\
switch (mips_abi) \
{ \
@@ -491,6 +500,8 @@ struct mips_cpu_info {
builtin_define_with_int_value ("_MIPS_SZPTR", POINTER_SIZE); \
builtin_define_with_int_value ("_MIPS_FPSET", \
32 / MAX_FPRS_PER_FMT); \
+ builtin_define_with_int_value ("_MIPS_SPFPSET", \
+ TARGET_ODD_SPREG ? 32 : 16); \
\
/* These defines reflect the ABI in use, not whether the \
FPU is directly accessible. */ \
@@ -513,6 +524,12 @@ struct mips_cpu_info {
if (mips_nan == MIPS_IEEE_754_2008) \
builtin_define ("__mips_nan2008"); \
\
+ if (mips_c_lib == MIPS_LIB_SMALL) \
+ builtin_define ("__mips_clib_small"); \
+ \
+ if (mips_c_lib == MIPS_LIB_TINY) \
+ builtin_define ("__mips_clib_tiny"); \
+ \
if (TARGET_BIG_ENDIAN) \
{ \
builtin_define_std ("MIPSEB"); \
@@ -632,10 +649,14 @@ struct mips_cpu_info {
#define MULTILIB_ISA_DEFAULT "mips32"
#elif MIPS_ISA_DEFAULT == 33
#define MULTILIB_ISA_DEFAULT "mips32r2"
+#elif MIPS_ISA_DEFAULT == 37
+#define MULTILIB_ISA_DEFAULT "mips32r6"
#elif MIPS_ISA_DEFAULT == 64
#define MULTILIB_ISA_DEFAULT "mips64"
#elif MIPS_ISA_DEFAULT == 65
#define MULTILIB_ISA_DEFAULT "mips64r2"
+#elif MIPS_ISA_DEFAULT == 69
+#define MULTILIB_ISA_DEFAULT "mips64r6"
#else
#define MULTILIB_ISA_DEFAULT "mips1"
#endif
@@ -700,9 +721,15 @@ struct mips_cpu_info {
%{march=mips32|march=4kc|march=4km|march=4kp|march=4ksc:-mips32} \
%{march=mips32r2|march=m4k|march=4ke*|march=4ksd|march=24k* \
|march=34k*|march=74k*|march=m14k*|march=1004k*: -mips32r2} \
+ %{march=mips32r3: -mips32r3} \
+ %{march=mips32r5|march=p5600: -mips32r5} \
+ %{march=mips32r6: -mips32r6} \
%{march=mips64|march=5k*|march=20k*|march=sb1*|march=sr71000 \
|march=xlr: -mips64} \
%{march=mips64r2|march=loongson3a|march=octeon|march=xlp: -mips64r2} \
+ %{march=mips64r3: -mips64r3} \
+ %{march=mips64r5: -mips64r5} \
+ %{march=mips64r6: -mips64r6} \
%{!march=*: -" MULTILIB_ISA_DEFAULT "}}"
/* A spec that infers a -mhard-float or -msoft-float setting from an
@@ -722,10 +749,17 @@ struct mips_cpu_info {
#define MIPS_32BIT_OPTION_SPEC \
"mips1|mips2|mips32*|mgp32"
+/* A spec condition that matches architectures that should be targeted
+ with O32 FPXX for compatibility reasons. */
+#define MIPS_FPXX_OPTION_SPEC \
+ "mips2|mips3|mips4|mips5|mips32|mips32r2|mips32r3|mips32r5| \
+ mips64|mips64r2|mips64r3|mips64r5"
+
/* Infer a -msynci setting from a -mips argument, on the assumption that
-msynci is desired where possible. */
#define MIPS_ISA_SYNCI_SPEC \
- "%{msynci|mno-synci:;:%{mips32r2|mips64r2:-msynci;:-mno-synci}}"
+ "%{msynci|mno-synci:;:%{mips32r2|mips32r3|mips32r5|mips32r6|mips64r2 \
+ |mips64r3|mips64r5|mips64r6:-msynci;:-mno-synci}}"
#if (MIPS_ABI_DEFAULT == ABI_O64 \
|| MIPS_ABI_DEFAULT == ABI_N32 \
@@ -746,6 +780,8 @@ struct mips_cpu_info {
--with-float is ignored if -mhard-float or -msoft-float are
specified.
--with-nan is ignored if -mnan is specified.
+ --with-fp is ignored if -mfp is specified.
+ --with-odd-spreg is ignored if -modd-spreg or -mno-odd-spreg are specified.
--with-divide is ignored if -mdivide-traps or -mdivide-breaks are
specified. */
#define OPTION_DEFAULT_SPECS \
@@ -759,6 +795,8 @@ struct mips_cpu_info {
{"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \
{"fpu", "%{!msingle-float:%{!mdouble-float:-m%(VALUE)-float}}" }, \
{"nan", "%{!mnan=*:-mnan=%(VALUE)}" }, \
+ {"fp_32", "%{" OPT_ARCH32 ":%{!mfp*:-mfp%(VALUE)}}" }, \
+ {"odd_spreg_32", "%{" OPT_ARCH32 ":%{!modd-spreg:%{!mno-odd-spreg:-m%(VALUE)}}}" }, \
{"divide", "%{!mdivide-traps:%{!mdivide-breaks:-mdivide-%(VALUE)}}" }, \
{"llsc", "%{!mllsc:%{!mno-llsc:-m%(VALUE)}}" }, \
{"mips-plt", "%{!mplt:%{!mno-plt:-m%(VALUE)}}" }, \
@@ -801,12 +839,22 @@ struct mips_cpu_info {
#define ISA_HAS_64BIT_REGS (ISA_MIPS3 \
|| ISA_MIPS4 \
|| ISA_MIPS64 \
- || ISA_MIPS64R2)
+ || ISA_MIPS64R2 \
+ || ISA_MIPS64R3 \
+ || ISA_MIPS64R5 \
+ || ISA_MIPS64R6)
+
+#define ISA_HAS_JR (mips_isa_rev <= 5)
/* ISA has branch likely instructions (e.g. mips2). */
/* Disable branchlikely for tx39 until compare rewrite. They haven't
been generated up to this point. */
-#define ISA_HAS_BRANCHLIKELY (!ISA_MIPS1)
+#define ISA_HAS_BRANCHLIKELY (!ISA_MIPS1 && mips_isa_rev <= 5)
+
+/* ISA has 32 single-precision registers. */
+#define ISA_HAS_ODD_SPREG ((mips_isa_rev >= 1 \
+ && !TARGET_LOONGSON_3A) \
+ || TARGET_FLOAT64)
/* ISA has a three-operand multiplication instruction (usually spelt "mul"). */
#define ISA_HAS_MUL3 ((TARGET_MIPS3900 \
@@ -816,10 +864,8 @@ struct mips_cpu_info {
|| TARGET_MIPS7000 \
|| TARGET_MIPS9000 \
|| TARGET_MAD \
- || ISA_MIPS32 \
- || ISA_MIPS32R2 \
- || ISA_MIPS64 \
- || ISA_MIPS64R2) \
+ || (mips_isa_rev >= 1 \
+ && mips_isa_rev <= 5)) \
&& !TARGET_MIPS16)
/* ISA has a three-operand multiplication instruction. */
@@ -827,33 +873,43 @@ struct mips_cpu_info {
&& TARGET_OCTEON \
&& !TARGET_MIPS16)
+/* ISA has HI and LO registers. */
+#define ISA_HAS_HILO (mips_isa_rev <= 5)
+
+
/* ISA supports instructions DMULT and DMULTU. */
-#define ISA_HAS_DMULT (TARGET_64BIT && !TARGET_MIPS5900)
+#define ISA_HAS_DMULT (TARGET_64BIT \
+ && !TARGET_MIPS5900 \
+ && ISA_HAS_HILO)
-/* ISA supports instructions MULT and MULTU.
- This is always true, but the macro is needed for ISA_HAS_<D>MULT
- in mips.md. */
-#define ISA_HAS_MULT (1)
+/* ISA supports instructions MULT and MULTU. */
+#define ISA_HAS_MULT ISA_HAS_HILO
+
+#define ISA_HAS_R6MUL (mips_isa_rev >= 6)
+#define ISA_HAS_R6DMUL (TARGET_64BIT && mips_isa_rev >= 6)
/* ISA supports instructions DDIV and DDIVU. */
-#define ISA_HAS_DDIV (TARGET_64BIT && !TARGET_MIPS5900)
+#define ISA_HAS_DDIV (TARGET_64BIT \
+ && !TARGET_MIPS5900 \
+ && mips_isa_rev <= 5)
/* ISA supports instructions DIV and DIVU.
This is always true, but the macro is needed for ISA_HAS_<D>DIV
in mips.md. */
-#define ISA_HAS_DIV (1)
+#define ISA_HAS_DIV (mips_isa_rev <= 5)
#define ISA_HAS_DIV3 ((TARGET_LOONGSON_2EF \
|| TARGET_LOONGSON_3A) \
&& !TARGET_MIPS16)
+#define ISA_HAS_R6DIV (mips_isa_rev >= 6)
+#define ISA_HAS_R6DDIV (TARGET_64BIT && mips_isa_rev >= 6)
+
/* ISA has the floating-point conditional move instructions introduced
in mips4. */
#define ISA_HAS_FP_CONDMOVE ((ISA_MIPS4 \
- || ISA_MIPS32 \
- || ISA_MIPS32R2 \
- || ISA_MIPS64 \
- || ISA_MIPS64R2) \
+ || (mips_isa_rev >= 1 \
+ && mips_isa_rev <= 5)) \
&& !TARGET_MIPS5500 \
&& !TARGET_MIPS16)
@@ -871,19 +927,23 @@ struct mips_cpu_info {
/* ISA has the mips4 FP condition code instructions: FP-compare to CC,
branch on CC, and move (both FP and non-FP) on CC. */
#define ISA_HAS_8CC (ISA_MIPS4 \
- || ISA_MIPS32 \
- || ISA_MIPS32R2 \
- || ISA_MIPS64 \
- || ISA_MIPS64R2)
+ || (mips_isa_rev >= 1 \
+ && mips_isa_rev <= 5))
+
+/* ISA has the FP condition code instructions that store the flag in an
+ FP register. */
+#define ISA_HAS_CCF (mips_isa_rev >= 6)
+
+#define ISA_HAS_SEL (mips_isa_rev >= 6)
/* This is a catch all for other mips4 instructions: indexed load, the
FP madd and msub instructions, and the FP recip and recip sqrt
instructions. Note that this macro should only be used by other
ISA_HAS_* macros. */
#define ISA_HAS_FP4 ((ISA_MIPS4 \
- || ISA_MIPS32R2 \
|| ISA_MIPS64 \
- || ISA_MIPS64R2) \
+ || (mips_isa_rev >= 2 \
+ && mips_isa_rev <= 5)) \
&& !TARGET_MIPS16)
/* ISA has floating-point indexed load and store instructions
@@ -891,17 +951,22 @@ struct mips_cpu_info {
#define ISA_HAS_LXC1_SXC1 ISA_HAS_FP4
/* ISA has paired-single instructions. */
-#define ISA_HAS_PAIRED_SINGLE (ISA_MIPS32R2 || ISA_MIPS64 || ISA_MIPS64R2)
+#define ISA_HAS_PAIRED_SINGLE (ISA_MIPS64 \
+ || (mips_isa_rev >= 2 \
+ && mips_isa_rev <= 5))
/* ISA has conditional trap instructions. */
#define ISA_HAS_COND_TRAP (!ISA_MIPS1 \
&& !TARGET_MIPS16)
+/* ISA has conditional trap with immediate instructions. */
+#define ISA_HAS_COND_TRAPI (!ISA_MIPS1 \
+ && mips_isa_rev <= 5 \
+ && !TARGET_MIPS16)
+
/* ISA has integer multiply-accumulate instructions, madd and msub. */
-#define ISA_HAS_MADD_MSUB (ISA_MIPS32 \
- || ISA_MIPS32R2 \
- || ISA_MIPS64 \
- || ISA_MIPS64R2)
+#define ISA_HAS_MADD_MSUB (mips_isa_rev >= 1 \
+ && mips_isa_rev <= 5)
/* Integer multiply-accumulate instructions should be generated. */
#define GENERATE_MADD_MSUB (TARGET_IMADD && !TARGET_MIPS16)
@@ -909,6 +974,9 @@ struct mips_cpu_info {
/* ISA has floating-point madd and msub instructions 'd = a * b [+-] c'. */
#define ISA_HAS_FP_MADD4_MSUB4 ISA_HAS_FP4
+/* ISA has floating-point maddf and msubf instructions 'd = d [+-] a * b'. */
+#define ISA_HAS_FP_MADDF_MSUBF (mips_isa_rev >= 6)
+
/* ISA has floating-point madd and msub instructions 'c = a * b [+-] c'. */
#define ISA_HAS_FP_MADD3_MSUB3 TARGET_LOONGSON_2EF
@@ -928,19 +996,23 @@ struct mips_cpu_info {
(((ISA_HAS_FP4 \
&& ((MODE) == SFmode \
|| ((TARGET_FLOAT64 \
- || ISA_MIPS32R2 \
- || ISA_MIPS64R2) \
+ || mips_isa_rev >= 2) \
&& (MODE) == DFmode))) \
+ || (((MODE) == SFmode \
+ || (MODE) == DFmode) \
+ && (mips_isa_rev >= 6)) \
|| (TARGET_SB1 \
&& (MODE) == V2SFmode)) \
&& !TARGET_MIPS16)
+#define ISA_HAS_LWL_LWR (mips_isa_rev <= 5 && !TARGET_MIPS16)
+
+#define ISA_HAS_IEEE_754_LEGACY (mips_isa_rev <= 5)
+
+#define ISA_HAS_IEEE_754_2008 (mips_isa_rev >= 2)
+
/* ISA has count leading zeroes/ones instruction (not implemented). */
-#define ISA_HAS_CLZ_CLO ((ISA_MIPS32 \
- || ISA_MIPS32R2 \
- || ISA_MIPS64 \
- || ISA_MIPS64R2) \
- && !TARGET_MIPS16)
+#define ISA_HAS_CLZ_CLO (mips_isa_rev >= 1 && !TARGET_MIPS16)
/* ISA has three operand multiply instructions that put
the high part in an accumulator: mulhi or mulhiu. */
@@ -978,8 +1050,7 @@ struct mips_cpu_info {
&& !TARGET_MIPS16)
/* ISA has the "ror" (rotate right) instructions. */
-#define ISA_HAS_ROR ((ISA_MIPS32R2 \
- || ISA_MIPS64R2 \
+#define ISA_HAS_ROR ((mips_isa_rev >= 2 \
|| TARGET_MIPS5400 \
|| TARGET_MIPS5500 \
|| TARGET_SR71K \
@@ -988,19 +1059,18 @@ struct mips_cpu_info {
/* ISA has the WSBH (word swap bytes within halfwords) instruction.
64-bit targets also provide DSBH and DSHD. */
-#define ISA_HAS_WSBH ((ISA_MIPS32R2 || ISA_MIPS64R2) \
- && !TARGET_MIPS16)
+#define ISA_HAS_WSBH (mips_isa_rev >= 2 && !TARGET_MIPS16)
/* ISA has data prefetch instructions. This controls use of 'pref'. */
#define ISA_HAS_PREFETCH ((ISA_MIPS4 \
|| TARGET_LOONGSON_2EF \
|| TARGET_MIPS5900 \
- || ISA_MIPS32 \
- || ISA_MIPS32R2 \
- || ISA_MIPS64 \
- || ISA_MIPS64R2) \
+ || mips_isa_rev >= 1) \
&& !TARGET_MIPS16)
+/* ISA has data prefetch with limited 9-bit displacement. */
+#define ISA_HAS_PREFETCH_9BIT (mips_isa_rev >= 6)
+
/* ISA has data indexed prefetch instructions. This controls use of
'prefx', along with TARGET_HARD_FLOAT and TARGET_DOUBLE_FLOAT.
(prefx is a cop1x instruction, so can only be used if FP is
@@ -1013,19 +1083,14 @@ struct mips_cpu_info {
#define ISA_HAS_TRUNC_W (!ISA_MIPS1)
/* ISA includes the MIPS32r2 seb and seh instructions. */
-#define ISA_HAS_SEB_SEH ((ISA_MIPS32R2 \
- || ISA_MIPS64R2) \
- && !TARGET_MIPS16)
+#define ISA_HAS_SEB_SEH (mips_isa_rev >= 2 && !TARGET_MIPS16)
/* ISA includes the MIPS32/64 rev 2 ext and ins instructions. */
-#define ISA_HAS_EXT_INS ((ISA_MIPS32R2 \
- || ISA_MIPS64R2) \
- && !TARGET_MIPS16)
+#define ISA_HAS_EXT_INS (mips_isa_rev >= 2 && !TARGET_MIPS16)
/* ISA has instructions for accessing top part of 64-bit fp regs. */
-#define ISA_HAS_MXHC1 (TARGET_FLOAT64 \
- && (ISA_MIPS32R2 \
- || ISA_MIPS64R2))
+#define ISA_HAS_MXHC1 (!TARGET_FLOAT32 \
+ && mips_isa_rev >= 2)
/* ISA has lwxs instruction (load w/scaled index address. */
#define ISA_HAS_LWXS ((TARGET_SMARTMIPS || TARGET_MICROMIPS) \
@@ -1047,6 +1112,12 @@ struct mips_cpu_info {
/* Revision 2 of the DSP ASE is available. */
#define ISA_HAS_DSPR2 (TARGET_DSPR2 && !TARGET_MIPS16)
+/* The MSA ASE is available. */
+#define ISA_HAS_MSA (TARGET_MSA && !TARGET_MIPS16)
+
+/* ISA has LSA available. */
+#define ISA_HAS_LSA (TARGET_MSA && !TARGET_MIPS16)
+
/* True if the result of a load is not available to the next instruction.
A nop will then be needed between instructions like "lw $4,..."
and "addiu $4,$4,1". */
@@ -1078,18 +1149,13 @@ struct mips_cpu_info {
MIPS64 and later ISAs to have the interlocks, plus any specific
earlier-ISA CPUs for which CPU documentation declares that the
instructions are really interlocked. */
-#define ISA_HAS_HILO_INTERLOCKS (ISA_MIPS32 \
- || ISA_MIPS32R2 \
- || ISA_MIPS64 \
- || ISA_MIPS64R2 \
+#define ISA_HAS_HILO_INTERLOCKS (mips_isa_rev >= 1 \
|| TARGET_MIPS5500 \
|| TARGET_MIPS5900 \
|| TARGET_LOONGSON_2EF)
/* ISA includes synci, jr.hb and jalr.hb. */
-#define ISA_HAS_SYNCI ((ISA_MIPS32R2 \
- || ISA_MIPS64R2) \
- && !TARGET_MIPS16)
+#define ISA_HAS_SYNCI (mips_isa_rev >= 2 && !TARGET_MIPS16)
/* ISA includes sync. */
#define ISA_HAS_SYNC ((mips_isa >= 2 || TARGET_MIPS3900) && !TARGET_MIPS16)
@@ -1176,6 +1242,8 @@ struct mips_cpu_info {
%{mmcu} %{mno-mcu} \
%{meva} %{mno-eva} \
%{mvirt} %{mno-virt} \
+%{mxpa} %{mno-xpa} \
+%{mmsa} %{mno-msa} \
%{msmartmips} %{mno-smartmips} \
%{mmt} %{mno-mt} \
%{mmxu} %{mno-mxu} \
@@ -1186,7 +1254,10 @@ struct mips_cpu_info {
%(subtarget_asm_debugging_spec) \
%{mabi=*} %{!mabi=*: %(asm_abi_default_spec)} \
%{mgp32} %{mgp64} %{march=*} %{mxgot:-xgot} \
-%{mfp32} %{mfp64} %{mnan=*} \
+%{mfp32} %{mfpxx} %{mfp64} %{mnan=*} \
+%{mhard-float} %{msoft-float} \
+%{mdouble-float} %{msingle-float} \
+%{modd-spreg} %{mno-odd-spreg} \
%{mshared} %{mno-shared} \
%{msym32} %{mno-sym32} \
%{mtune=*} \
@@ -1269,6 +1340,12 @@ struct mips_cpu_info {
/* By default, turn on GDB extensions. */
#define DEFAULT_GDB_EXTENSIONS 1
+/* Registers may have a prefix which can be ignored when matching
+ user asm and register definitions. */
+#ifndef REGISTER_PREFIX
+#define REGISTER_PREFIX "$"
+#endif
+
/* Local compiler-generated symbols must have a prefix that the assembler
understands. By default, this is $, although some targets (e.g.,
NetBSD-ELF) need to override this. */
@@ -1298,6 +1375,11 @@ struct mips_cpu_info {
/* The DWARF 2 CFA column which tracks the return address. */
#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
+/* The mode to use to calculate the size of a DWARF 2 CFA column. */
+#define DWARF_REG_MODE(REGNO, MODE) \
+ (FP_REG_P (REGNO) && mips_abi == ABI_32 && TARGET_FLOAT64 \
+ ? SImode : (MODE))
+
/* Before the prologue, RA lives in r31. */
#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
@@ -1340,6 +1422,11 @@ struct mips_cpu_info {
#define MIN_UNITS_PER_WORD 4
#endif
+/* Width of an MSA vector register in bytes. */
+#define UNITS_PER_MSA_REG 16
+/* Width of an MSA vector register in bits. */
+#define BITS_PER_MSA_REG (UNITS_PER_MSA_REG * BITS_PER_UNIT)
+
/* For MIPS, width of a floating point register. */
#define UNITS_PER_FPREG (TARGET_FLOAT64 ? 8 : 4)
@@ -1350,8 +1437,7 @@ struct mips_cpu_info {
/* The number of consecutive floating-point registers needed to store the
smallest format supported by the FPU. */
#define MIN_FPRS_PER_FMT \
- (ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64 || ISA_MIPS64R2 \
- ? 1 : MAX_FPRS_PER_FMT)
+ (TARGET_ODD_SPREG ? 1 : MAX_FPRS_PER_FMT)
/* The largest size of value that can be held in floating-point
registers and moved with a single instruction. */
@@ -1392,8 +1478,10 @@ struct mips_cpu_info {
#define LONG_LONG_ACCUM_TYPE_SIZE (TARGET_64BIT ? 128 : 64)
/* long double is not a fixed mode, but the idea is that, if we
- support long double, we also want a 128-bit integer type. */
-#define MAX_FIXED_MODE_SIZE LONG_DOUBLE_TYPE_SIZE
+ support long double, we also want a 128-bit integer type.
+ For MSA, we support an integer type with a width of BITS_PER_MSA_REG. */
+#define MAX_FIXED_MODE_SIZE \
+ (TARGET_MSA ? BITS_PER_MSA_REG : LONG_DOUBLE_TYPE_SIZE)
#ifdef IN_LIBGCC2
#if ((defined _ABIN32 && _MIPS_SIM == _ABIN32) \
@@ -1422,8 +1510,11 @@ struct mips_cpu_info {
/* 8 is observed right on a DECstation and on riscos 4.02. */
#define STRUCTURE_SIZE_BOUNDARY 8
-/* There is no point aligning anything to a rounder boundary than this. */
-#define BIGGEST_ALIGNMENT LONG_DOUBLE_TYPE_SIZE
+/* There is no point aligning anything to a rounder boundary than
+ LONG_DOUBLE_TYPE_SIZE, except that under MSA the biggest alignment is
+ BITS_PER_MSA_REG. */
+#define BIGGEST_ALIGNMENT \
+ (TARGET_MSA ? BITS_PER_MSA_REG : LONG_DOUBLE_TYPE_SIZE)
/* All accesses must be aligned. */
#define STRICT_ALIGNMENT 1
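Editorial note: with -mmsa the hunks above raise MAX_FIXED_MODE_SIZE and BIGGEST_ALIGNMENT to BITS_PER_MSA_REG (128 bits). A hedged illustration of the kind of object that motivates this, mirroring the aligned(16) typedefs msa.h introduces later in the patch; the names are illustrative only.

/* A 16-byte, 16-byte-aligned MSA-sized vector object (sketch).  */
typedef long long v2i64_example __attribute__ ((vector_size (16), aligned (16)));
static v2i64_example example_accumulator;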
@@ -1621,7 +1712,7 @@ struct mips_cpu_info {
{ /* General registers. */ \
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, \
- /* Floating-point registers. */ \
+ /* Floating-point registers. */ \
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
/* Others. */ \
@@ -1661,6 +1752,10 @@ struct mips_cpu_info {
#define MD_REG_NUM (MD_REG_LAST - MD_REG_FIRST + 1)
#define MD_DBX_FIRST (FP_DBX_FIRST + FP_REG_NUM)
+#define MSA_REG_FIRST FP_REG_FIRST
+#define MSA_REG_LAST FP_REG_LAST
+#define MSA_REG_NUM FP_REG_NUM
+
/* The DWARF 2 CFA column which tracks the return address from a
signal handler context. This means that to maintain backwards
compatibility, no hard register can be assigned this column if it
@@ -1706,6 +1801,8 @@ struct mips_cpu_info {
/* Request Interrupt Priority Level is from bit 10 to bit 15 of
the cause register for the EIC interrupt mode. */
#define CAUSE_IPL 10
+/* COP1 Enable is at bit 29 of the status register. */
+#define SR_COP1 29
/* Interrupt Priority Level is from bit 10 to bit 15 of the status register. */
#define SR_IPL 10
/* Exception Level is at bit 1 of the status register. */
@@ -1744,8 +1841,11 @@ struct mips_cpu_info {
/* Test if REGNO is hi, lo, or one of the 6 new DSP accumulators. */
#define ACC_REG_P(REGNO) \
(MD_REG_P (REGNO) || DSP_ACC_REG_P (REGNO))
+#define MSA_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - MSA_REG_FIRST) < MSA_REG_NUM)
#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
+#define MSA_REG_RTX_P(X) (REG_P (X) && MSA_REG_P (REGNO (X)))
/* True if X is (const (unspec [(const_int 0)] UNSPEC_GP)). This is used
to initialize the mips16 gp pseudo register. */
@@ -1766,6 +1866,19 @@ struct mips_cpu_info {
#define HARD_REGNO_MODE_OK(REGNO, MODE) \
mips_hard_regno_mode_ok[ (int)(MODE) ][ (REGNO) ]
+/* Select a register mode required for caller save of hard regno REGNO. */
+#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
+ mips_hard_regno_caller_save_mode (REGNO, NREGS, MODE)
+
+/* Odd-numbered single-precision registers are not considered call-saved
+ for O32 FPXX as they will be clobbered when run on an FR=1 FPU. */
+/* MIPS ABIs can only save 32-bit/64-bit (single/double) FP registers.
+ Thus, MSA vector registers with MODE > 64 bits are partially clobbered. */
+#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
+ ((TARGET_FLOATXX && hard_regno_nregs[REGNO][MODE] == 1 \
+ && FP_REG_P (REGNO) && (REGNO & 1)) \
+ || (TARGET_MSA && FP_REG_P (REGNO) && GET_MODE_SIZE (MODE) > 8))
+
#define MODES_TIEABLE_P mips_modes_tieable_p
/* Register to use for pushing function arguments. */
@@ -1871,11 +1984,14 @@ struct mips_cpu_info {
enum reg_class
{
NO_REGS, /* no registers in set */
+ M16_STORE_REGS, /* microMIPS store registers */
M16_REGS, /* mips16 directly accessible registers */
+ M16_SP_REGS, /* mips16 + $sp */
T_REG, /* mips16 T register ($24) */
M16_T_REGS, /* mips16 registers plus T register */
PIC_FN_ADDR_REG, /* SVR4 PIC function address register */
V1_REG, /* Register $v1 ($3) used for TLS access. */
+ SPILL_REGS, /* All but $sp and call preserved regs are in here */
LEA_REGS, /* Every GPR except $25 */
GR_REGS, /* integer registers */
FP_REGS, /* floating point registers */
@@ -1908,11 +2024,14 @@ enum reg_class
#define REG_CLASS_NAMES \
{ \
"NO_REGS", \
+ "M16_STORE_REGS", \
"M16_REGS", \
+ "M16_SP_REGS", \
"T_REG", \
"M16_T_REGS", \
"PIC_FN_ADDR_REG", \
"V1_REG", \
+ "SPILL_REGS", \
"LEA_REGS", \
"GR_REGS", \
"FP_REGS", \
@@ -1948,11 +2067,14 @@ enum reg_class
#define REG_CLASS_CONTENTS \
{ \
{ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0x000200fc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* M16_STORE_REGS */ \
{ 0x000300fc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* M16_REGS */ \
+ { 0x200300fc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* M16_SP_REGS */ \
{ 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* T_REG */ \
{ 0x010300fc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* M16_T_REGS */ \
{ 0x02000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* PIC_FN_ADDR_REG */ \
{ 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* V1_REG */ \
+ { 0x0303fffc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* SPILL_REGS */ \
{ 0xfdffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* LEA_REGS */ \
{ 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* GR_REGS */ \
{ 0x00000000, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* FP_REGS */ \
@@ -1985,7 +2107,7 @@ enum reg_class
valid base register must belong. A base register is one used in
an address which is the register value plus a displacement. */
-#define BASE_REG_CLASS (TARGET_MIPS16 ? M16_REGS : GR_REGS)
+#define BASE_REG_CLASS (TARGET_MIPS16 ? M16_SP_REGS : GR_REGS)
/* A macro whose definition is the name of the class to which a
valid index register must belong. An index register is one used
@@ -2087,6 +2209,7 @@ enum reg_class
#define SMALL_INT_UNSIGNED(X) SMALL_OPERAND_UNSIGNED (INTVAL (X))
#define LUI_INT(X) LUI_OPERAND (INTVAL (X))
#define UMIPS_12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047))
+#define MIPS_9BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -256, 255))
/* The HI and LO registers can only be reloaded via the general
registers. Condition code registers can only be loaded to the
@@ -2097,6 +2220,22 @@ enum reg_class
#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
mips_secondary_reload_class (CLASS, MODE, X, false)
+/* When targeting the O32 FPXX ABI, all doubleword or greater moves
+ to/from FP registers must be performed by FR-mode-aware instructions.
+ This can be achieved using mfhc1/mthc1 when these instructions are
+ available, but otherwise moves must go via memory.
+ For the O32 FP64A ABI, all doubleword or greater moves to/from
+ odd-numbered FP registers must move via memory, as it is not permitted
+ to access the lower half of these registers with mtc1/mfc1 since that
+ constitutes a single-precision access (which is forbidden). This is
+ implemented by requiring all doubleword moves to move via memory,
+ as this check is register-class based and not register based.
+ Splitting FP_REGS into even and odd classes would allow the
+ precise restriction to be represented, but this would have a
+ significant effect on other areas of the backend. */
+#define SECONDARY_MEMORY_NEEDED(CLASS1, CLASS2, MODE) \
+ mips_secondary_memory_needed ((CLASS1), (CLASS2), (MODE))
+
/* Return the maximum number of consecutive registers
needed to represent mode MODE in a register of class CLASS. */
@@ -2210,13 +2349,34 @@ enum reg_class
#define FP_ARG_FIRST (FP_REG_FIRST + 12)
#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
+/* True if MODE is vector and supported in an MSA vector register. */
+#define MSA_SUPPORTED_VECTOR_MODE_P(MODE) \
+ (GET_MODE_SIZE (MODE) == UNITS_PER_MSA_REG \
+ && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT))
+
+/* True if MODE is supported in an MSA vector register. */
+#define MSA_SUPPORTED_MODE_P(MODE) \
+ (TARGET_MSA && ((MODE) == TImode || MSA_SUPPORTED_VECTOR_MODE_P (MODE)))
+
+/* Temporary register that is used when restoring $gp after a call. $4 and $5
+ are used for returning complex double values in soft-float code, so $6 is the
+ first suitable candidate for TARGET_MIPS16. For !TARGET_MIPS16 we can use
+ $gp itself as the temporary. */
+#define POST_CALL_TMP_REG \
+ (TARGET_MIPS16 ? GP_ARG_FIRST + 2 : PIC_OFFSET_TABLE_REGNUM)
+
/* 1 if N is a possible register number for function argument passing.
We have no FP argument registers when soft-float. When FP registers
- are 32 bits, we can't directly reference the odd numbered ones. */
+ are 32 bits, we can't directly reference the odd numbered ones. */
+/* Ignore odd-numbered registers for O32 FPXX and O32 FP64. */
#define FUNCTION_ARG_REGNO_P(N) \
((IN_RANGE((N), GP_ARG_FIRST, GP_ARG_LAST) \
- || (IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST))) \
+ || (IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST) \
+ && (mips_abi != ABI_32 \
+ || TARGET_FLOAT32 \
+ || ((N) % 2 == 0)))) \
&& !fixed_regs[N])
/* This structure has to cope with two different argument allocation
@@ -2306,7 +2466,6 @@ typedef struct mips_args {
to the next fully-aligned offset. */
#define MIPS_STACK_ALIGN(LOC) \
(TARGET_NEWABI ? ((LOC) + 15) & -16 : ((LOC) + 7) & -8)
-
/* Output assembler code to FILE to increment profiler label # LABELNO
for profiling a function entry. */
@@ -2422,9 +2581,11 @@ typedef struct mips_args {
/* Although LDC1 and SDC1 provide 64-bit moves on 32-bit targets,
we generally don't want to use them for copying arbitrary data.
- A single N-word move is usually the same cost as N single-word moves. */
-#define MOVE_MAX UNITS_PER_WORD
-#define MAX_MOVE_MAX 8
+ A single N-word move is usually the same cost as N single-word moves.
+ For MSA, we set MOVE_MAX to 16 bytes.
+ Accordingly, MAX_MOVE_MAX is 16 unconditionally. */
+#define MOVE_MAX (TARGET_MSA ? 16 : UNITS_PER_WORD)
+#define MAX_MOVE_MAX 16
/* Define this macro as a C expression which is nonzero if
accessing less than a word of memory (i.e. a `char' or a
@@ -2621,7 +2782,39 @@ typedef struct mips_args {
{ "gp", 28 + GP_REG_FIRST }, \
{ "sp", 29 + GP_REG_FIRST }, \
{ "fp", 30 + GP_REG_FIRST }, \
- { "ra", 31 + GP_REG_FIRST } \
+ { "ra", 31 + GP_REG_FIRST }, \
+ { "$w0", 0 + FP_REG_FIRST }, \
+ { "$w1", 1 + FP_REG_FIRST }, \
+ { "$w2", 2 + FP_REG_FIRST }, \
+ { "$w3", 3 + FP_REG_FIRST }, \
+ { "$w4", 4 + FP_REG_FIRST }, \
+ { "$w5", 5 + FP_REG_FIRST }, \
+ { "$w6", 6 + FP_REG_FIRST }, \
+ { "$w7", 7 + FP_REG_FIRST }, \
+ { "$w8", 8 + FP_REG_FIRST }, \
+ { "$w9", 9 + FP_REG_FIRST }, \
+ { "$w10", 10 + FP_REG_FIRST }, \
+ { "$w11", 11 + FP_REG_FIRST }, \
+ { "$w12", 12 + FP_REG_FIRST }, \
+ { "$w13", 13 + FP_REG_FIRST }, \
+ { "$w14", 14 + FP_REG_FIRST }, \
+ { "$w15", 15 + FP_REG_FIRST }, \
+ { "$w16", 16 + FP_REG_FIRST }, \
+ { "$w17", 17 + FP_REG_FIRST }, \
+ { "$w18", 18 + FP_REG_FIRST }, \
+ { "$w19", 19 + FP_REG_FIRST }, \
+ { "$w20", 20 + FP_REG_FIRST }, \
+ { "$w21", 21 + FP_REG_FIRST }, \
+ { "$w22", 22 + FP_REG_FIRST }, \
+ { "$w23", 23 + FP_REG_FIRST }, \
+ { "$w24", 24 + FP_REG_FIRST }, \
+ { "$w25", 25 + FP_REG_FIRST }, \
+ { "$w26", 26 + FP_REG_FIRST }, \
+ { "$w27", 27 + FP_REG_FIRST }, \
+ { "$w28", 28 + FP_REG_FIRST }, \
+ { "$w29", 29 + FP_REG_FIRST }, \
+ { "$w30", 30 + FP_REG_FIRST }, \
+ { "$w31", 31 + FP_REG_FIRST } \
}
#define DBR_OUTPUT_SEQEND(STREAM) \
@@ -2956,6 +3149,7 @@ extern const char *mips_hi_relocs[];
extern enum processor mips_arch; /* which cpu to codegen for */
extern enum processor mips_tune; /* which cpu to schedule for */
extern int mips_isa; /* architectural level */
+extern int mips_isa_rev;
extern const struct mips_cpu_info *mips_arch_info;
extern const struct mips_cpu_info *mips_tune_info;
extern unsigned int mips_base_compression_flags;
diff --git a/gcc-4.9/gcc/config/mips/mips.md b/gcc-4.9/gcc/config/mips/mips.md
index 9bf8cb7f4..1366362f3 100644
--- a/gcc-4.9/gcc/config/mips/mips.md
+++ b/gcc-4.9/gcc/config/mips/mips.md
@@ -65,6 +65,9 @@
sr71000
xlr
xlp
+ p5600
+ w32
+ w64
])
(define_c_enum "unspec" [
@@ -239,6 +242,13 @@
(const_string "yes")]
(const_string "no")))
+;; True if the main data type is four times the size of a word.
+(define_attr "qword_mode" "no,yes"
+ (cond [(and (eq_attr "mode" "TI,TF")
+ (not (match_test "TARGET_64BIT")))
+ (const_string "yes")]
+ (const_string "no")))
+
;; Attributes describing a sync loop. These loops have the form:
;;
;; if (RELEASE_BARRIER == YES) sync
@@ -396,6 +406,11 @@
(eq_attr "move_type" "constN,shift_shift")
(const_string "multi")
+ ;; These types of move are split for quadword modes only.
+ (and (eq_attr "move_type" "move,const")
+ (eq_attr "qword_mode" "yes"))
+ (const_string "multi")
+
;; These types of move are split for doubleword modes only.
(and (eq_attr "move_type" "move,const")
(eq_attr "dword_mode" "yes"))
@@ -431,11 +446,21 @@
(const_string "none"))
(define_attr "enabled" "no,yes"
- (if_then_else (ior (eq_attr "compression" "all,none")
- (and (eq_attr "compression" "micromips")
- (match_test "TARGET_MICROMIPS")))
- (const_string "yes")
- (const_string "no")))
+ (cond [;; The O32 FPXX ABI prohibits direct moves between GR_REG and FR_REG
+ ;; for 64-bit values.
+ (and (eq_attr "move_type" "mtc,mfc")
+ (match_test "(TARGET_FLOATXX && !ISA_HAS_MXHC1)
+ || (mips_abi == ABI_32
+ && TARGET_FLOAT64 && !TARGET_ODD_SPREG)")
+ (eq_attr "dword_mode" "yes"))
+ (const_string "no")
+
+ ;; The micromips compressed instruction alternatives should only be
+ ;; considered when targeting micromips.
+ (and (eq_attr "compression" "micromips")
+ (match_test "!TARGET_MICROMIPS"))
+ (const_string "no")]
+ (const_string "yes")))
;; The number of individual instructions that a non-branch pattern generates,
;; using units of BASE_INSN_LENGTH.
@@ -467,6 +492,18 @@
(eq_attr "dword_mode" "yes"))
(const_int 2)
+ ;; Check for quadword moves that are decomposed into four
+ ;; instructions.
+ (and (eq_attr "move_type" "mtc,mfc,move")
+ (eq_attr "qword_mode" "yes"))
+ (const_int 16)
+
+ ;; Quadword CONST moves are split into four word
+ ;; CONST moves.
+ (and (eq_attr "move_type" "const")
+ (eq_attr "qword_mode" "yes"))
+ (symbol_ref "mips_split_128bit_const_insns (operands[1]) * 4")
+
;; Constants, loads and stores are handled by external routines.
(and (eq_attr "move_type" "const,constN")
(eq_attr "dword_mode" "yes"))
@@ -508,7 +545,9 @@
(const_int 2)
(eq_attr "type" "idiv,idiv3")
- (symbol_ref "mips_idiv_insns ()")
+ (cond [(eq_attr "mode" "TI")
+ (symbol_ref "mips_msa_idiv_insns () * 4")]
+ (symbol_ref "mips_idiv_insns () * 4"))
(not (eq_attr "sync_mem" "none"))
(symbol_ref "mips_sync_loop_insns (insn, operands)")]
@@ -758,6 +797,11 @@
&& !TARGET_LOONGSON_2EF
&& !TARGET_MIPS5900")])
+;; This mode iterator allows :FPCC to be used anywhere that an FP condition
+;; is needed.
+(define_mode_iterator FPCC [(CC "!ISA_HAS_CCF")
+ (CCF "ISA_HAS_CCF")])
+
;; 32-bit integer moves for which we provide move patterns.
(define_mode_iterator IMOVE32
[SI
@@ -847,14 +891,16 @@
;; This attribute gives the best constraint to use for registers of
;; a given mode.
-(define_mode_attr reg [(SI "d") (DI "d") (CC "z")])
+(define_mode_attr reg [(SI "d") (DI "d") (CC "z") (CCF "f")])
;; This attribute gives the format suffix for floating-point operations.
(define_mode_attr fmt [(SF "s") (DF "d") (V2SF "ps")])
;; This attribute gives the upper-case mode name for one unit of a
-;; floating-point mode.
-(define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF")])
+;; floating-point mode or vector mode.
+(define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF") (V4SF "SF")
+ (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI")
+ (V2DF "DF")])
;; This attribute gives the integer mode that has the same size as a
;; fixed-point mode.
@@ -887,6 +933,9 @@
(define_mode_attr sqrt_condition
[(SF "!ISA_MIPS1") (DF "!ISA_MIPS1") (V2SF "TARGET_SB1")])
+;; This attribute provides the correct mnemonic for each FP condition mode.
+(define_mode_attr fpcmp [(CC "c") (CCF "cmp")])
+
;; This code iterator allows signed and unsigned widening multiplications
;; to use the same template.
(define_code_iterator any_extend [sign_extend zero_extend])
@@ -909,7 +958,10 @@
;; This code iterator allows all native floating-point comparisons to be
;; generated from the same template.
-(define_code_iterator fcond [unordered uneq unlt unle eq lt le])
+(define_code_iterator fcond [unordered uneq unlt unle eq lt le
+ (ordered "ISA_HAS_CCF")
+ (ltgt "ISA_HAS_CCF")
+ (ne "ISA_HAS_CCF")])
;; This code iterator is used for comparisons that can be implemented
;; by swapping the operands.
@@ -982,7 +1034,10 @@
(unle "ule")
(eq "eq")
(lt "lt")
- (le "le")])
+ (le "le")
+ (ordered "or")
+ (ltgt "ne")
+ (ne "une")])
;; Similar, but for swapped conditions.
(define_code_attr swapped_fcond [(ge "le")
@@ -996,6 +1051,10 @@
;; This is the inverse value of bbv.
(define_code_attr bbinv [(eq "1") (ne "0")])
+
+;; The sel mnemonic to use, depending on the condition test.
+(define_code_attr sel [(eq "seleqz") (ne "selnez")])
+(define_code_attr selinv [(eq "selnez") (ne "seleqz")])
;; .........................
;;
@@ -1050,6 +1109,7 @@
(eq_attr "type" "ghost")
"nothing")
+(include "p5600.md")
(include "4k.md")
(include "5k.md")
(include "20kc.md")
@@ -1103,18 +1163,27 @@
[(match_operand:GPR 1 "reg_or_0_operand")
(match_operand:GPR 2 "arith_operand")])
(match_operand 3 "const_0_operand"))]
- "ISA_HAS_COND_TRAP"
+ "ISA_HAS_COND_TRAPI || ISA_HAS_COND_TRAP"
{
mips_expand_conditional_trap (operands[0]);
DONE;
})
+(define_insn "*conditional_trap_reg<mode>"
+ [(trap_if (match_operator:GPR 0 "trap_comparison_operator"
+ [(match_operand:GPR 1 "reg_or_0_operand" "dJ")
+ (match_operand:GPR 2 "reg_or_0_operand" "dJ")])
+ (const_int 0))]
+ "ISA_HAS_COND_TRAP && !ISA_HAS_COND_TRAPI"
+ "t%C0\t%z1,%2"
+ [(set_attr "type" "trap")])
+
(define_insn "*conditional_trap<mode>"
[(trap_if (match_operator:GPR 0 "trap_comparison_operator"
[(match_operand:GPR 1 "reg_or_0_operand" "dJ")
(match_operand:GPR 2 "arith_operand" "dI")])
(const_int 0))]
- "ISA_HAS_COND_TRAP"
+ "ISA_HAS_COND_TRAPI"
"t%C0\t%z1,%2"
[(set_attr "type" "trap")])
@@ -1482,13 +1551,13 @@
[(set (match_operand:GPR 0 "register_operand")
(mult:GPR (match_operand:GPR 1 "register_operand")
(match_operand:GPR 2 "register_operand")))]
- "ISA_HAS_<D>MULT"
+ "ISA_HAS_<D>MULT || ISA_HAS_R6<D>MUL"
{
rtx lo;
- if (TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A)
- emit_insn (gen_mul<mode>3_mul3_loongson (operands[0], operands[1],
- operands[2]));
+ if (TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A || ISA_HAS_R6<D>MUL)
+ emit_insn (gen_mul<mode>3_mul3_nohilo (operands[0], operands[1],
+ operands[2]));
else if (ISA_HAS_<D>MUL3)
emit_insn (gen_mul<mode>3_mul3 (operands[0], operands[1], operands[2]));
else if (TARGET_MIPS16)
@@ -1505,16 +1574,18 @@
DONE;
})
-(define_insn "mul<mode>3_mul3_loongson"
+(define_insn "mul<mode>3_mul3_nohilo"
[(set (match_operand:GPR 0 "register_operand" "=d")
(mult:GPR (match_operand:GPR 1 "register_operand" "d")
(match_operand:GPR 2 "register_operand" "d")))]
- "TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A"
+ "TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A || ISA_HAS_R6<D>MUL"
{
if (TARGET_LOONGSON_2EF)
return "<d>multu.g\t%0,%1,%2";
- else
+ else if (TARGET_LOONGSON_3A)
return "gs<d>multu\t%0,%1,%2";
+ else
+ return "<d>mul\t%0,%1,%2";
}
[(set_attr "type" "imul3nc")
(set_attr "mode" "<MODE>")])
@@ -1622,40 +1693,66 @@
;; copy instructions. Reload therefore thinks that the second alternative
;; is two reloads more costly than the first. We add "*?*?" to the first
;; alternative as a counterweight.
+;;
+;; LRA simulates reload, but the cost of reloading scratches is lower
+;; than in classic reload. For the time being, removing the counterweight
+;; for LRA is more profitable.
(define_insn "*mul_acc_si"
- [(set (match_operand:SI 0 "register_operand" "=l*?*?,d?")
- (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "d,d")
- (match_operand:SI 2 "register_operand" "d,d"))
- (match_operand:SI 3 "register_operand" "0,d")))
- (clobber (match_scratch:SI 4 "=X,l"))
- (clobber (match_scratch:SI 5 "=X,&d"))]
+ [(set (match_operand:SI 0 "register_operand" "=l*?*?,l,d?")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "d,d,d")
+ (match_operand:SI 2 "register_operand" "d,d,d"))
+ (match_operand:SI 3 "register_operand" "0,0,d")))
+ (clobber (match_scratch:SI 4 "=X,X,l"))
+ (clobber (match_scratch:SI 5 "=X,X,&d"))]
"GENERATE_MADD_MSUB && !TARGET_MIPS16"
"@
madd\t%1,%2
+ madd\t%1,%2
#"
[(set_attr "type" "imadd")
(set_attr "accum_in" "3")
(set_attr "mode" "SI")
- (set_attr "insn_count" "1,2")])
+ (set_attr "insn_count" "1,1,2")
+ (set (attr "enabled")
+ (cond [(and (eq_attr "alternative" "0")
+ (match_test "!mips_lra_flag"))
+ (const_string "yes")
+ (and (eq_attr "alternative" "1")
+ (match_test "mips_lra_flag"))
+ (const_string "yes")
+ (eq_attr "alternative" "2")
+ (const_string "yes")]
+ (const_string "no")))])
;; The same idea applies here. The middle alternative needs one less
;; clobber than the final alternative, so we add "*?" as a counterweight.
(define_insn "*mul_acc_si_r3900"
- [(set (match_operand:SI 0 "register_operand" "=l*?*?,d*?,d?")
- (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "d,d,d")
- (match_operand:SI 2 "register_operand" "d,d,d"))
- (match_operand:SI 3 "register_operand" "0,l,d")))
- (clobber (match_scratch:SI 4 "=X,3,l"))
- (clobber (match_scratch:SI 5 "=X,X,&d"))]
+ [(set (match_operand:SI 0 "register_operand" "=l*?*?,l,d*?,d?")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "d,d,d,d")
+ (match_operand:SI 2 "register_operand" "d,d,d,d"))
+ (match_operand:SI 3 "register_operand" "0,0,l,d")))
+ (clobber (match_scratch:SI 4 "=X,X,3,l"))
+ (clobber (match_scratch:SI 5 "=X,X,X,&d"))]
"TARGET_MIPS3900 && !TARGET_MIPS16"
"@
madd\t%1,%2
+ madd\t%1,%2
madd\t%0,%1,%2
#"
[(set_attr "type" "imadd")
(set_attr "accum_in" "3")
(set_attr "mode" "SI")
- (set_attr "insn_count" "1,1,2")])
+ (set_attr "insn_count" "1,1,1,2")
+ (set (attr "enabled")
+ (cond [(and (eq_attr "alternative" "0")
+ (match_test "!mips_lra_flag"))
+ (const_string "yes")
+ (and (eq_attr "alternative" "1")
+ (match_test "mips_lra_flag"))
+ (const_string "yes")
+ (eq_attr "alternative" "2,3")
+ (const_string "yes")]
+ (const_string "no")))])
;; Split *mul_acc_si if both the source and destination accumulator
;; values are GPRs.
@@ -1859,20 +1956,31 @@
;; See the comment above *mul_add_si for details.
(define_insn "*mul_sub_si"
- [(set (match_operand:SI 0 "register_operand" "=l*?*?,d?")
- (minus:SI (match_operand:SI 1 "register_operand" "0,d")
- (mult:SI (match_operand:SI 2 "register_operand" "d,d")
- (match_operand:SI 3 "register_operand" "d,d"))))
- (clobber (match_scratch:SI 4 "=X,l"))
- (clobber (match_scratch:SI 5 "=X,&d"))]
+ [(set (match_operand:SI 0 "register_operand" "=l*?*?,l,d?")
+ (minus:SI (match_operand:SI 1 "register_operand" "0,0,d")
+ (mult:SI (match_operand:SI 2 "register_operand" "d,d,d")
+ (match_operand:SI 3 "register_operand" "d,d,d"))))
+ (clobber (match_scratch:SI 4 "=X,X,l"))
+ (clobber (match_scratch:SI 5 "=X,X,&d"))]
"GENERATE_MADD_MSUB"
"@
msub\t%2,%3
+ msub\t%2,%3
#"
[(set_attr "type" "imadd")
(set_attr "accum_in" "1")
(set_attr "mode" "SI")
- (set_attr "insn_count" "1,2")])
+ (set_attr "insn_count" "1,1,2")
+ (set (attr "enabled")
+ (cond [(and (eq_attr "alternative" "0")
+ (match_test "!mips_lra_flag"))
+ (const_string "yes")
+ (and (eq_attr "alternative" "1")
+ (match_test "mips_lra_flag"))
+ (const_string "yes")
+ (eq_attr "alternative" "2")
+ (const_string "yes")]
+ (const_string "no")))])
;; Split *mul_sub_si if both the source and destination accumulator
;; values are GPRs.
@@ -1913,6 +2021,24 @@
DONE;
})
+(define_expand "<u>mulsidi3_32bit_r6"
+ [(set (match_operand:DI 0 "register_operand")
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand"))
+ (any_extend:DI (match_operand:SI 2 "register_operand"))))]
+ "!TARGET_64BIT && ISA_HAS_R6MUL"
+{
+ rtx dest = gen_reg_rtx (DImode);
+ rtx low = mips_subword (dest, 0);
+ rtx high = mips_subword (dest, 1);
+
+ emit_insn (gen_mulsi3_mul3_nohilo (low, operands[1], operands[2]));
+ emit_insn (gen_<su>mulsi3_highpart_r6 (high, operands[1], operands[2]));
+
+ emit_move_insn (mips_subword (operands[0], 0), low);
+ emit_move_insn (mips_subword (operands[0], 1), high);
+ DONE;
+})
+
(define_expand "<u>mulsidi3_32bit_mips16"
[(set (match_operand:DI 0 "register_operand")
(mult:DI (any_extend:DI (match_operand:SI 1 "register_operand"))
@@ -1934,7 +2060,7 @@
[(set (match_operand:DI 0 "muldiv_target_operand" "=ka")
(mult:DI (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
(any_extend:DI (match_operand:SI 2 "register_operand" "d"))))]
- "!TARGET_64BIT && (!TARGET_FIX_R4000 || ISA_HAS_DSP)"
+ "!TARGET_64BIT && (!TARGET_FIX_R4000 || ISA_HAS_DSP) && ISA_HAS_MULT"
{
if (ISA_HAS_DSP_MULT)
return "mult<u>\t%q0,%1,%2";
@@ -1949,7 +2075,7 @@
(mult:DI (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
(any_extend:DI (match_operand:SI 2 "register_operand" "d"))))
(clobber (match_scratch:DI 3 "=x"))]
- "!TARGET_64BIT && TARGET_FIX_R4000 && !ISA_HAS_DSP"
+ "!TARGET_64BIT && TARGET_FIX_R4000 && !ISA_HAS_DSP && ISA_HAS_MULT"
"mult<u>\t%1,%2\;mflo\t%L0\;mfhi\t%M0"
[(set_attr "type" "imul")
(set_attr "mode" "SI")
@@ -1961,7 +2087,8 @@
(any_extend:DI (match_operand:SI 2 "register_operand" "d"))))
(clobber (match_scratch:TI 3 "=x"))
(clobber (match_scratch:DI 4 "=d"))]
- "TARGET_64BIT && !TARGET_FIX_R4000 && !ISA_HAS_DMUL3 && !TARGET_MIPS16"
+ "TARGET_64BIT && !TARGET_FIX_R4000 && !ISA_HAS_DMUL3
+ && !TARGET_MIPS16 && ISA_HAS_MULT"
"#"
"&& reload_completed"
[(const_int 0)]
@@ -2044,6 +2171,15 @@
[(set_attr "type" "imul3")
(set_attr "mode" "DI")])
+(define_insn "mulsidi3_64bit_r6dmul"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "d"))))]
+ "ISA_HAS_R6DMUL"
+ "dmul\t%0,%1,%2"
+ [(set_attr "type" "imul3nc")
+ (set_attr "mode" "DI")])
+
;; Widening multiply with negation.
(define_insn "*muls<u>_di"
[(set (match_operand:DI 0 "muldiv_target_operand" "=x")
@@ -2101,12 +2237,27 @@
else if (TARGET_MIPS16)
emit_insn (gen_<su>mulsi3_highpart_split (operands[0], operands[1],
operands[2]));
+ else if (ISA_HAS_R6MUL)
+ emit_insn (gen_<su>mulsi3_highpart_r6 (operands[0], operands[1],
+ operands[2]));
else
emit_insn (gen_<su>mulsi3_highpart_internal (operands[0], operands[1],
operands[2]));
DONE;
})
+(define_insn "<su>mulsi3_highpart_r6"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (any_extend:DI (match_operand:SI 2 "register_operand" "d")))
+ (const_int 32))))]
+ "ISA_HAS_R6MUL"
+ "muh<u>\t%0,%1,%2"
+ [(set_attr "type" "imul3nc")
+ (set_attr "mode" "SI")])
+
(define_insn_and_split "<su>mulsi3_highpart_internal"
[(set (match_operand:SI 0 "register_operand" "=d")
(truncate:SI
@@ -2115,7 +2266,7 @@
(any_extend:DI (match_operand:SI 2 "register_operand" "d")))
(const_int 32))))
(clobber (match_scratch:SI 3 "=l"))]
- "!ISA_HAS_MULHI && !TARGET_MIPS16"
+ "ISA_HAS_MULT && !ISA_HAS_MULHI && !TARGET_MIPS16"
{ return TARGET_FIX_R4000 ? "mult<u>\t%1,%2\n\tmfhi\t%0" : "#"; }
"&& reload_completed && !TARGET_FIX_R4000"
[(const_int 0)]
@@ -2193,17 +2344,34 @@
(mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
(any_extend:TI (match_operand:DI 2 "register_operand")))
(const_int 64))))]
- "ISA_HAS_DMULT && !(<CODE> == ZERO_EXTEND && TARGET_FIX_VR4120)"
+ "ISA_HAS_R6DMUL
+ || (ISA_HAS_DMULT
+ && !(<CODE> == ZERO_EXTEND && TARGET_FIX_VR4120))"
{
if (TARGET_MIPS16)
emit_insn (gen_<su>muldi3_highpart_split (operands[0], operands[1],
operands[2]));
+ else if (ISA_HAS_R6DMUL)
+ emit_insn (gen_<su>muldi3_highpart_r6 (operands[0], operands[1],
+ operands[2]));
else
emit_insn (gen_<su>muldi3_highpart_internal (operands[0], operands[1],
operands[2]));
DONE;
})
+(define_insn "<su>muldi3_highpart_r6"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand" "d"))
+ (any_extend:TI (match_operand:DI 2 "register_operand" "d")))
+ (const_int 64))))]
+ "ISA_HAS_R6DMUL"
+ "dmuh<u>\t%0,%1,%2"
+ [(set_attr "type" "imul3nc")
+ (set_attr "mode" "DI")])
+
(define_insn_and_split "<su>muldi3_highpart_internal"
[(set (match_operand:DI 0 "register_operand" "=d")
(truncate:DI
@@ -2342,6 +2510,16 @@
(set_attr "accum_in" "3")
(set_attr "mode" "<UNITMODE>")])
+(define_insn "*maddf<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (plus:ANYF (match_operand:ANYF 1 "register_operand" "0")
+ (mult:ANYF (match_operand:ANYF 2 "register_operand" "f")
+ (match_operand:ANYF 3 "register_operand" "f"))))]
+ "ISA_HAS_FP_MADDF_MSUBF"
+ "maddf.<fmt>\t%0,%2,%3"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "<UNITMODE>")])
+
(define_insn "*madd3<mode>"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(plus:ANYF (mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
@@ -2364,6 +2542,16 @@
(set_attr "accum_in" "3")
(set_attr "mode" "<UNITMODE>")])
+(define_insn "*msubf<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (minus:ANYF (match_operand:ANYF 1 "register_operand" "0")
+ (mult:ANYF (match_operand:ANYF 2 "register_operand" "f")
+ (match_operand:ANYF 3 "register_operand" "f"))))]
+ "ISA_HAS_FP_MADDF_MSUBF"
+ "msubf.<fmt>\t%0,%2,%3"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "<UNITMODE>")])
+
(define_insn "*msub3<mode>"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(minus:ANYF (mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
@@ -2725,6 +2913,40 @@
{ return mips_output_division ("<GPR:d>div<u>\t%.,%1,%2", operands); }
[(set_attr "type" "idiv")
(set_attr "mode" "<GPR:MODE>")])
+
+;; Integer division and modulus.
+
+(define_insn "<u>div<mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=&d")
+ (any_div:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))]
+ "TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A || ISA_HAS_R6<D>DIV"
+ {
+ if (TARGET_LOONGSON_2EF)
+ return mips_output_division ("<d>div<u>.g\t%0,%1,%2", operands);
+ else if (TARGET_LOONGSON_3A)
+ return mips_output_division ("gs<d>div<u>\t%0,%1,%2", operands);
+ else
+ return mips_output_division ("<d>div<u>\t%0,%1,%2", operands);
+ }
+ [(set_attr "type" "idiv3")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "<u>mod<mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=&d")
+ (any_mod:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))]
+ "TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A || ISA_HAS_R6<D>DIV"
+ {
+ if (TARGET_LOONGSON_2EF)
+ return mips_output_division ("<d>mod<u>.g\t%0,%1,%2", operands);
+ else if (TARGET_LOONGSON_3A)
+ return mips_output_division ("gs<d>mod<u>\t%0,%1,%2", operands);
+ else
+ return mips_output_division ("<d>mod<u>\t%0,%1,%2", operands);
+ }
+ [(set_attr "type" "idiv3")
+ (set_attr "mode" "<MODE>")])
;;
;; ....................
@@ -3870,7 +4092,7 @@
(sign_extract:GPR (match_operand:BLK 1 "memory_operand")
(match_operand 2 "const_int_operand")
(match_operand 3 "const_int_operand")))]
- "!TARGET_MIPS16"
+ "ISA_HAS_LWL_LWR"
{
if (mips_expand_ext_as_unaligned_load (operands[0], operands[1],
INTVAL (operands[2]),
@@ -3907,7 +4129,7 @@
(zero_extract:GPR (match_operand:BLK 1 "memory_operand")
(match_operand 2 "const_int_operand")
(match_operand 3 "const_int_operand")))]
- "!TARGET_MIPS16"
+ "ISA_HAS_LWL_LWR"
{
if (mips_expand_ext_as_unaligned_load (operands[0], operands[1],
INTVAL (operands[2]),
@@ -3958,7 +4180,7 @@
(match_operand 1 "const_int_operand")
(match_operand 2 "const_int_operand"))
(match_operand:GPR 3 "reg_or_0_operand"))]
- "!TARGET_MIPS16"
+ "ISA_HAS_LWL_LWR"
{
if (mips_expand_ins_as_unaligned_store (operands[0], operands[3],
INTVAL (operands[1]),
@@ -4139,7 +4361,10 @@
[(set (match_operand:DI 0 "register_operand" "=d")
(match_operand:DI 1 "absolute_symbolic_operand" ""))
(clobber (match_scratch:DI 2 "=&d"))]
- "TARGET_EXPLICIT_RELOCS && ABI_HAS_64BIT_SYMBOLS && cse_not_expected"
+ "!TARGET_MIPS16
+ && TARGET_EXPLICIT_RELOCS
+ && ABI_HAS_64BIT_SYMBOLS
+ && cse_not_expected"
"#"
"&& reload_completed"
[(set (match_dup 0) (high:DI (match_dup 3)))
@@ -4437,7 +4662,7 @@
(define_insn "*mov<mode>_internal"
[(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=d,!u,!u,d,e,!u,!ks,d,ZS,ZT,m,*f,*f,*d,*m,*d,*z,*a,*d,*B*C*D,*B*C*D,*d,*m")
- (match_operand:IMOVE32 1 "move_operand" "d,J,Udb7,Yd,Yf,ZT,ZS,m,!ks,!u,dJ,*d*J,*m,*f,*f,*z,*d,*J*d,*a,*d,*m,*B*C*D,*B*C*D"))]
+ (match_operand:IMOVE32 1 "move_operand" "d,J,Udb7,Yd,Yf,ZT,ZS,m,!ks,!kbJ,dJ,*d*J,*m,*f,*f,*z,*d,*J*d,*a,*d,*m,*B*C*D,*B*C*D"))]
"!TARGET_MIPS16
&& (register_operand (operands[0], <MODE>mode)
|| reg_or_0_operand (operands[1], <MODE>mode))"
@@ -4578,7 +4803,7 @@
(define_insn "*movhi_internal"
[(set (match_operand:HI 0 "nonimmediate_operand" "=d,!u,d,!u,d,ZU,m,*a,*d")
- (match_operand:HI 1 "move_operand" "d,J,I,ZU,m,!u,dJ,*d*J,*a"))]
+ (match_operand:HI 1 "move_operand" "d,J,I,ZU,m,!kbJ,dJ,*d*J,*a"))]
"!TARGET_MIPS16
&& (register_operand (operands[0], HImode)
|| reg_or_0_operand (operands[1], HImode))"
@@ -4654,7 +4879,7 @@
(define_insn "*movqi_internal"
[(set (match_operand:QI 0 "nonimmediate_operand" "=d,!u,d,!u,d,ZV,m,*a,*d")
- (match_operand:QI 1 "move_operand" "d,J,I,ZW,m,!u,dJ,*d*J,*a"))]
+ (match_operand:QI 1 "move_operand" "d,J,I,ZW,m,!kbJ,dJ,*d*J,*a"))]
"!TARGET_MIPS16
&& (register_operand (operands[0], QImode)
|| reg_or_0_operand (operands[1], QImode))"
@@ -4711,6 +4936,13 @@
DONE;
})
+(define_insn "movccf"
+ [(set (match_operand:CCF 0 "nonimmediate_operand" "=f,f,m")
+ (match_operand:CCF 1 "nonimmediate_operand" "f,m,f"))]
+ "ISA_HAS_CCF"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "fmove,fpload,fpstore")])
+
(define_insn "*movsf_hardfloat"
[(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*d,*d,*d,*m")
(match_operand:SF 1 "move_operand" "f,G,m,f,G,*d,*f,*G*d,*m,*d"))]
@@ -4787,7 +5019,7 @@
(define_expand "movti"
[(set (match_operand:TI 0)
(match_operand:TI 1))]
- "TARGET_64BIT"
+ "TARGET_64BIT || TARGET_MSA"
{
if (mips_legitimize_move (TImode, operands[0], operands[1]))
DONE;
@@ -4797,6 +5029,7 @@
[(set (match_operand:TI 0 "nonimmediate_operand" "=d,d,d,m,*a,*a,*d")
(match_operand:TI 1 "move_operand" "d,i,m,dJ,*J,*d,*a"))]
"TARGET_64BIT
+ && !TARGET_MSA
&& !TARGET_MIPS16
&& (register_operand (operands[0], TImode)
|| reg_or_0_operand (operands[1], TImode))"
@@ -4865,7 +5098,7 @@
(define_split
[(set (match_operand:MOVE128 0 "nonimmediate_operand")
(match_operand:MOVE128 1 "move_operand"))]
- "reload_completed && mips_split_move_insn_p (operands[0], operands[1], insn)"
+ "reload_completed && !TARGET_MSA && mips_split_move_insn_p (operands[0], operands[1], insn)"
[(const_int 0)]
{
mips_split_move_insn (operands[0], operands[1], curr_insn);
@@ -4954,7 +5187,7 @@
rtx low = mips_subword (operands[1], 0);
rtx high = mips_subword (operands[1], 1);
emit_insn (gen_load_low<mode> (operands[0], low));
- if (TARGET_FLOAT64 && !TARGET_64BIT)
+ if (ISA_HAS_MXHC1 && !TARGET_64BIT)
emit_insn (gen_mthc1<mode> (operands[0], high, operands[0]));
else
emit_insn (gen_load_high<mode> (operands[0], high, operands[0]));
@@ -4964,7 +5197,7 @@
rtx low = mips_subword (operands[0], 0);
rtx high = mips_subword (operands[0], 1);
emit_insn (gen_store_word<mode> (low, operands[1], const0_rtx));
- if (TARGET_FLOAT64 && !TARGET_64BIT)
+ if (ISA_HAS_MXHC1 && !TARGET_64BIT)
emit_insn (gen_mfhc1<mode> (high, operands[1]));
else
emit_insn (gen_store_word<mode> (high, operands[1], const1_rtx));
@@ -5229,7 +5462,7 @@
(define_insn "mips_cache"
[(set (mem:BLK (scratch))
(unspec:BLK [(match_operand:SI 0 "const_int_operand")
- (match_operand:QI 1 "address_operand" "p")]
+ (match_operand:QI 1 "address_operand" "ZD")]
UNSPEC_MIPS_CACHE))]
"ISA_HAS_CACHE"
"cache\t%X0,%a1")
@@ -5506,11 +5739,11 @@
;; Conditional branches on floating-point equality tests.
-(define_insn "*branch_fp"
+(define_insn "*branch_fp_<mode>"
[(set (pc)
(if_then_else
(match_operator 1 "equality_operator"
- [(match_operand:CC 2 "register_operand" "z")
+ [(match_operand:FPCC 2 "register_operand" "<reg>")
(const_int 0)])
(label_ref (match_operand 0 "" ""))
(pc)))]
@@ -5522,11 +5755,11 @@
}
[(set_attr "type" "branch")])
-(define_insn "*branch_fp_inverted"
+(define_insn "*branch_fp_inverted_<mode>"
[(set (pc)
(if_then_else
(match_operator 1 "equality_operator"
- [(match_operand:CC 2 "register_operand" "z")
+ [(match_operand:FPCC 2 "register_operand" "<reg>")
(const_int 0)])
(pc)
(label_ref (match_operand 0 "" ""))))]
@@ -5870,21 +6103,21 @@
;;
;; ....................
-(define_insn "s<code>_<mode>"
- [(set (match_operand:CC 0 "register_operand" "=z")
- (fcond:CC (match_operand:SCALARF 1 "register_operand" "f")
- (match_operand:SCALARF 2 "register_operand" "f")))]
+(define_insn "s<code>_<SCALARF:mode>_using_<FPCC:mode>"
+ [(set (match_operand:FPCC 0 "register_operand" "=<reg>")
+ (fcond:FPCC (match_operand:SCALARF 1 "register_operand" "f")
+ (match_operand:SCALARF 2 "register_operand" "f")))]
""
- "c.<fcond>.<fmt>\t%Z0%1,%2"
+ "<fpcmp>.<fcond>.<fmt>\t%Z0%1,%2"
[(set_attr "type" "fcmp")
(set_attr "mode" "FPSW")])
-(define_insn "s<code>_<mode>"
- [(set (match_operand:CC 0 "register_operand" "=z")
- (swapped_fcond:CC (match_operand:SCALARF 1 "register_operand" "f")
- (match_operand:SCALARF 2 "register_operand" "f")))]
+(define_insn "s<code>_<SCALARF:mode>_using_<FPCC:mode>"
+ [(set (match_operand:FPCC 0 "register_operand" "=<reg>")
+ (swapped_fcond:FPCC (match_operand:SCALARF 1 "register_operand" "f")
+ (match_operand:SCALARF 2 "register_operand" "f")))]
""
- "c.<swapped_fcond>.<fmt>\t%Z0%2,%1"
+ "<fpcmp>.<swapped_fcond>.<fmt>\t%Z0%2,%1"
[(set_attr "type" "fcmp")
(set_attr "mode" "FPSW")])
@@ -6091,10 +6324,10 @@
rtx diff_vec = PATTERN (NEXT_INSN (operands[2]));
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
-
+
output_asm_insn ("sltu\t%0, %1", operands);
output_asm_insn ("bteqz\t%3", operands);
-
+
switch (GET_MODE (diff_vec))
{
case HImode:
@@ -6904,6 +7137,41 @@
[(set_attr "type" "condmove")
(set_attr "mode" "<SCALARF:MODE>")])
+(define_insn "*sel<code><GPR:mode>_using_<GPR2:mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=d,d")
+ (if_then_else:GPR
+ (equality_op:GPR2 (match_operand:GPR2 1 "register_operand" "d,d")
+ (const_int 0))
+ (match_operand:GPR 2 "reg_or_0_operand" "d,J")
+ (match_operand:GPR 3 "reg_or_0_operand" "J,d")))]
+ "ISA_HAS_SEL
+ && (register_operand (operands[2], <GPR:MODE>mode)
+ != register_operand (operands[3], <GPR:MODE>mode))"
+ "@
+ <sel>\t%0,%2,%1
+ <selinv>\t%0,%3,%1"
+ [(set_attr "type" "condmove")
+ (set_attr "mode" "<GPR:MODE>")])
+
+;; sel.fmt copies the 3rd argument when the 1st is non-zero and the 2nd
+;; argument when the 1st is zero. This means operands 2 and 3 are
+;; inverted in the instruction.
+
+(define_insn "*sel<mode>"
+ [(set (match_operand:SCALARF 0 "register_operand" "=f,f,f")
+ (if_then_else:SCALARF
+ (ne:CCF (match_operand:CCF 1 "register_operand" "0,f,f")
+ (const_int 0))
+ (match_operand:SCALARF 2 "reg_or_0_operand" "f,G,f")
+ (match_operand:SCALARF 3 "reg_or_0_operand" "f,f,G")))]
+ "ISA_HAS_SEL && ISA_HAS_CCF"
+ "@
+ sel.<fmt>\t%0,%3,%2
+ seleqz.<fmt>\t%0,%3,%1
+ selnez.<fmt>\t%0,%2,%1"
+ [(set_attr "type" "condmove")
+ (set_attr "mode" "<SCALARF:MODE>")])
+
;; These are the main define_expand's used to make conditional moves.
(define_expand "mov<mode>cc"
@@ -6912,8 +7180,11 @@
(if_then_else:GPR (match_dup 5)
(match_operand:GPR 2 "reg_or_0_operand")
(match_operand:GPR 3 "reg_or_0_operand")))]
- "ISA_HAS_CONDMOVE"
+ "ISA_HAS_CONDMOVE || ISA_HAS_SEL"
{
+ if (ISA_HAS_SEL && !INTEGRAL_MODE_P (GET_MODE (XEXP (operands[1], 0))))
+ FAIL;
+
mips_expand_conditional_move (operands);
DONE;
})
@@ -6922,10 +7193,25 @@
[(set (match_dup 4) (match_operand 1 "comparison_operator"))
(set (match_operand:SCALARF 0 "register_operand")
(if_then_else:SCALARF (match_dup 5)
- (match_operand:SCALARF 2 "register_operand")
- (match_operand:SCALARF 3 "register_operand")))]
- "ISA_HAS_FP_CONDMOVE"
+ (match_operand:SCALARF 2 "reg_or_0_operand")
+ (match_operand:SCALARF 3 "reg_or_0_operand")))]
+ "ISA_HAS_FP_CONDMOVE
+ || (ISA_HAS_SEL && ISA_HAS_CCF)"
{
+ if (ISA_HAS_SEL && !FLOAT_MODE_P (GET_MODE (XEXP (operands[1], 0))))
+ FAIL;
+
+ /* Work around an LRA bug in which tied operands in the sel.fmt
+ pattern lead to the double-precision destination of sel.d being
+ reloaded with the full register file usable, ignoring the restrictions
+ on whether the CCFmode input can be used in odd-numbered
+ single-precision registers. For consistency, CCFmode values
+ must be guaranteed to exist only in the even registers because of
+ the unusual duality between single- and double-precision values. */
+ if (ISA_HAS_SEL && <MODE>mode == DFmode
+ && (!TARGET_ODD_SPREG || TARGET_FLOATXX))
+ FAIL;
+
mips_expand_conditional_move (operands);
DONE;
})
@@ -7040,7 +7326,12 @@
[(set (reg:P TLS_GET_TP_REGNUM)
(unspec:P [(const_int 0)] UNSPEC_TLS_GET_TP))]
"HAVE_AS_TLS && !TARGET_MIPS16"
- ".set\tpush\;.set\tmips32r2\t\;rdhwr\t$3,$29\;.set\tpop"
+ {
+ if (mips_isa_rev >= 2)
+ return "rdhwr\t$3,$29";
+
+ return ".set\tpush\;.set\tmips32r2\t\;rdhwr\t$3,$29\;.set\tpop";
+ }
[(set_attr "type" "unknown")
; Since rdhwr always generates a trap for now, putting it in a delay
; slot would make the kernel's emulation of it much slower.
@@ -7185,6 +7476,9 @@
; ST-Microelectronics Loongson-2E/2F-specific patterns.
(include "loongson.md")
+; The MIPS MSA Instructions.
+(include "mips-msa.md")
+
(define_c_enum "unspec" [
UNSPEC_ADDRESS_FIRST
])
diff --git a/gcc-4.9/gcc/config/mips/mips.opt b/gcc-4.9/gcc/config/mips/mips.opt
index dd8aff479..740fdd6d8 100644
--- a/gcc-4.9/gcc/config/mips/mips.opt
+++ b/gcc-4.9/gcc/config/mips/mips.opt
@@ -197,6 +197,10 @@ mfp32
Target Report RejectNegative InverseMask(FLOAT64)
Use 32-bit floating-point registers
+mfpxx
+Target Report RejectNegative Mask(FLOATXX)
+Follow the O32 FPXX ABI
+
mfp64
Target Report RejectNegative Mask(FLOAT64)
Use 64-bit floating-point registers
@@ -303,6 +307,10 @@ mmicromips
Target Report Mask(MICROMIPS)
Use microMIPS instructions
+mmsa
+Target Report Var(TARGET_MSA)
+Use MIPS MSA Extension instructions
+
mmt
Target Report Var(TARGET_MT)
Allow the use of MT instructions
@@ -388,6 +396,10 @@ msynci
Target Report Mask(SYNCI)
Use synci instruction to invalidate i-cache
+mlra
+Target Report Var(mips_lra_flag) Init(1) Save
+Use LRA instead of reload
+
mtune=
Target RejectNegative Joined Var(mips_tune_option) ToLower Enum(mips_arch_opt_value)
-mtune=PROCESSOR Optimize the output for PROCESSOR
@@ -400,6 +412,10 @@ mvirt
Target Report Var(TARGET_VIRT)
Use Virtualization Application Specific instructions
+mxpa
+Target Report Var(TARGET_XPA)
+Use eXtended Physical Address (XPA) instructions
+
mvr4130-align
Target Report Mask(VR4130_ALIGN)
Perform VR4130-specific alignment optimizations
@@ -408,5 +424,32 @@ mxgot
Target Report Var(TARGET_XGOT)
Lift restrictions on GOT size
+modd-spreg
+Target Report Mask(ODD_SPREG)
+Enable use of odd-numbered single-precision registers
+
noasmopt
Driver
+
+mclib=
+Target RejectNegative Joined Var(mips_c_lib) ToLower Enum(mips_lib_setting) Init(MIPS_LIB_NEWLIB)
+Specify the C library to use with this application
+newlib Use newlib
+small Use SmallLib
+tiny Use SmallLib optimised for size
+
+Enum
+Name(mips_lib_setting) Type(enum mips_lib_setting)
+Known MIPS C libraries (for use with the -mclib= option):
+
+EnumValue
+Enum(mips_lib_setting) String(newlib) Value(MIPS_LIB_NEWLIB)
+
+EnumValue
+Enum(mips_lib_setting) String(small) Value(MIPS_LIB_SMALL)
+
+EnumValue
+Enum(mips_lib_setting) String(tiny) Value(MIPS_LIB_TINY)
+
+msched-weight
+Target Report Var(TARGET_SCHED_WEIGHT) Undocumented
diff --git a/gcc-4.9/gcc/config/mips/msa.h b/gcc-4.9/gcc/config/mips/msa.h
new file mode 100644
index 000000000..64fa42c71
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/msa.h
@@ -0,0 +1,1113 @@
+/* MIPS MSA intrinsics include file.
+
+ Copyright (C) 2014 Free Software Foundation, Inc.
+ Contributed by Imagination Technologies Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _MSA_H
+#define _MSA_H 1
+
+#if defined(__mips_msa)
+typedef signed char v16i8 __attribute__((vector_size(16), aligned(16)));
+typedef signed char v16i8_b __attribute__((vector_size(16), aligned(1)));
+typedef unsigned char v16u8 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned char v16u8_b __attribute__((vector_size(16), aligned(1)));
+typedef short v8i16 __attribute__((vector_size(16), aligned(16)));
+typedef short v8i16_h __attribute__((vector_size(16), aligned(2)));
+typedef unsigned short v8u16 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned short v8u16_h __attribute__((vector_size(16), aligned(2)));
+typedef int v4i32 __attribute__((vector_size(16), aligned(16)));
+typedef int v4i32_w __attribute__((vector_size(16), aligned(4)));
+typedef unsigned int v4u32 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned int v4u32_w __attribute__((vector_size(16), aligned(4)));
+typedef long long v2i64 __attribute__((vector_size(16), aligned(16)));
+typedef long long v2i64_d __attribute__((vector_size(16), aligned(8)));
+typedef unsigned long long v2u64 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned long long v2u64_d __attribute__((vector_size(16), aligned(8)));
+typedef float v4f32 __attribute__((vector_size(16), aligned(16)));
+typedef float v4f32_w __attribute__((vector_size(16), aligned(4)));
+typedef double v2f64 __attribute__ ((vector_size(16), aligned(16)));
+typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8)));
+
+#ifndef __clang__
+extern v16i8 __builtin_msa_sll_b(v16i8, v16i8);
+#define __msa_sll_b __builtin_msa_sll_b
+extern v8i16 __builtin_msa_sll_h(v8i16, v8i16);
+#define __msa_sll_h __builtin_msa_sll_h
+extern v4i32 __builtin_msa_sll_w(v4i32, v4i32);
+#define __msa_sll_w __builtin_msa_sll_w
+extern v2i64 __builtin_msa_sll_d(v2i64, v2i64);
+#define __msa_sll_d __builtin_msa_sll_d
+extern v16i8 __builtin_msa_slli_b(v16i8, unsigned char);
+#define __msa_slli_b __builtin_msa_slli_b
+extern v8i16 __builtin_msa_slli_h(v8i16, unsigned char);
+#define __msa_slli_h __builtin_msa_slli_h
+extern v4i32 __builtin_msa_slli_w(v4i32, unsigned char);
+#define __msa_slli_w __builtin_msa_slli_w
+extern v2i64 __builtin_msa_slli_d(v2i64, unsigned char);
+#define __msa_slli_d __builtin_msa_slli_d
+extern v16i8 __builtin_msa_sra_b(v16i8, v16i8);
+#define __msa_sra_b __builtin_msa_sra_b
+extern v8i16 __builtin_msa_sra_h(v8i16, v8i16);
+#define __msa_sra_h __builtin_msa_sra_h
+extern v4i32 __builtin_msa_sra_w(v4i32, v4i32);
+#define __msa_sra_w __builtin_msa_sra_w
+extern v2i64 __builtin_msa_sra_d(v2i64, v2i64);
+#define __msa_sra_d __builtin_msa_sra_d
+extern v16i8 __builtin_msa_srai_b(v16i8, unsigned char);
+#define __msa_srai_b __builtin_msa_srai_b
+extern v8i16 __builtin_msa_srai_h(v8i16, unsigned char);
+#define __msa_srai_h __builtin_msa_srai_h
+extern v4i32 __builtin_msa_srai_w(v4i32, unsigned char);
+#define __msa_srai_w __builtin_msa_srai_w
+extern v2i64 __builtin_msa_srai_d(v2i64, unsigned char);
+#define __msa_srai_d __builtin_msa_srai_d
+extern v16i8 __builtin_msa_srar_b(v16i8, v16i8);
+#define __msa_srar_b __builtin_msa_srar_b
+extern v8i16 __builtin_msa_srar_h(v8i16, v8i16);
+#define __msa_srar_h __builtin_msa_srar_h
+extern v4i32 __builtin_msa_srar_w(v4i32, v4i32);
+#define __msa_srar_w __builtin_msa_srar_w
+extern v2i64 __builtin_msa_srar_d(v2i64, v2i64);
+#define __msa_srar_d __builtin_msa_srar_d
+extern v16i8 __builtin_msa_srari_b(v16i8, unsigned char);
+#define __msa_srari_b __builtin_msa_srari_b
+extern v8i16 __builtin_msa_srari_h(v8i16, unsigned char);
+#define __msa_srari_h __builtin_msa_srari_h
+extern v4i32 __builtin_msa_srari_w(v4i32, unsigned char);
+#define __msa_srari_w __builtin_msa_srari_w
+extern v2i64 __builtin_msa_srari_d(v2i64, unsigned char);
+#define __msa_srari_d __builtin_msa_srari_d
+extern v16i8 __builtin_msa_srl_b(v16i8, v16i8);
+#define __msa_srl_b __builtin_msa_srl_b
+extern v8i16 __builtin_msa_srl_h(v8i16, v8i16);
+#define __msa_srl_h __builtin_msa_srl_h
+extern v4i32 __builtin_msa_srl_w(v4i32, v4i32);
+#define __msa_srl_w __builtin_msa_srl_w
+extern v2i64 __builtin_msa_srl_d(v2i64, v2i64);
+#define __msa_srl_d __builtin_msa_srl_d
+extern v16i8 __builtin_msa_srli_b(v16i8, unsigned char);
+#define __msa_srli_b __builtin_msa_srli_b
+extern v8i16 __builtin_msa_srli_h(v8i16, unsigned char);
+#define __msa_srli_h __builtin_msa_srli_h
+extern v4i32 __builtin_msa_srli_w(v4i32, unsigned char);
+#define __msa_srli_w __builtin_msa_srli_w
+extern v2i64 __builtin_msa_srli_d(v2i64, unsigned char);
+#define __msa_srli_d __builtin_msa_srli_d
+extern v16i8 __builtin_msa_srlr_b(v16i8, v16i8);
+#define __msa_srlr_b __builtin_msa_srlr_b
+extern v8i16 __builtin_msa_srlr_h(v8i16, v8i16);
+#define __msa_srlr_h __builtin_msa_srlr_h
+extern v4i32 __builtin_msa_srlr_w(v4i32, v4i32);
+#define __msa_srlr_w __builtin_msa_srlr_w
+extern v2i64 __builtin_msa_srlr_d(v2i64, v2i64);
+#define __msa_srlr_d __builtin_msa_srlr_d
+extern v16i8 __builtin_msa_srlri_b(v16i8, unsigned char);
+#define __msa_srlri_b __builtin_msa_srlri_b
+extern v8i16 __builtin_msa_srlri_h(v8i16, unsigned char);
+#define __msa_srlri_h __builtin_msa_srlri_h
+extern v4i32 __builtin_msa_srlri_w(v4i32, unsigned char);
+#define __msa_srlri_w __builtin_msa_srlri_w
+extern v2i64 __builtin_msa_srlri_d(v2i64, unsigned char);
+#define __msa_srlri_d __builtin_msa_srlri_d
+extern v16u8 __builtin_msa_bclr_b(v16u8, v16u8);
+#define __msa_bclr_b __builtin_msa_bclr_b
+extern v8u16 __builtin_msa_bclr_h(v8u16, v8u16);
+#define __msa_bclr_h __builtin_msa_bclr_h
+extern v4u32 __builtin_msa_bclr_w(v4u32, v4u32);
+#define __msa_bclr_w __builtin_msa_bclr_w
+extern v2u64 __builtin_msa_bclr_d(v2u64, v2u64);
+#define __msa_bclr_d __builtin_msa_bclr_d
+extern v16u8 __builtin_msa_bclri_b(v16u8, unsigned char);
+#define __msa_bclri_b __builtin_msa_bclri_b
+extern v8u16 __builtin_msa_bclri_h(v8u16, unsigned char);
+#define __msa_bclri_h __builtin_msa_bclri_h
+extern v4u32 __builtin_msa_bclri_w(v4u32, unsigned char);
+#define __msa_bclri_w __builtin_msa_bclri_w
+extern v2u64 __builtin_msa_bclri_d(v2u64, unsigned char);
+#define __msa_bclri_d __builtin_msa_bclri_d
+extern v16u8 __builtin_msa_bset_b(v16u8, v16u8);
+#define __msa_bset_b __builtin_msa_bset_b
+extern v8u16 __builtin_msa_bset_h(v8u16, v8u16);
+#define __msa_bset_h __builtin_msa_bset_h
+extern v4u32 __builtin_msa_bset_w(v4u32, v4u32);
+#define __msa_bset_w __builtin_msa_bset_w
+extern v2u64 __builtin_msa_bset_d(v2u64, v2u64);
+#define __msa_bset_d __builtin_msa_bset_d
+extern v16u8 __builtin_msa_bseti_b(v16u8, unsigned char);
+#define __msa_bseti_b __builtin_msa_bseti_b
+extern v8u16 __builtin_msa_bseti_h(v8u16, unsigned char);
+#define __msa_bseti_h __builtin_msa_bseti_h
+extern v4u32 __builtin_msa_bseti_w(v4u32, unsigned char);
+#define __msa_bseti_w __builtin_msa_bseti_w
+extern v2u64 __builtin_msa_bseti_d(v2u64, unsigned char);
+#define __msa_bseti_d __builtin_msa_bseti_d
+extern v16u8 __builtin_msa_bneg_b(v16u8, v16u8);
+#define __msa_bneg_b __builtin_msa_bneg_b
+extern v8u16 __builtin_msa_bneg_h(v8u16, v8u16);
+#define __msa_bneg_h __builtin_msa_bneg_h
+extern v4u32 __builtin_msa_bneg_w(v4u32, v4u32);
+#define __msa_bneg_w __builtin_msa_bneg_w
+extern v2u64 __builtin_msa_bneg_d(v2u64, v2u64);
+#define __msa_bneg_d __builtin_msa_bneg_d
+extern v16u8 __builtin_msa_bnegi_b(v16u8, unsigned char);
+#define __msa_bnegi_b __builtin_msa_bnegi_b
+extern v8u16 __builtin_msa_bnegi_h(v8u16, unsigned char);
+#define __msa_bnegi_h __builtin_msa_bnegi_h
+extern v4u32 __builtin_msa_bnegi_w(v4u32, unsigned char);
+#define __msa_bnegi_w __builtin_msa_bnegi_w
+extern v2u64 __builtin_msa_bnegi_d(v2u64, unsigned char);
+#define __msa_bnegi_d __builtin_msa_bnegi_d
+extern v16u8 __builtin_msa_binsl_b(v16u8, v16u8, v16u8);
+#define __msa_binsl_b __builtin_msa_binsl_b
+extern v8u16 __builtin_msa_binsl_h(v8u16, v8u16, v8u16);
+#define __msa_binsl_h __builtin_msa_binsl_h
+extern v4u32 __builtin_msa_binsl_w(v4u32, v4u32, v4u32);
+#define __msa_binsl_w __builtin_msa_binsl_w
+extern v2u64 __builtin_msa_binsl_d(v2u64, v2u64, v2u64);
+#define __msa_binsl_d __builtin_msa_binsl_d
+extern v16u8 __builtin_msa_binsli_b(v16u8, v16u8, unsigned char);
+#define __msa_binsli_b __builtin_msa_binsli_b
+extern v8u16 __builtin_msa_binsli_h(v8u16, v8u16, unsigned char);
+#define __msa_binsli_h __builtin_msa_binsli_h
+extern v4u32 __builtin_msa_binsli_w(v4u32, v4u32, unsigned char);
+#define __msa_binsli_w __builtin_msa_binsli_w
+extern v2u64 __builtin_msa_binsli_d(v2u64, v2u64, unsigned char);
+#define __msa_binsli_d __builtin_msa_binsli_d
+extern v16u8 __builtin_msa_binsr_b(v16u8, v16u8, v16u8);
+#define __msa_binsr_b __builtin_msa_binsr_b
+extern v8u16 __builtin_msa_binsr_h(v8u16, v8u16, v8u16);
+#define __msa_binsr_h __builtin_msa_binsr_h
+extern v4u32 __builtin_msa_binsr_w(v4u32, v4u32, v4u32);
+#define __msa_binsr_w __builtin_msa_binsr_w
+extern v2u64 __builtin_msa_binsr_d(v2u64, v2u64, v2u64);
+#define __msa_binsr_d __builtin_msa_binsr_d
+extern v16u8 __builtin_msa_binsri_b(v16u8, v16u8, unsigned char);
+#define __msa_binsri_b __builtin_msa_binsri_b
+extern v8u16 __builtin_msa_binsri_h(v8u16, v8u16, unsigned char);
+#define __msa_binsri_h __builtin_msa_binsri_h
+extern v4u32 __builtin_msa_binsri_w(v4u32, v4u32, unsigned char);
+#define __msa_binsri_w __builtin_msa_binsri_w
+extern v2u64 __builtin_msa_binsri_d(v2u64, v2u64, unsigned char);
+#define __msa_binsri_d __builtin_msa_binsri_d
+extern v16i8 __builtin_msa_addv_b(v16i8, v16i8);
+#define __msa_addv_b __builtin_msa_addv_b
+extern v8i16 __builtin_msa_addv_h(v8i16, v8i16);
+#define __msa_addv_h __builtin_msa_addv_h
+extern v4i32 __builtin_msa_addv_w(v4i32, v4i32);
+#define __msa_addv_w __builtin_msa_addv_w
+extern v2i64 __builtin_msa_addv_d(v2i64, v2i64);
+#define __msa_addv_d __builtin_msa_addv_d
+extern v16i8 __builtin_msa_addvi_b(v16i8, unsigned char);
+#define __msa_addvi_b __builtin_msa_addvi_b
+extern v8i16 __builtin_msa_addvi_h(v8i16, unsigned char);
+#define __msa_addvi_h __builtin_msa_addvi_h
+extern v4i32 __builtin_msa_addvi_w(v4i32, unsigned char);
+#define __msa_addvi_w __builtin_msa_addvi_w
+extern v2i64 __builtin_msa_addvi_d(v2i64, unsigned char);
+#define __msa_addvi_d __builtin_msa_addvi_d
+extern v16i8 __builtin_msa_subv_b(v16i8, v16i8);
+#define __msa_subv_b __builtin_msa_subv_b
+extern v8i16 __builtin_msa_subv_h(v8i16, v8i16);
+#define __msa_subv_h __builtin_msa_subv_h
+extern v4i32 __builtin_msa_subv_w(v4i32, v4i32);
+#define __msa_subv_w __builtin_msa_subv_w
+extern v2i64 __builtin_msa_subv_d(v2i64, v2i64);
+#define __msa_subv_d __builtin_msa_subv_d
+extern v16i8 __builtin_msa_subvi_b(v16i8, unsigned char);
+#define __msa_subvi_b __builtin_msa_subvi_b
+extern v8i16 __builtin_msa_subvi_h(v8i16, unsigned char);
+#define __msa_subvi_h __builtin_msa_subvi_h
+extern v4i32 __builtin_msa_subvi_w(v4i32, unsigned char);
+#define __msa_subvi_w __builtin_msa_subvi_w
+extern v2i64 __builtin_msa_subvi_d(v2i64, unsigned char);
+#define __msa_subvi_d __builtin_msa_subvi_d
+extern v16i8 __builtin_msa_max_s_b(v16i8, v16i8);
+#define __msa_max_s_b __builtin_msa_max_s_b
+extern v8i16 __builtin_msa_max_s_h(v8i16, v8i16);
+#define __msa_max_s_h __builtin_msa_max_s_h
+extern v4i32 __builtin_msa_max_s_w(v4i32, v4i32);
+#define __msa_max_s_w __builtin_msa_max_s_w
+extern v2i64 __builtin_msa_max_s_d(v2i64, v2i64);
+#define __msa_max_s_d __builtin_msa_max_s_d
+extern v16i8 __builtin_msa_maxi_s_b(v16i8, char);
+#define __msa_maxi_s_b __builtin_msa_maxi_s_b
+extern v8i16 __builtin_msa_maxi_s_h(v8i16, char);
+#define __msa_maxi_s_h __builtin_msa_maxi_s_h
+extern v4i32 __builtin_msa_maxi_s_w(v4i32, char);
+#define __msa_maxi_s_w __builtin_msa_maxi_s_w
+extern v2i64 __builtin_msa_maxi_s_d(v2i64, char);
+#define __msa_maxi_s_d __builtin_msa_maxi_s_d
+extern v16u8 __builtin_msa_max_u_b(v16u8, v16u8);
+#define __msa_max_u_b __builtin_msa_max_u_b
+extern v8u16 __builtin_msa_max_u_h(v8u16, v8u16);
+#define __msa_max_u_h __builtin_msa_max_u_h
+extern v4u32 __builtin_msa_max_u_w(v4u32, v4u32);
+#define __msa_max_u_w __builtin_msa_max_u_w
+extern v2u64 __builtin_msa_max_u_d(v2u64, v2u64);
+#define __msa_max_u_d __builtin_msa_max_u_d
+extern v16u8 __builtin_msa_maxi_u_b(v16u8, unsigned char);
+#define __msa_maxi_u_b __builtin_msa_maxi_u_b
+extern v8u16 __builtin_msa_maxi_u_h(v8u16, unsigned char);
+#define __msa_maxi_u_h __builtin_msa_maxi_u_h
+extern v4u32 __builtin_msa_maxi_u_w(v4u32, unsigned char);
+#define __msa_maxi_u_w __builtin_msa_maxi_u_w
+extern v2u64 __builtin_msa_maxi_u_d(v2u64, unsigned char);
+#define __msa_maxi_u_d __builtin_msa_maxi_u_d
+extern v16i8 __builtin_msa_min_s_b(v16i8, v16i8);
+#define __msa_min_s_b __builtin_msa_min_s_b
+extern v8i16 __builtin_msa_min_s_h(v8i16, v8i16);
+#define __msa_min_s_h __builtin_msa_min_s_h
+extern v4i32 __builtin_msa_min_s_w(v4i32, v4i32);
+#define __msa_min_s_w __builtin_msa_min_s_w
+extern v2i64 __builtin_msa_min_s_d(v2i64, v2i64);
+#define __msa_min_s_d __builtin_msa_min_s_d
+extern v16i8 __builtin_msa_mini_s_b(v16i8, char);
+#define __msa_mini_s_b __builtin_msa_mini_s_b
+extern v8i16 __builtin_msa_mini_s_h(v8i16, char);
+#define __msa_mini_s_h __builtin_msa_mini_s_h
+extern v4i32 __builtin_msa_mini_s_w(v4i32, char);
+#define __msa_mini_s_w __builtin_msa_mini_s_w
+extern v2i64 __builtin_msa_mini_s_d(v2i64, char);
+#define __msa_mini_s_d __builtin_msa_mini_s_d
+extern v16u8 __builtin_msa_min_u_b(v16u8, v16u8);
+#define __msa_min_u_b __builtin_msa_min_u_b
+extern v8u16 __builtin_msa_min_u_h(v8u16, v8u16);
+#define __msa_min_u_h __builtin_msa_min_u_h
+extern v4u32 __builtin_msa_min_u_w(v4u32, v4u32);
+#define __msa_min_u_w __builtin_msa_min_u_w
+extern v2u64 __builtin_msa_min_u_d(v2u64, v2u64);
+#define __msa_min_u_d __builtin_msa_min_u_d
+extern v16u8 __builtin_msa_mini_u_b(v16u8, unsigned char);
+#define __msa_mini_u_b __builtin_msa_mini_u_b
+extern v8u16 __builtin_msa_mini_u_h(v8u16, unsigned char);
+#define __msa_mini_u_h __builtin_msa_mini_u_h
+extern v4u32 __builtin_msa_mini_u_w(v4u32, unsigned char);
+#define __msa_mini_u_w __builtin_msa_mini_u_w
+extern v2u64 __builtin_msa_mini_u_d(v2u64, unsigned char);
+#define __msa_mini_u_d __builtin_msa_mini_u_d
+extern v16i8 __builtin_msa_max_a_b(v16i8, v16i8);
+#define __msa_max_a_b __builtin_msa_max_a_b
+extern v8i16 __builtin_msa_max_a_h(v8i16, v8i16);
+#define __msa_max_a_h __builtin_msa_max_a_h
+extern v4i32 __builtin_msa_max_a_w(v4i32, v4i32);
+#define __msa_max_a_w __builtin_msa_max_a_w
+extern v2i64 __builtin_msa_max_a_d(v2i64, v2i64);
+#define __msa_max_a_d __builtin_msa_max_a_d
+extern v16i8 __builtin_msa_min_a_b(v16i8, v16i8);
+#define __msa_min_a_b __builtin_msa_min_a_b
+extern v8i16 __builtin_msa_min_a_h(v8i16, v8i16);
+#define __msa_min_a_h __builtin_msa_min_a_h
+extern v4i32 __builtin_msa_min_a_w(v4i32, v4i32);
+#define __msa_min_a_w __builtin_msa_min_a_w
+extern v2i64 __builtin_msa_min_a_d(v2i64, v2i64);
+#define __msa_min_a_d __builtin_msa_min_a_d
+extern v16i8 __builtin_msa_ceq_b(v16i8, v16i8);
+#define __msa_ceq_b __builtin_msa_ceq_b
+extern v8i16 __builtin_msa_ceq_h(v8i16, v8i16);
+#define __msa_ceq_h __builtin_msa_ceq_h
+extern v4i32 __builtin_msa_ceq_w(v4i32, v4i32);
+#define __msa_ceq_w __builtin_msa_ceq_w
+extern v2i64 __builtin_msa_ceq_d(v2i64, v2i64);
+#define __msa_ceq_d __builtin_msa_ceq_d
+extern v16i8 __builtin_msa_ceqi_b(v16i8, char);
+#define __msa_ceqi_b __builtin_msa_ceqi_b
+extern v8i16 __builtin_msa_ceqi_h(v8i16, char);
+#define __msa_ceqi_h __builtin_msa_ceqi_h
+extern v4i32 __builtin_msa_ceqi_w(v4i32, char);
+#define __msa_ceqi_w __builtin_msa_ceqi_w
+extern v2i64 __builtin_msa_ceqi_d(v2i64, char);
+#define __msa_ceqi_d __builtin_msa_ceqi_d
+extern v16i8 __builtin_msa_clt_s_b(v16i8, v16i8);
+#define __msa_clt_s_b __builtin_msa_clt_s_b
+extern v8i16 __builtin_msa_clt_s_h(v8i16, v8i16);
+#define __msa_clt_s_h __builtin_msa_clt_s_h
+extern v4i32 __builtin_msa_clt_s_w(v4i32, v4i32);
+#define __msa_clt_s_w __builtin_msa_clt_s_w
+extern v2i64 __builtin_msa_clt_s_d(v2i64, v2i64);
+#define __msa_clt_s_d __builtin_msa_clt_s_d
+extern v16i8 __builtin_msa_clti_s_b(v16i8, char);
+#define __msa_clti_s_b __builtin_msa_clti_s_b
+extern v8i16 __builtin_msa_clti_s_h(v8i16, char);
+#define __msa_clti_s_h __builtin_msa_clti_s_h
+extern v4i32 __builtin_msa_clti_s_w(v4i32, char);
+#define __msa_clti_s_w __builtin_msa_clti_s_w
+extern v2i64 __builtin_msa_clti_s_d(v2i64, char);
+#define __msa_clti_s_d __builtin_msa_clti_s_d
+extern v16i8 __builtin_msa_clt_u_b(v16u8, v16u8);
+#define __msa_clt_u_b __builtin_msa_clt_u_b
+extern v8i16 __builtin_msa_clt_u_h(v8u16, v8u16);
+#define __msa_clt_u_h __builtin_msa_clt_u_h
+extern v4i32 __builtin_msa_clt_u_w(v4u32, v4u32);
+#define __msa_clt_u_w __builtin_msa_clt_u_w
+extern v2i64 __builtin_msa_clt_u_d(v2u64, v2u64);
+#define __msa_clt_u_d __builtin_msa_clt_u_d
+extern v16i8 __builtin_msa_clti_u_b(v16u8, unsigned char);
+#define __msa_clti_u_b __builtin_msa_clti_u_b
+extern v8i16 __builtin_msa_clti_u_h(v8u16, unsigned char);
+#define __msa_clti_u_h __builtin_msa_clti_u_h
+extern v4i32 __builtin_msa_clti_u_w(v4u32, unsigned char);
+#define __msa_clti_u_w __builtin_msa_clti_u_w
+extern v2i64 __builtin_msa_clti_u_d(v2u64, unsigned char);
+#define __msa_clti_u_d __builtin_msa_clti_u_d
+extern v16i8 __builtin_msa_cle_s_b(v16i8, v16i8);
+#define __msa_cle_s_b __builtin_msa_cle_s_b
+extern v8i16 __builtin_msa_cle_s_h(v8i16, v8i16);
+#define __msa_cle_s_h __builtin_msa_cle_s_h
+extern v4i32 __builtin_msa_cle_s_w(v4i32, v4i32);
+#define __msa_cle_s_w __builtin_msa_cle_s_w
+extern v2i64 __builtin_msa_cle_s_d(v2i64, v2i64);
+#define __msa_cle_s_d __builtin_msa_cle_s_d
+extern v16i8 __builtin_msa_clei_s_b(v16i8, char);
+#define __msa_clei_s_b __builtin_msa_clei_s_b
+extern v8i16 __builtin_msa_clei_s_h(v8i16, char);
+#define __msa_clei_s_h __builtin_msa_clei_s_h
+extern v4i32 __builtin_msa_clei_s_w(v4i32, char);
+#define __msa_clei_s_w __builtin_msa_clei_s_w
+extern v2i64 __builtin_msa_clei_s_d(v2i64, char);
+#define __msa_clei_s_d __builtin_msa_clei_s_d
+extern v16i8 __builtin_msa_cle_u_b(v16u8, v16u8);
+#define __msa_cle_u_b __builtin_msa_cle_u_b
+extern v8i16 __builtin_msa_cle_u_h(v8u16, v8u16);
+#define __msa_cle_u_h __builtin_msa_cle_u_h
+extern v4i32 __builtin_msa_cle_u_w(v4u32, v4u32);
+#define __msa_cle_u_w __builtin_msa_cle_u_w
+extern v2i64 __builtin_msa_cle_u_d(v2u64, v2u64);
+#define __msa_cle_u_d __builtin_msa_cle_u_d
+extern v16i8 __builtin_msa_clei_u_b(v16u8, unsigned char);
+#define __msa_clei_u_b __builtin_msa_clei_u_b
+extern v8i16 __builtin_msa_clei_u_h(v8u16, unsigned char);
+#define __msa_clei_u_h __builtin_msa_clei_u_h
+extern v4i32 __builtin_msa_clei_u_w(v4u32, unsigned char);
+#define __msa_clei_u_w __builtin_msa_clei_u_w
+extern v2i64 __builtin_msa_clei_u_d(v2u64, unsigned char);
+#define __msa_clei_u_d __builtin_msa_clei_u_d
+extern v16i8 __builtin_msa_ld_b(void *, int);
+#define __msa_ld_b __builtin_msa_ld_b
+extern v8i16 __builtin_msa_ld_h(void *, int);
+#define __msa_ld_h __builtin_msa_ld_h
+extern v4i32 __builtin_msa_ld_w(void *, int);
+#define __msa_ld_w __builtin_msa_ld_w
+extern v2i64 __builtin_msa_ld_d(void *, int);
+#define __msa_ld_d __builtin_msa_ld_d
+extern v16i8 __builtin_msa_sat_s_b(v16i8, unsigned char);
+#define __msa_sat_s_b __builtin_msa_sat_s_b
+extern v8i16 __builtin_msa_sat_s_h(v8i16, unsigned char);
+#define __msa_sat_s_h __builtin_msa_sat_s_h
+extern v4i32 __builtin_msa_sat_s_w(v4i32, unsigned char);
+#define __msa_sat_s_w __builtin_msa_sat_s_w
+extern v2i64 __builtin_msa_sat_s_d(v2i64, unsigned char);
+#define __msa_sat_s_d __builtin_msa_sat_s_d
+extern v16u8 __builtin_msa_sat_u_b(v16u8, unsigned char);
+#define __msa_sat_u_b __builtin_msa_sat_u_b
+extern v8u16 __builtin_msa_sat_u_h(v8u16, unsigned char);
+#define __msa_sat_u_h __builtin_msa_sat_u_h
+extern v4u32 __builtin_msa_sat_u_w(v4u32, unsigned char);
+#define __msa_sat_u_w __builtin_msa_sat_u_w
+extern v2u64 __builtin_msa_sat_u_d(v2u64, unsigned char);
+#define __msa_sat_u_d __builtin_msa_sat_u_d
+extern v16i8 __builtin_msa_add_a_b(v16i8, v16i8);
+#define __msa_add_a_b __builtin_msa_add_a_b
+extern v8i16 __builtin_msa_add_a_h(v8i16, v8i16);
+#define __msa_add_a_h __builtin_msa_add_a_h
+extern v4i32 __builtin_msa_add_a_w(v4i32, v4i32);
+#define __msa_add_a_w __builtin_msa_add_a_w
+extern v2i64 __builtin_msa_add_a_d(v2i64, v2i64);
+#define __msa_add_a_d __builtin_msa_add_a_d
+extern v16i8 __builtin_msa_adds_a_b(v16i8, v16i8);
+#define __msa_adds_a_b __builtin_msa_adds_a_b
+extern v8i16 __builtin_msa_adds_a_h(v8i16, v8i16);
+#define __msa_adds_a_h __builtin_msa_adds_a_h
+extern v4i32 __builtin_msa_adds_a_w(v4i32, v4i32);
+#define __msa_adds_a_w __builtin_msa_adds_a_w
+extern v2i64 __builtin_msa_adds_a_d(v2i64, v2i64);
+#define __msa_adds_a_d __builtin_msa_adds_a_d
+extern v16i8 __builtin_msa_adds_s_b(v16i8, v16i8);
+#define __msa_adds_s_b __builtin_msa_adds_s_b
+extern v8i16 __builtin_msa_adds_s_h(v8i16, v8i16);
+#define __msa_adds_s_h __builtin_msa_adds_s_h
+extern v4i32 __builtin_msa_adds_s_w(v4i32, v4i32);
+#define __msa_adds_s_w __builtin_msa_adds_s_w
+extern v2i64 __builtin_msa_adds_s_d(v2i64, v2i64);
+#define __msa_adds_s_d __builtin_msa_adds_s_d
+extern v16u8 __builtin_msa_adds_u_b(v16u8, v16u8);
+#define __msa_adds_u_b __builtin_msa_adds_u_b
+extern v8u16 __builtin_msa_adds_u_h(v8u16, v8u16);
+#define __msa_adds_u_h __builtin_msa_adds_u_h
+extern v4u32 __builtin_msa_adds_u_w(v4u32, v4u32);
+#define __msa_adds_u_w __builtin_msa_adds_u_w
+extern v2u64 __builtin_msa_adds_u_d(v2u64, v2u64);
+#define __msa_adds_u_d __builtin_msa_adds_u_d
+extern v16i8 __builtin_msa_ave_s_b(v16i8, v16i8);
+#define __msa_ave_s_b __builtin_msa_ave_s_b
+extern v8i16 __builtin_msa_ave_s_h(v8i16, v8i16);
+#define __msa_ave_s_h __builtin_msa_ave_s_h
+extern v4i32 __builtin_msa_ave_s_w(v4i32, v4i32);
+#define __msa_ave_s_w __builtin_msa_ave_s_w
+extern v2i64 __builtin_msa_ave_s_d(v2i64, v2i64);
+#define __msa_ave_s_d __builtin_msa_ave_s_d
+extern v16u8 __builtin_msa_ave_u_b(v16u8, v16u8);
+#define __msa_ave_u_b __builtin_msa_ave_u_b
+extern v8u16 __builtin_msa_ave_u_h(v8u16, v8u16);
+#define __msa_ave_u_h __builtin_msa_ave_u_h
+extern v4u32 __builtin_msa_ave_u_w(v4u32, v4u32);
+#define __msa_ave_u_w __builtin_msa_ave_u_w
+extern v2u64 __builtin_msa_ave_u_d(v2u64, v2u64);
+#define __msa_ave_u_d __builtin_msa_ave_u_d
+extern v16i8 __builtin_msa_aver_s_b(v16i8, v16i8);
+#define __msa_aver_s_b __builtin_msa_aver_s_b
+extern v8i16 __builtin_msa_aver_s_h(v8i16, v8i16);
+#define __msa_aver_s_h __builtin_msa_aver_s_h
+extern v4i32 __builtin_msa_aver_s_w(v4i32, v4i32);
+#define __msa_aver_s_w __builtin_msa_aver_s_w
+extern v2i64 __builtin_msa_aver_s_d(v2i64, v2i64);
+#define __msa_aver_s_d __builtin_msa_aver_s_d
+extern v16u8 __builtin_msa_aver_u_b(v16u8, v16u8);
+#define __msa_aver_u_b __builtin_msa_aver_u_b
+extern v8u16 __builtin_msa_aver_u_h(v8u16, v8u16);
+#define __msa_aver_u_h __builtin_msa_aver_u_h
+extern v4u32 __builtin_msa_aver_u_w(v4u32, v4u32);
+#define __msa_aver_u_w __builtin_msa_aver_u_w
+extern v2u64 __builtin_msa_aver_u_d(v2u64, v2u64);
+#define __msa_aver_u_d __builtin_msa_aver_u_d
+extern v16i8 __builtin_msa_subs_s_b(v16i8, v16i8);
+#define __msa_subs_s_b __builtin_msa_subs_s_b
+extern v8i16 __builtin_msa_subs_s_h(v8i16, v8i16);
+#define __msa_subs_s_h __builtin_msa_subs_s_h
+extern v4i32 __builtin_msa_subs_s_w(v4i32, v4i32);
+#define __msa_subs_s_w __builtin_msa_subs_s_w
+extern v2i64 __builtin_msa_subs_s_d(v2i64, v2i64);
+#define __msa_subs_s_d __builtin_msa_subs_s_d
+extern v16u8 __builtin_msa_subs_u_b(v16u8, v16u8);
+#define __msa_subs_u_b __builtin_msa_subs_u_b
+extern v8u16 __builtin_msa_subs_u_h(v8u16, v8u16);
+#define __msa_subs_u_h __builtin_msa_subs_u_h
+extern v4u32 __builtin_msa_subs_u_w(v4u32, v4u32);
+#define __msa_subs_u_w __builtin_msa_subs_u_w
+extern v2u64 __builtin_msa_subs_u_d(v2u64, v2u64);
+#define __msa_subs_u_d __builtin_msa_subs_u_d
+extern v16i8 __builtin_msa_subsuu_s_b(v16u8, v16u8);
+#define __msa_subsuu_s_b __builtin_msa_subsuu_s_b
+extern v8i16 __builtin_msa_subsuu_s_h(v8u16, v8u16);
+#define __msa_subsuu_s_h __builtin_msa_subsuu_s_h
+extern v4i32 __builtin_msa_subsuu_s_w(v4u32, v4u32);
+#define __msa_subsuu_s_w __builtin_msa_subsuu_s_w
+extern v2i64 __builtin_msa_subsuu_s_d(v2u64, v2u64);
+#define __msa_subsuu_s_d __builtin_msa_subsuu_s_d
+extern v16u8 __builtin_msa_subsus_u_b(v16u8, v16i8);
+#define __msa_subsus_u_b __builtin_msa_subsus_u_b
+extern v8u16 __builtin_msa_subsus_u_h(v8u16, v8i16);
+#define __msa_subsus_u_h __builtin_msa_subsus_u_h
+extern v4u32 __builtin_msa_subsus_u_w(v4u32, v4i32);
+#define __msa_subsus_u_w __builtin_msa_subsus_u_w
+extern v2u64 __builtin_msa_subsus_u_d(v2u64, v2i64);
+#define __msa_subsus_u_d __builtin_msa_subsus_u_d
+extern v16i8 __builtin_msa_asub_s_b(v16i8, v16i8);
+#define __msa_asub_s_b __builtin_msa_asub_s_b
+extern v8i16 __builtin_msa_asub_s_h(v8i16, v8i16);
+#define __msa_asub_s_h __builtin_msa_asub_s_h
+extern v4i32 __builtin_msa_asub_s_w(v4i32, v4i32);
+#define __msa_asub_s_w __builtin_msa_asub_s_w
+extern v2i64 __builtin_msa_asub_s_d(v2i64, v2i64);
+#define __msa_asub_s_d __builtin_msa_asub_s_d
+extern v16u8 __builtin_msa_asub_u_b(v16u8, v16u8);
+#define __msa_asub_u_b __builtin_msa_asub_u_b
+extern v8u16 __builtin_msa_asub_u_h(v8u16, v8u16);
+#define __msa_asub_u_h __builtin_msa_asub_u_h
+extern v4u32 __builtin_msa_asub_u_w(v4u32, v4u32);
+#define __msa_asub_u_w __builtin_msa_asub_u_w
+extern v2u64 __builtin_msa_asub_u_d(v2u64, v2u64);
+#define __msa_asub_u_d __builtin_msa_asub_u_d
+extern v16i8 __builtin_msa_mulv_b(v16i8, v16i8);
+#define __msa_mulv_b __builtin_msa_mulv_b
+extern v8i16 __builtin_msa_mulv_h(v8i16, v8i16);
+#define __msa_mulv_h __builtin_msa_mulv_h
+extern v4i32 __builtin_msa_mulv_w(v4i32, v4i32);
+#define __msa_mulv_w __builtin_msa_mulv_w
+extern v2i64 __builtin_msa_mulv_d(v2i64, v2i64);
+#define __msa_mulv_d __builtin_msa_mulv_d
+extern v16i8 __builtin_msa_maddv_b(v16i8, v16i8, v16i8);
+#define __msa_maddv_b __builtin_msa_maddv_b
+extern v8i16 __builtin_msa_maddv_h(v8i16, v8i16, v8i16);
+#define __msa_maddv_h __builtin_msa_maddv_h
+extern v4i32 __builtin_msa_maddv_w(v4i32, v4i32, v4i32);
+#define __msa_maddv_w __builtin_msa_maddv_w
+extern v2i64 __builtin_msa_maddv_d(v2i64, v2i64, v2i64);
+#define __msa_maddv_d __builtin_msa_maddv_d
+extern v16i8 __builtin_msa_msubv_b(v16i8, v16i8, v16i8);
+#define __msa_msubv_b __builtin_msa_msubv_b
+extern v8i16 __builtin_msa_msubv_h(v8i16, v8i16, v8i16);
+#define __msa_msubv_h __builtin_msa_msubv_h
+extern v4i32 __builtin_msa_msubv_w(v4i32, v4i32, v4i32);
+#define __msa_msubv_w __builtin_msa_msubv_w
+extern v2i64 __builtin_msa_msubv_d(v2i64, v2i64, v2i64);
+#define __msa_msubv_d __builtin_msa_msubv_d
+extern v16i8 __builtin_msa_div_s_b(v16i8, v16i8);
+#define __msa_div_s_b __builtin_msa_div_s_b
+extern v8i16 __builtin_msa_div_s_h(v8i16, v8i16);
+#define __msa_div_s_h __builtin_msa_div_s_h
+extern v4i32 __builtin_msa_div_s_w(v4i32, v4i32);
+#define __msa_div_s_w __builtin_msa_div_s_w
+extern v2i64 __builtin_msa_div_s_d(v2i64, v2i64);
+#define __msa_div_s_d __builtin_msa_div_s_d
+extern v16u8 __builtin_msa_div_u_b(v16u8, v16u8);
+#define __msa_div_u_b __builtin_msa_div_u_b
+extern v8u16 __builtin_msa_div_u_h(v8u16, v8u16);
+#define __msa_div_u_h __builtin_msa_div_u_h
+extern v4u32 __builtin_msa_div_u_w(v4u32, v4u32);
+#define __msa_div_u_w __builtin_msa_div_u_w
+extern v2u64 __builtin_msa_div_u_d(v2u64, v2u64);
+#define __msa_div_u_d __builtin_msa_div_u_d
+extern v8i16 __builtin_msa_hadd_s_h(v16i8, v16i8);
+#define __msa_hadd_s_h __builtin_msa_hadd_s_h
+extern v4i32 __builtin_msa_hadd_s_w(v8i16, v8i16);
+#define __msa_hadd_s_w __builtin_msa_hadd_s_w
+extern v2i64 __builtin_msa_hadd_s_d(v4i32, v4i32);
+#define __msa_hadd_s_d __builtin_msa_hadd_s_d
+extern v8u16 __builtin_msa_hadd_u_h(v16u8, v16u8);
+#define __msa_hadd_u_h __builtin_msa_hadd_u_h
+extern v4u32 __builtin_msa_hadd_u_w(v8u16, v8u16);
+#define __msa_hadd_u_w __builtin_msa_hadd_u_w
+extern v2u64 __builtin_msa_hadd_u_d(v4u32, v4u32);
+#define __msa_hadd_u_d __builtin_msa_hadd_u_d
+extern v8i16 __builtin_msa_hsub_s_h(v16i8, v16i8);
+#define __msa_hsub_s_h __builtin_msa_hsub_s_h
+extern v4i32 __builtin_msa_hsub_s_w(v8i16, v8i16);
+#define __msa_hsub_s_w __builtin_msa_hsub_s_w
+extern v2i64 __builtin_msa_hsub_s_d(v4i32, v4i32);
+#define __msa_hsub_s_d __builtin_msa_hsub_s_d
+extern v8i16 __builtin_msa_hsub_u_h(v16u8, v16u8);
+#define __msa_hsub_u_h __builtin_msa_hsub_u_h
+extern v4i32 __builtin_msa_hsub_u_w(v8u16, v8u16);
+#define __msa_hsub_u_w __builtin_msa_hsub_u_w
+extern v2i64 __builtin_msa_hsub_u_d(v4u32, v4u32);
+#define __msa_hsub_u_d __builtin_msa_hsub_u_d
+extern v16i8 __builtin_msa_mod_s_b(v16i8, v16i8);
+#define __msa_mod_s_b __builtin_msa_mod_s_b
+extern v8i16 __builtin_msa_mod_s_h(v8i16, v8i16);
+#define __msa_mod_s_h __builtin_msa_mod_s_h
+extern v4i32 __builtin_msa_mod_s_w(v4i32, v4i32);
+#define __msa_mod_s_w __builtin_msa_mod_s_w
+extern v2i64 __builtin_msa_mod_s_d(v2i64, v2i64);
+#define __msa_mod_s_d __builtin_msa_mod_s_d
+extern v16u8 __builtin_msa_mod_u_b(v16u8, v16u8);
+#define __msa_mod_u_b __builtin_msa_mod_u_b
+extern v8u16 __builtin_msa_mod_u_h(v8u16, v8u16);
+#define __msa_mod_u_h __builtin_msa_mod_u_h
+extern v4u32 __builtin_msa_mod_u_w(v4u32, v4u32);
+#define __msa_mod_u_w __builtin_msa_mod_u_w
+extern v2u64 __builtin_msa_mod_u_d(v2u64, v2u64);
+#define __msa_mod_u_d __builtin_msa_mod_u_d
+extern v8i16 __builtin_msa_dotp_s_h(v16i8, v16i8);
+#define __msa_dotp_s_h __builtin_msa_dotp_s_h
+extern v4i32 __builtin_msa_dotp_s_w(v8i16, v8i16);
+#define __msa_dotp_s_w __builtin_msa_dotp_s_w
+extern v2i64 __builtin_msa_dotp_s_d(v4i32, v4i32);
+#define __msa_dotp_s_d __builtin_msa_dotp_s_d
+extern v8u16 __builtin_msa_dotp_u_h(v16u8, v16u8);
+#define __msa_dotp_u_h __builtin_msa_dotp_u_h
+extern v4u32 __builtin_msa_dotp_u_w(v8u16, v8u16);
+#define __msa_dotp_u_w __builtin_msa_dotp_u_w
+extern v2u64 __builtin_msa_dotp_u_d(v4u32, v4u32);
+#define __msa_dotp_u_d __builtin_msa_dotp_u_d
+extern v8i16 __builtin_msa_dpadd_s_h(v8i16, v16i8, v16i8);
+#define __msa_dpadd_s_h __builtin_msa_dpadd_s_h
+extern v4i32 __builtin_msa_dpadd_s_w(v4i32, v8i16, v8i16);
+#define __msa_dpadd_s_w __builtin_msa_dpadd_s_w
+extern v2i64 __builtin_msa_dpadd_s_d(v2i64, v4i32, v4i32);
+#define __msa_dpadd_s_d __builtin_msa_dpadd_s_d
+extern v8u16 __builtin_msa_dpadd_u_h(v8u16, v16u8, v16u8);
+#define __msa_dpadd_u_h __builtin_msa_dpadd_u_h
+extern v4u32 __builtin_msa_dpadd_u_w(v4u32, v8u16, v8u16);
+#define __msa_dpadd_u_w __builtin_msa_dpadd_u_w
+extern v2u64 __builtin_msa_dpadd_u_d(v2u64, v4u32, v4u32);
+#define __msa_dpadd_u_d __builtin_msa_dpadd_u_d
+extern v8i16 __builtin_msa_dpsub_s_h(v8i16, v16i8, v16i8);
+#define __msa_dpsub_s_h __builtin_msa_dpsub_s_h
+extern v4i32 __builtin_msa_dpsub_s_w(v4i32, v8i16, v8i16);
+#define __msa_dpsub_s_w __builtin_msa_dpsub_s_w
+extern v2i64 __builtin_msa_dpsub_s_d(v2i64, v4i32, v4i32);
+#define __msa_dpsub_s_d __builtin_msa_dpsub_s_d
+extern v8i16 __builtin_msa_dpsub_u_h(v8i16, v16u8, v16u8);
+#define __msa_dpsub_u_h __builtin_msa_dpsub_u_h
+extern v4i32 __builtin_msa_dpsub_u_w(v4i32, v8u16, v8u16);
+#define __msa_dpsub_u_w __builtin_msa_dpsub_u_w
+extern v2i64 __builtin_msa_dpsub_u_d(v2i64, v4u32, v4u32);
+#define __msa_dpsub_u_d __builtin_msa_dpsub_u_d
+extern v16i8 __builtin_msa_sld_b(v16i8, v16i8, int);
+#define __msa_sld_b __builtin_msa_sld_b
+extern v8i16 __builtin_msa_sld_h(v8i16, v8i16, int);
+#define __msa_sld_h __builtin_msa_sld_h
+extern v4i32 __builtin_msa_sld_w(v4i32, v4i32, int);
+#define __msa_sld_w __builtin_msa_sld_w
+extern v2i64 __builtin_msa_sld_d(v2i64, v2i64, int);
+#define __msa_sld_d __builtin_msa_sld_d
+extern v16i8 __builtin_msa_sldi_b(v16i8, v16i8, unsigned char);
+#define __msa_sldi_b __builtin_msa_sldi_b
+extern v8i16 __builtin_msa_sldi_h(v8i16, v8i16, unsigned char);
+#define __msa_sldi_h __builtin_msa_sldi_h
+extern v4i32 __builtin_msa_sldi_w(v4i32, v4i32, unsigned char);
+#define __msa_sldi_w __builtin_msa_sldi_w
+extern v2i64 __builtin_msa_sldi_d(v2i64, v2i64, unsigned char);
+#define __msa_sldi_d __builtin_msa_sldi_d
+extern v16i8 __builtin_msa_splat_b(v16i8, int);
+#define __msa_splat_b __builtin_msa_splat_b
+extern v8i16 __builtin_msa_splat_h(v8i16, int);
+#define __msa_splat_h __builtin_msa_splat_h
+extern v4i32 __builtin_msa_splat_w(v4i32, int);
+#define __msa_splat_w __builtin_msa_splat_w
+extern v2i64 __builtin_msa_splat_d(v2i64, int);
+#define __msa_splat_d __builtin_msa_splat_d
+extern v16i8 __builtin_msa_splati_b(v16i8, unsigned char);
+#define __msa_splati_b __builtin_msa_splati_b
+extern v8i16 __builtin_msa_splati_h(v8i16, unsigned char);
+#define __msa_splati_h __builtin_msa_splati_h
+extern v4i32 __builtin_msa_splati_w(v4i32, unsigned char);
+#define __msa_splati_w __builtin_msa_splati_w
+extern v2i64 __builtin_msa_splati_d(v2i64, unsigned char);
+#define __msa_splati_d __builtin_msa_splati_d
+extern v16i8 __builtin_msa_pckev_b(v16i8, v16i8);
+#define __msa_pckev_b __builtin_msa_pckev_b
+extern v8i16 __builtin_msa_pckev_h(v8i16, v8i16);
+#define __msa_pckev_h __builtin_msa_pckev_h
+extern v4i32 __builtin_msa_pckev_w(v4i32, v4i32);
+#define __msa_pckev_w __builtin_msa_pckev_w
+extern v2i64 __builtin_msa_pckev_d(v2i64, v2i64);
+#define __msa_pckev_d __builtin_msa_pckev_d
+extern v16i8 __builtin_msa_pckod_b(v16i8, v16i8);
+#define __msa_pckod_b __builtin_msa_pckod_b
+extern v8i16 __builtin_msa_pckod_h(v8i16, v8i16);
+#define __msa_pckod_h __builtin_msa_pckod_h
+extern v4i32 __builtin_msa_pckod_w(v4i32, v4i32);
+#define __msa_pckod_w __builtin_msa_pckod_w
+extern v2i64 __builtin_msa_pckod_d(v2i64, v2i64);
+#define __msa_pckod_d __builtin_msa_pckod_d
+extern v16i8 __builtin_msa_ilvl_b(v16i8, v16i8);
+#define __msa_ilvl_b __builtin_msa_ilvl_b
+extern v8i16 __builtin_msa_ilvl_h(v8i16, v8i16);
+#define __msa_ilvl_h __builtin_msa_ilvl_h
+extern v4i32 __builtin_msa_ilvl_w(v4i32, v4i32);
+#define __msa_ilvl_w __builtin_msa_ilvl_w
+extern v2i64 __builtin_msa_ilvl_d(v2i64, v2i64);
+#define __msa_ilvl_d __builtin_msa_ilvl_d
+extern v16i8 __builtin_msa_ilvr_b(v16i8, v16i8);
+#define __msa_ilvr_b __builtin_msa_ilvr_b
+extern v8i16 __builtin_msa_ilvr_h(v8i16, v8i16);
+#define __msa_ilvr_h __builtin_msa_ilvr_h
+extern v4i32 __builtin_msa_ilvr_w(v4i32, v4i32);
+#define __msa_ilvr_w __builtin_msa_ilvr_w
+extern v2i64 __builtin_msa_ilvr_d(v2i64, v2i64);
+#define __msa_ilvr_d __builtin_msa_ilvr_d
+extern v16i8 __builtin_msa_ilvev_b(v16i8, v16i8);
+#define __msa_ilvev_b __builtin_msa_ilvev_b
+extern v8i16 __builtin_msa_ilvev_h(v8i16, v8i16);
+#define __msa_ilvev_h __builtin_msa_ilvev_h
+extern v4i32 __builtin_msa_ilvev_w(v4i32, v4i32);
+#define __msa_ilvev_w __builtin_msa_ilvev_w
+extern v2i64 __builtin_msa_ilvev_d(v2i64, v2i64);
+#define __msa_ilvev_d __builtin_msa_ilvev_d
+extern v16i8 __builtin_msa_ilvod_b(v16i8, v16i8);
+#define __msa_ilvod_b __builtin_msa_ilvod_b
+extern v8i16 __builtin_msa_ilvod_h(v8i16, v8i16);
+#define __msa_ilvod_h __builtin_msa_ilvod_h
+extern v4i32 __builtin_msa_ilvod_w(v4i32, v4i32);
+#define __msa_ilvod_w __builtin_msa_ilvod_w
+extern v2i64 __builtin_msa_ilvod_d(v2i64, v2i64);
+#define __msa_ilvod_d __builtin_msa_ilvod_d
+extern v16i8 __builtin_msa_vshf_b(v16i8, v16i8, v16i8);
+#define __msa_vshf_b __builtin_msa_vshf_b
+extern v8i16 __builtin_msa_vshf_h(v8i16, v8i16, v8i16);
+#define __msa_vshf_h __builtin_msa_vshf_h
+extern v4i32 __builtin_msa_vshf_w(v4i32, v4i32, v4i32);
+#define __msa_vshf_w __builtin_msa_vshf_w
+extern v2i64 __builtin_msa_vshf_d(v2i64, v2i64, v2i64);
+#define __msa_vshf_d __builtin_msa_vshf_d
+extern v16u8 __builtin_msa_and_v(v16u8, v16u8);
+#define __msa_and_v __builtin_msa_and_v
+extern v16u8 __builtin_msa_andi_b(v16u8, unsigned char);
+#define __msa_andi_b __builtin_msa_andi_b
+extern v16u8 __builtin_msa_or_v(v16u8, v16u8);
+#define __msa_or_v __builtin_msa_or_v
+extern v16u8 __builtin_msa_ori_b(v16u8, unsigned char);
+#define __msa_ori_b __builtin_msa_ori_b
+extern v16u8 __builtin_msa_nor_v(v16u8, v16u8);
+#define __msa_nor_v __builtin_msa_nor_v
+extern v16u8 __builtin_msa_nori_b(v16u8, unsigned char);
+#define __msa_nori_b __builtin_msa_nori_b
+extern v16u8 __builtin_msa_xor_v(v16u8, v16u8);
+#define __msa_xor_v __builtin_msa_xor_v
+extern v16u8 __builtin_msa_xori_b(v16u8, unsigned char);
+#define __msa_xori_b __builtin_msa_xori_b
+extern v16u8 __builtin_msa_bmnz_v(v16u8, v16u8, v16u8);
+#define __msa_bmnz_v __builtin_msa_bmnz_v
+extern v16u8 __builtin_msa_bmnzi_b(v16u8, v16u8, unsigned char);
+#define __msa_bmnzi_b __builtin_msa_bmnzi_b
+extern v16u8 __builtin_msa_bmz_v(v16u8, v16u8, v16u8);
+#define __msa_bmz_v __builtin_msa_bmz_v
+extern v16u8 __builtin_msa_bmzi_b(v16u8, v16u8, unsigned char);
+#define __msa_bmzi_b __builtin_msa_bmzi_b
+extern v16u8 __builtin_msa_bsel_v(v16u8, v16u8, v16u8);
+#define __msa_bsel_v __builtin_msa_bsel_v
+extern v16u8 __builtin_msa_bseli_b(v16u8, v16u8, unsigned char);
+#define __msa_bseli_b __builtin_msa_bseli_b
+extern v16i8 __builtin_msa_shf_b(v16i8, unsigned char);
+#define __msa_shf_b __builtin_msa_shf_b
+extern v8i16 __builtin_msa_shf_h(v8i16, unsigned char);
+#define __msa_shf_h __builtin_msa_shf_h
+extern v4i32 __builtin_msa_shf_w(v4i32, unsigned char);
+#define __msa_shf_w __builtin_msa_shf_w
+extern int __builtin_msa_bnz_v(v16u8);
+#define __msa_test_bnz_v __builtin_msa_bnz_v
+extern int __builtin_msa_bz_v(v16u8);
+#define __msa_test_bz_v __builtin_msa_bz_v
+extern v16i8 __builtin_msa_fill_b(int);
+#define __msa_fill_b __builtin_msa_fill_b
+extern v8i16 __builtin_msa_fill_h(int);
+#define __msa_fill_h __builtin_msa_fill_h
+extern v4i32 __builtin_msa_fill_w(int);
+#define __msa_fill_w __builtin_msa_fill_w
+extern v2i64 __builtin_msa_fill_d(long long);
+#define __msa_fill_d __builtin_msa_fill_d
+extern v16i8 __builtin_msa_pcnt_b(v16i8);
+#define __msa_pcnt_b __builtin_msa_pcnt_b
+extern v8i16 __builtin_msa_pcnt_h(v8i16);
+#define __msa_pcnt_h __builtin_msa_pcnt_h
+extern v4i32 __builtin_msa_pcnt_w(v4i32);
+#define __msa_pcnt_w __builtin_msa_pcnt_w
+extern v2i64 __builtin_msa_pcnt_d(v2i64);
+#define __msa_pcnt_d __builtin_msa_pcnt_d
+extern v16i8 __builtin_msa_nloc_b(v16i8);
+#define __msa_nloc_b __builtin_msa_nloc_b
+extern v8i16 __builtin_msa_nloc_h(v8i16);
+#define __msa_nloc_h __builtin_msa_nloc_h
+extern v4i32 __builtin_msa_nloc_w(v4i32);
+#define __msa_nloc_w __builtin_msa_nloc_w
+extern v2i64 __builtin_msa_nloc_d(v2i64);
+#define __msa_nloc_d __builtin_msa_nloc_d
+extern v16i8 __builtin_msa_nlzc_b(v16i8);
+#define __msa_nlzc_b __builtin_msa_nlzc_b
+extern v8i16 __builtin_msa_nlzc_h(v8i16);
+#define __msa_nlzc_h __builtin_msa_nlzc_h
+extern v4i32 __builtin_msa_nlzc_w(v4i32);
+#define __msa_nlzc_w __builtin_msa_nlzc_w
+extern v2i64 __builtin_msa_nlzc_d(v2i64);
+#define __msa_nlzc_d __builtin_msa_nlzc_d
+extern int __builtin_msa_copy_s_b(v16i8, unsigned char);
+#define __msa_copy_s_b __builtin_msa_copy_s_b
+extern int __builtin_msa_copy_s_h(v8i16, unsigned char);
+#define __msa_copy_s_h __builtin_msa_copy_s_h
+extern int __builtin_msa_copy_s_w(v4i32, unsigned char);
+#define __msa_copy_s_w __builtin_msa_copy_s_w
+extern long long __builtin_msa_copy_s_d(v2i64, unsigned char);
+#define __msa_copy_s_d __builtin_msa_copy_s_d
+extern int __builtin_msa_copy_u_b(v16i8, unsigned char);
+#define __msa_copy_u_b __builtin_msa_copy_u_b
+extern int __builtin_msa_copy_u_h(v8i16, unsigned char);
+#define __msa_copy_u_h __builtin_msa_copy_u_h
+extern int __builtin_msa_copy_u_w(v4i32, unsigned char);
+#define __msa_copy_u_w __builtin_msa_copy_u_w
+extern long long __builtin_msa_copy_u_d(v2i64, unsigned char);
+#define __msa_copy_u_d __builtin_msa_copy_u_d
+extern v16i8 __builtin_msa_insert_b(v16i8, unsigned char, int);
+#define __msa_insert_b __builtin_msa_insert_b
+extern v8i16 __builtin_msa_insert_h(v8i16, unsigned char, int);
+#define __msa_insert_h __builtin_msa_insert_h
+extern v4i32 __builtin_msa_insert_w(v4i32, unsigned char, int);
+#define __msa_insert_w __builtin_msa_insert_w
+extern v2i64 __builtin_msa_insert_d(v2i64, unsigned char, long long);
+#define __msa_insert_d __builtin_msa_insert_d
+extern v16i8 __builtin_msa_insve_b(v16i8, unsigned char, v16i8);
+#define __msa_insve_b __builtin_msa_insve_b
+extern v8i16 __builtin_msa_insve_h(v8i16, unsigned char, v8i16);
+#define __msa_insve_h __builtin_msa_insve_h
+extern v4i32 __builtin_msa_insve_w(v4i32, unsigned char, v4i32);
+#define __msa_insve_w __builtin_msa_insve_w
+extern v2i64 __builtin_msa_insve_d(v2i64, unsigned char, v2i64);
+#define __msa_insve_d __builtin_msa_insve_d
+extern int __builtin_msa_bnz_b(v16u8);
+#define __msa_test_bnz_b __builtin_msa_bnz_b
+extern int __builtin_msa_bnz_h(v8u16);
+#define __msa_test_bnz_h __builtin_msa_bnz_h
+extern int __builtin_msa_bnz_w(v4u32);
+#define __msa_test_bnz_w __builtin_msa_bnz_w
+extern int __builtin_msa_bnz_d(v2u64);
+#define __msa_test_bnz_d __builtin_msa_bnz_d
+extern int __builtin_msa_bz_b(v16u8);
+#define __msa_test_bz_b __builtin_msa_bz_b
+extern int __builtin_msa_bz_h(v8u16);
+#define __msa_test_bz_h __builtin_msa_bz_h
+extern int __builtin_msa_bz_w(v4u32);
+#define __msa_test_bz_w __builtin_msa_bz_w
+extern int __builtin_msa_bz_d(v2u64);
+#define __msa_test_bz_d __builtin_msa_bz_d
+extern v16i8 __builtin_msa_ldi_b(short);
+#define __msa_ldi_b __builtin_msa_ldi_b
+extern v8i16 __builtin_msa_ldi_h(short);
+#define __msa_ldi_h __builtin_msa_ldi_h
+extern v4i32 __builtin_msa_ldi_w(short);
+#define __msa_ldi_w __builtin_msa_ldi_w
+extern v2i64 __builtin_msa_ldi_d(short);
+#define __msa_ldi_d __builtin_msa_ldi_d
+extern v4i32 __builtin_msa_fcaf_w(v4f32, v4f32);
+#define __msa_fcaf_w __builtin_msa_fcaf_w
+extern v2i64 __builtin_msa_fcaf_d(v2f64, v2f64);
+#define __msa_fcaf_d __builtin_msa_fcaf_d
+extern v4i32 __builtin_msa_fcor_w(v4f32, v4f32);
+#define __msa_fcor_w __builtin_msa_fcor_w
+extern v2i64 __builtin_msa_fcor_d(v2f64, v2f64);
+#define __msa_fcor_d __builtin_msa_fcor_d
+extern v4i32 __builtin_msa_fcun_w(v4f32, v4f32);
+#define __msa_fcun_w __builtin_msa_fcun_w
+extern v2i64 __builtin_msa_fcun_d(v2f64, v2f64);
+#define __msa_fcun_d __builtin_msa_fcun_d
+extern v4i32 __builtin_msa_fcune_w(v4f32, v4f32);
+#define __msa_fcune_w __builtin_msa_fcune_w
+extern v2i64 __builtin_msa_fcune_d(v2f64, v2f64);
+#define __msa_fcune_d __builtin_msa_fcune_d
+extern v4i32 __builtin_msa_fcueq_w(v4f32, v4f32);
+#define __msa_fcueq_w __builtin_msa_fcueq_w
+extern v2i64 __builtin_msa_fcueq_d(v2f64, v2f64);
+#define __msa_fcueq_d __builtin_msa_fcueq_d
+extern v4i32 __builtin_msa_fceq_w(v4f32, v4f32);
+#define __msa_fceq_w __builtin_msa_fceq_w
+extern v2i64 __builtin_msa_fceq_d(v2f64, v2f64);
+#define __msa_fceq_d __builtin_msa_fceq_d
+extern v4i32 __builtin_msa_fcne_w(v4f32, v4f32);
+#define __msa_fcne_w __builtin_msa_fcne_w
+extern v2i64 __builtin_msa_fcne_d(v2f64, v2f64);
+#define __msa_fcne_d __builtin_msa_fcne_d
+extern v4i32 __builtin_msa_fclt_w(v4f32, v4f32);
+#define __msa_fclt_w __builtin_msa_fclt_w
+extern v2i64 __builtin_msa_fclt_d(v2f64, v2f64);
+#define __msa_fclt_d __builtin_msa_fclt_d
+extern v4i32 __builtin_msa_fcult_w(v4f32, v4f32);
+#define __msa_fcult_w __builtin_msa_fcult_w
+extern v2i64 __builtin_msa_fcult_d(v2f64, v2f64);
+#define __msa_fcult_d __builtin_msa_fcult_d
+extern v4i32 __builtin_msa_fcle_w(v4f32, v4f32);
+#define __msa_fcle_w __builtin_msa_fcle_w
+extern v2i64 __builtin_msa_fcle_d(v2f64, v2f64);
+#define __msa_fcle_d __builtin_msa_fcle_d
+extern v4i32 __builtin_msa_fcule_w(v4f32, v4f32);
+#define __msa_fcule_w __builtin_msa_fcule_w
+extern v2i64 __builtin_msa_fcule_d(v2f64, v2f64);
+#define __msa_fcule_d __builtin_msa_fcule_d
+extern v4i32 __builtin_msa_fsaf_w(v4f32, v4f32);
+#define __msa_fsaf_w __builtin_msa_fsaf_w
+extern v2i64 __builtin_msa_fsaf_d(v2f64, v2f64);
+#define __msa_fsaf_d __builtin_msa_fsaf_d
+extern v4i32 __builtin_msa_fsor_w(v4f32, v4f32);
+#define __msa_fsor_w __builtin_msa_fsor_w
+extern v2i64 __builtin_msa_fsor_d(v2f64, v2f64);
+#define __msa_fsor_d __builtin_msa_fsor_d
+extern v4i32 __builtin_msa_fsun_w(v4f32, v4f32);
+#define __msa_fsun_w __builtin_msa_fsun_w
+extern v2i64 __builtin_msa_fsun_d(v2f64, v2f64);
+#define __msa_fsun_d __builtin_msa_fsun_d
+extern v4i32 __builtin_msa_fsune_w(v4f32, v4f32);
+#define __msa_fsune_w __builtin_msa_fsune_w
+extern v2i64 __builtin_msa_fsune_d(v2f64, v2f64);
+#define __msa_fsune_d __builtin_msa_fsune_d
+extern v4i32 __builtin_msa_fsueq_w(v4f32, v4f32);
+#define __msa_fsueq_w __builtin_msa_fsueq_w
+extern v2i64 __builtin_msa_fsueq_d(v2f64, v2f64);
+#define __msa_fsueq_d __builtin_msa_fsueq_d
+extern v4i32 __builtin_msa_fseq_w(v4f32, v4f32);
+#define __msa_fseq_w __builtin_msa_fseq_w
+extern v2i64 __builtin_msa_fseq_d(v2f64, v2f64);
+#define __msa_fseq_d __builtin_msa_fseq_d
+extern v4i32 __builtin_msa_fsne_w(v4f32, v4f32);
+#define __msa_fsne_w __builtin_msa_fsne_w
+extern v2i64 __builtin_msa_fsne_d(v2f64, v2f64);
+#define __msa_fsne_d __builtin_msa_fsne_d
+extern v4i32 __builtin_msa_fslt_w(v4f32, v4f32);
+#define __msa_fslt_w __builtin_msa_fslt_w
+extern v2i64 __builtin_msa_fslt_d(v2f64, v2f64);
+#define __msa_fslt_d __builtin_msa_fslt_d
+extern v4i32 __builtin_msa_fsult_w(v4f32, v4f32);
+#define __msa_fsult_w __builtin_msa_fsult_w
+extern v2i64 __builtin_msa_fsult_d(v2f64, v2f64);
+#define __msa_fsult_d __builtin_msa_fsult_d
+extern v4i32 __builtin_msa_fsle_w(v4f32, v4f32);
+#define __msa_fsle_w __builtin_msa_fsle_w
+extern v2i64 __builtin_msa_fsle_d(v2f64, v2f64);
+#define __msa_fsle_d __builtin_msa_fsle_d
+extern v4i32 __builtin_msa_fsule_w(v4f32, v4f32);
+#define __msa_fsule_w __builtin_msa_fsule_w
+extern v2i64 __builtin_msa_fsule_d(v2f64, v2f64);
+#define __msa_fsule_d __builtin_msa_fsule_d
+extern v4f32 __builtin_msa_fadd_w(v4f32, v4f32);
+#define __msa_fadd_w __builtin_msa_fadd_w
+extern v2f64 __builtin_msa_fadd_d(v2f64, v2f64);
+#define __msa_fadd_d __builtin_msa_fadd_d
+extern v4f32 __builtin_msa_fsub_w(v4f32, v4f32);
+#define __msa_fsub_w __builtin_msa_fsub_w
+extern v2f64 __builtin_msa_fsub_d(v2f64, v2f64);
+#define __msa_fsub_d __builtin_msa_fsub_d
+extern v4f32 __builtin_msa_fmul_w(v4f32, v4f32);
+#define __msa_fmul_w __builtin_msa_fmul_w
+extern v2f64 __builtin_msa_fmul_d(v2f64, v2f64);
+#define __msa_fmul_d __builtin_msa_fmul_d
+extern v4f32 __builtin_msa_fdiv_w(v4f32, v4f32);
+#define __msa_fdiv_w __builtin_msa_fdiv_w
+extern v2f64 __builtin_msa_fdiv_d(v2f64, v2f64);
+#define __msa_fdiv_d __builtin_msa_fdiv_d
+extern v4f32 __builtin_msa_fmadd_w(v4f32, v4f32, v4f32);
+#define __msa_fmadd_w __builtin_msa_fmadd_w
+extern v2f64 __builtin_msa_fmadd_d(v2f64, v2f64, v2f64);
+#define __msa_fmadd_d __builtin_msa_fmadd_d
+extern v4f32 __builtin_msa_fmsub_w(v4f32, v4f32, v4f32);
+#define __msa_fmsub_w __builtin_msa_fmsub_w
+extern v2f64 __builtin_msa_fmsub_d(v2f64, v2f64, v2f64);
+#define __msa_fmsub_d __builtin_msa_fmsub_d
+extern v4f32 __builtin_msa_fexp2_w(v4f32, v4i32);
+#define __msa_fexp2_w __builtin_msa_fexp2_w
+extern v2f64 __builtin_msa_fexp2_d(v2f64, v2i64);
+#define __msa_fexp2_d __builtin_msa_fexp2_d
+extern v8i16 __builtin_msa_fexdo_h(v4f32, v4f32);
+#define __msa_fexdo_h __builtin_msa_fexdo_h
+extern v4f32 __builtin_msa_fexdo_w(v2f64, v2f64);
+#define __msa_fexdo_w __builtin_msa_fexdo_w
+extern v8i16 __builtin_msa_ftq_h(v4f32, v4f32);
+#define __msa_ftq_h __builtin_msa_ftq_h
+extern v4i32 __builtin_msa_ftq_w(v2f64, v2f64);
+#define __msa_ftq_w __builtin_msa_ftq_w
+extern v4f32 __builtin_msa_fmin_w(v4f32, v4f32);
+#define __msa_fmin_w __builtin_msa_fmin_w
+extern v2f64 __builtin_msa_fmin_d(v2f64, v2f64);
+#define __msa_fmin_d __builtin_msa_fmin_d
+extern v4f32 __builtin_msa_fmin_a_w(v4f32, v4f32);
+#define __msa_fmin_a_w __builtin_msa_fmin_a_w
+extern v2f64 __builtin_msa_fmin_a_d(v2f64, v2f64);
+#define __msa_fmin_a_d __builtin_msa_fmin_a_d
+extern v4f32 __builtin_msa_fmax_w(v4f32, v4f32);
+#define __msa_fmax_w __builtin_msa_fmax_w
+extern v2f64 __builtin_msa_fmax_d(v2f64, v2f64);
+#define __msa_fmax_d __builtin_msa_fmax_d
+extern v4f32 __builtin_msa_fmax_a_w(v4f32, v4f32);
+#define __msa_fmax_a_w __builtin_msa_fmax_a_w
+extern v2f64 __builtin_msa_fmax_a_d(v2f64, v2f64);
+#define __msa_fmax_a_d __builtin_msa_fmax_a_d
+extern v8i16 __builtin_msa_mul_q_h(v8i16, v8i16);
+#define __msa_mul_q_h __builtin_msa_mul_q_h
+extern v4i32 __builtin_msa_mul_q_w(v4i32, v4i32);
+#define __msa_mul_q_w __builtin_msa_mul_q_w
+extern v8i16 __builtin_msa_mulr_q_h(v8i16, v8i16);
+#define __msa_mulr_q_h __builtin_msa_mulr_q_h
+extern v4i32 __builtin_msa_mulr_q_w(v4i32, v4i32);
+#define __msa_mulr_q_w __builtin_msa_mulr_q_w
+extern v8i16 __builtin_msa_madd_q_h(v8i16, v8i16, v8i16);
+#define __msa_madd_q_h __builtin_msa_madd_q_h
+extern v4i32 __builtin_msa_madd_q_w(v4i32, v4i32, v4i32);
+#define __msa_madd_q_w __builtin_msa_madd_q_w
+extern v8i16 __builtin_msa_maddr_q_h(v8i16, v8i16, v8i16);
+#define __msa_maddr_q_h __builtin_msa_maddr_q_h
+extern v4i32 __builtin_msa_maddr_q_w(v4i32, v4i32, v4i32);
+#define __msa_maddr_q_w __builtin_msa_maddr_q_w
+extern v8i16 __builtin_msa_msub_q_h(v8i16, v8i16, v8i16);
+#define __msa_msub_q_h __builtin_msa_msub_q_h
+extern v4i32 __builtin_msa_msub_q_w(v4i32, v4i32, v4i32);
+#define __msa_msub_q_w __builtin_msa_msub_q_w
+extern v8i16 __builtin_msa_msubr_q_h(v8i16, v8i16, v8i16);
+#define __msa_msubr_q_h __builtin_msa_msubr_q_h
+extern v4i32 __builtin_msa_msubr_q_w(v4i32, v4i32, v4i32);
+#define __msa_msubr_q_w __builtin_msa_msubr_q_w
+extern v4i32 __builtin_msa_fclass_w(v4f32);
+#define __msa_fclass_w __builtin_msa_fclass_w
+extern v2i64 __builtin_msa_fclass_d(v2f64);
+#define __msa_fclass_d __builtin_msa_fclass_d
+extern v4f32 __builtin_msa_fsqrt_w(v4f32);
+#define __msa_fsqrt_w __builtin_msa_fsqrt_w
+extern v2f64 __builtin_msa_fsqrt_d(v2f64);
+#define __msa_fsqrt_d __builtin_msa_fsqrt_d
+extern v4f32 __builtin_msa_frcp_w(v4f32);
+#define __msa_frcp_w __builtin_msa_frcp_w
+extern v2f64 __builtin_msa_frcp_d(v2f64);
+#define __msa_frcp_d __builtin_msa_frcp_d
+extern v4f32 __builtin_msa_frint_w(v4f32);
+#define __msa_frint_w __builtin_msa_frint_w
+extern v2f64 __builtin_msa_frint_d(v2f64);
+#define __msa_frint_d __builtin_msa_frint_d
+extern v4f32 __builtin_msa_frsqrt_w(v4f32);
+#define __msa_frsqrt_w __builtin_msa_frsqrt_w
+extern v2f64 __builtin_msa_frsqrt_d(v2f64);
+#define __msa_frsqrt_d __builtin_msa_frsqrt_d
+extern v4f32 __builtin_msa_flog2_w(v4f32);
+#define __msa_flog2_w __builtin_msa_flog2_w
+extern v2f64 __builtin_msa_flog2_d(v2f64);
+#define __msa_flog2_d __builtin_msa_flog2_d
+extern v4f32 __builtin_msa_fexupl_w(v8i16);
+#define __msa_fexupl_w __builtin_msa_fexupl_w
+extern v2f64 __builtin_msa_fexupl_d(v4f32);
+#define __msa_fexupl_d __builtin_msa_fexupl_d
+extern v4f32 __builtin_msa_fexupr_w(v8i16);
+#define __msa_fexupr_w __builtin_msa_fexupr_w
+extern v2f64 __builtin_msa_fexupr_d(v4f32);
+#define __msa_fexupr_d __builtin_msa_fexupr_d
+extern v4f32 __builtin_msa_ffql_w(v8i16);
+#define __msa_ffql_w __builtin_msa_ffql_w
+extern v2f64 __builtin_msa_ffql_d(v4i32);
+#define __msa_ffql_d __builtin_msa_ffql_d
+extern v4f32 __builtin_msa_ffqr_w(v8i16);
+#define __msa_ffqr_w __builtin_msa_ffqr_w
+extern v2f64 __builtin_msa_ffqr_d(v4i32);
+#define __msa_ffqr_d __builtin_msa_ffqr_d
+extern v4i32 __builtin_msa_ftint_s_w(v4f32);
+#define __msa_ftint_s_w __builtin_msa_ftint_s_w
+extern v2i64 __builtin_msa_ftint_s_d(v2f64);
+#define __msa_ftint_s_d __builtin_msa_ftint_s_d
+extern v4u32 __builtin_msa_ftint_u_w(v4f32);
+#define __msa_ftint_u_w __builtin_msa_ftint_u_w
+extern v2u64 __builtin_msa_ftint_u_d(v2f64);
+#define __msa_ftint_u_d __builtin_msa_ftint_u_d
+extern v4i32 __builtin_msa_ftrunc_s_w(v4f32);
+#define __msa_ftrunc_s_w __builtin_msa_ftrunc_s_w
+extern v2i64 __builtin_msa_ftrunc_s_d(v2f64);
+#define __msa_ftrunc_s_d __builtin_msa_ftrunc_s_d
+extern v4u32 __builtin_msa_ftrunc_u_w(v4f32);
+#define __msa_ftrunc_u_w __builtin_msa_ftrunc_u_w
+extern v2u64 __builtin_msa_ftrunc_u_d(v2f64);
+#define __msa_ftrunc_u_d __builtin_msa_ftrunc_u_d
+extern v4f32 __builtin_msa_ffint_s_w(v4i32);
+#define __msa_ffint_s_w __builtin_msa_ffint_s_w
+extern v2f64 __builtin_msa_ffint_s_d(v2i64);
+#define __msa_ffint_s_d __builtin_msa_ffint_s_d
+extern v4f32 __builtin_msa_ffint_u_w(v4u32);
+#define __msa_ffint_u_w __builtin_msa_ffint_u_w
+extern v2f64 __builtin_msa_ffint_u_d(v2u64);
+#define __msa_ffint_u_d __builtin_msa_ffint_u_d
+extern int __builtin_msa_cfcmsa(unsigned char);
+#define __msa_cfcmsa __builtin_msa_cfcmsa
+extern v16i8 __builtin_msa_move_v(v16i8);
+#define __msa_move_v __builtin_msa_move_v
+extern v4f32 __builtin_msa_cast_to_vector_float(float);
+#define __msa_cast_to_vector_float __builtin_msa_cast_to_vector_float
+extern v2f64 __builtin_msa_cast_to_vector_double(double);
+#define __msa_cast_to_vector_double __builtin_msa_cast_to_vector_double
+extern float __builtin_msa_cast_to_scalar_float(v4f32);
+#define __msa_cast_to_scalar_float __builtin_msa_cast_to_scalar_float
+extern double __builtin_msa_cast_to_scalar_double(v2f64);
+#define __msa_cast_to_scalar_double __builtin_msa_cast_to_scalar_double
+#endif /* !__clang__ */
+#endif /* defined(__mips_msa) */
+#endif /* _MSA_H */
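A minimal usage sketch for the intrinsics declared in this header; purely illustrative (assumes a toolchain configured with -mmsa) and uses only names declared above:

  #include <msa.h>

  /* Element-wise add of two v4i32 vectors, then read back lane 0.  */
  int sum_first_lane (void)
  {
    v4i32 a = __msa_fill_w (3);      /* {3, 3, 3, 3} */
    v4i32 b = __msa_ldi_w (4);       /* {4, 4, 4, 4} */
    v4i32 c = __msa_addv_w (a, b);   /* {7, 7, 7, 7} */
    return __msa_copy_s_w (c, 0);    /* 7 */
  }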
diff --git a/gcc-4.9/gcc/config/mips/mti-elf.h b/gcc-4.9/gcc/config/mips/mti-elf.h
index 76d289eae..9b8076c0b 100644
--- a/gcc-4.9/gcc/config/mips/mti-elf.h
+++ b/gcc-4.9/gcc/config/mips/mti-elf.h
@@ -34,6 +34,9 @@ along with GCC; see the file COPYING3. If not see
or -mgp setting. */ \
"%{!mabi=*: %{" MIPS_32BIT_OPTION_SPEC ": -mabi=32;: -mabi=n32}}", \
\
+ /* If no FP option is specified, infer one from the ABI/ISA level. */\
+ "%{!mfp*: %{mabi=32: %{" MIPS_FPXX_OPTION_SPEC ": -mfpxx}}}", \
+ \
/* Make sure that an endian option is always present. This makes \
things like LINK_SPEC easier to write. */ \
"%{!EB:%{!EL:%(endian_spec)}}", \
diff --git a/gcc-4.9/gcc/config/mips/mti-linux.h b/gcc-4.9/gcc/config/mips/mti-linux.h
index db9896b40..d9b65f82c 100644
--- a/gcc-4.9/gcc/config/mips/mti-linux.h
+++ b/gcc-4.9/gcc/config/mips/mti-linux.h
@@ -19,8 +19,17 @@ along with GCC; see the file COPYING3. If not see
/* This target is a multilib target, specify the sysroot paths. */
#undef SYSROOT_SUFFIX_SPEC
+#if MIPS_ISA_DEFAULT == 33 /* mips32r2 is the default */
#define SYSROOT_SUFFIX_SPEC \
- "%{mips32:/mips32}%{mips64:/mips64}%{mips64r2:/mips64r2}%{mips16:/mips16}%{mmicromips:/micromips}%{mabi=64:/64}%{mel|EL:/el}%{msoft-float:/sof}%{mfp64:/fp64}%{mnan=2008:/nan2008}"
+ "%{muclibc:/uclibc}%{mips32:/mips32}%{mips64:/mips64}%{mips64r2:/mips64r2}%{mips32r6:/mips32r6}%{mips64r6:/mips64r6}%{mips16:/mips16}%{mmicromips:/micromips}%{mabi=64:/64}%{mel|EL:/el}%{msoft-float:/sof}%{!mips32r6:%{!mips64r6:%{mnan=2008:/nan2008}}}"
+#elif MIPS_ISA_DEFAULT == 37 /* mips32r6 is the default */
+#define SYSROOT_SUFFIX_SPEC \
+ "%{muclibc:/uclibc}%{mips32:/mips32}%{mips64:/mips64}%{mips32r2:/mips32r2}%{mips64r2:/mips64r2}%{mips64r6:/mips64r6}%{mips16:/mips16}%{mmicromips:/micromips}%{mabi=64:/64}%{mel|EL:/el}%{msoft-float:/sof}%{!mips32r6:%{!mips64r6:%{mnan=2008:/nan2008}}}"
+#else /* Unexpected default ISA. */
+#error No SYSROOT_SUFFIX_SPEC exists for this default ISA
+#endif
+
+#define SYSROOT_HEADERS_SUFFIX_SPEC "%{muclibc:/uclibc}"
#undef DRIVER_SELF_SPECS
#define DRIVER_SELF_SPECS \
@@ -39,6 +48,9 @@ along with GCC; see the file COPYING3. If not see
or -mgp setting. */ \
"%{!mabi=*: %{" MIPS_32BIT_OPTION_SPEC ": -mabi=32;: -mabi=n32}}", \
\
+ /* If no FP option is specified, infer one from the ABI/ISA level. */\
+ "%{!mfp*: %{mabi=32: %{" MIPS_FPXX_OPTION_SPEC ": -mfpxx}}}", \
+ \
/* Base SPECs. */ \
BASE_DRIVER_SELF_SPECS \
\
diff --git a/gcc-4.9/gcc/config/mips/netbsd.h b/gcc-4.9/gcc/config/mips/netbsd.h
index efa28869b..ed41e0f83 100644
--- a/gcc-4.9/gcc/config/mips/netbsd.h
+++ b/gcc-4.9/gcc/config/mips/netbsd.h
@@ -84,21 +84,13 @@ along with GCC; see the file COPYING3. If not see
builtin_define ("__mips=3"); \
else if (ISA_MIPS4) \
builtin_define ("__mips=4"); \
- else if (ISA_MIPS32) \
- { \
- builtin_define ("__mips=32"); \
- builtin_define ("__mips_isa_rev=1"); \
- } \
- else if (ISA_MIPS32R2) \
- { \
- builtin_define ("__mips=32"); \
- builtin_define ("__mips_isa_rev=2"); \
- } \
- else if (ISA_MIPS64) \
- { \
- builtin_define ("__mips=64"); \
- builtin_define ("__mips_isa_rev=1"); \
- } \
+ else if (mips_isa >= 32 && mips_isa < 64) \
+ builtin_define ("__mips=32"); \
+ else if (mips_isa >= 64) \
+ builtin_define ("__mips=64"); \
+ if (mips_isa_rev > 0) \
+ builtin_define_with_int_value ("__mips_isa_rev", \
+ mips_isa_rev); \
\
if (TARGET_HARD_FLOAT) \
builtin_define ("__mips_hard_float"); \
@@ -141,7 +133,8 @@ along with GCC; see the file COPYING3. If not see
"%{EL:-m elf32lmip} \
%{EB:-m elf32bmip} \
%(endian_spec) \
- %{G*} %{mips1} %{mips2} %{mips3} %{mips4} %{mips32} %{mips32r2} %{mips64} \
+ %{G*} %{mips1} %{mips2} %{mips3} %{mips4} %{mips32} %{mips32r2} \
+ %{mips32r6} %{mips64} %{mips64r6} \
%(netbsd_link_spec)"
#define NETBSD_ENTRY_POINT "__start"
diff --git a/gcc-4.9/gcc/config/mips/p5600.md b/gcc-4.9/gcc/config/mips/p5600.md
new file mode 100644
index 000000000..14d417fcc
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/p5600.md
@@ -0,0 +1,304 @@
+;; DFA-based pipeline description for P5600.
+;;
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "p5600_agen_pipe, p5600_alu_pipe, p5600_fpu_pipe")
+
+;; The address generation queue (AGQ) has AL2, CTISTD and LDSTA pipes
+(define_cpu_unit "p5600_agq, p5600_al2, p5600_ctistd, p5600_ldsta,
+ p5600_gpdiv" "p5600_agen_pipe")
+
+;; The arithmetic-logic-unit queue (ALQ) has ALU pipe
+(define_cpu_unit "p5600_alq, p5600_alu" "p5600_alu_pipe")
+
+;; The floating-point-unit queue (FPQ) has short and long pipes
+(define_cpu_unit "p5600_fpu_short, p5600_fpu_long" "p5600_fpu_pipe")
+
+;; Short FPU pipeline.
+(define_cpu_unit "p5600_fpu_intadd, p5600_fpu_cmp, p5600_fpu_float,
+ p5600_fpu_logic_a, p5600_fpu_logic_b, p5600_fpu_div,
+ p5600_fpu_store" "p5600_fpu_pipe")
+
+;; Long FPU pipeline.
+(define_cpu_unit "p5600_fpu_logic, p5600_fpu_float_a, p5600_fpu_float_b,
+ p5600_fpu_float_c, p5600_fpu_float_d" "p5600_fpu_pipe")
+(define_cpu_unit "p5600_fpu_mult, p5600_fpu_fdiv, p5600_fpu_load,
+ p5600_fpu_apu" "p5600_fpu_pipe")
+
+(define_reservation "p5600_agq_al2" "p5600_agq, p5600_al2")
+(define_reservation "p5600_agq_ctistd" "p5600_agq, p5600_ctistd")
+(define_reservation "p5600_agq_ldsta" "p5600_agq, p5600_ldsta")
+(define_reservation "p5600_alq_alu" "p5600_alq, p5600_alu")
+
+;;
+;; FPU-MSA pipe
+;;
+
+;; Arithmetic
+;; add, hadd, sub, hsub, average, min, max, compare
+(define_insn_reservation "msa_short_int_add" 2
+ (eq_attr "msa_execunit" "msa_eu_int_add")
+ "p5600_fpu_short, p5600_fpu_intadd")
+
+;; Bitwise Instructions
+;; and, or, xor, bit-clear, leading-bits-count, shift, shuffle
+(define_insn_reservation "msa_short_logic" 2
+ (eq_attr "msa_execunit" "msa_eu_logic")
+ "p5600_fpu_short, p5600_fpu_logic_a")
+
+;; move.v
+(define_insn_reservation "msa_short_logic_move_v" 2
+ (and (eq_attr "type" "fmove")
+ (eq_attr "mode" "TI"))
+ "p5600_fpu_short, p5600_fpu_logic_a")
+
+;; Float compare
+(define_insn_reservation "msa_short_cmp" 2
+ (eq_attr "msa_execunit" "msa_eu_cmp")
+ "p5600_fpu_short, p5600_fpu_cmp")
+
+;; Float exp2, min, max
+(define_insn_reservation "msa_short_float2" 2
+ (eq_attr "msa_execunit" "msa_eu_float2")
+ "p5600_fpu_short, p5600_fpu_float")
+
+;; Vector sat
+(define_insn_reservation "msa_short_logic3" 3
+ (eq_attr "msa_execunit" "msa_eu_logic3")
+ "p5600_fpu_short, p5600_fpu_logic_a, p5600_fpu_logic_b")
+
+;; Vector copy, bz, bnz
+(define_insn_reservation "msa_short_store4" 4
+ (eq_attr "msa_execunit" "msa_eu_store4")
+ "p5600_fpu_short, p5600_fpu_store")
+
+;; Vector load
+(define_insn_reservation "msa_long_load" 10
+ (and (eq_attr "type" "fpload")
+ (eq_attr "mode" "TI"))
+ "p5600_fpu_long, p5600_fpu_load")
+
+;; Vector store
+(define_insn_reservation "msa_short_store" 2
+ (and (eq_attr "type" "fpstore")
+ (eq_attr "mode" "TI"))
+ "p5600_fpu_short, p5600_fpu_store")
+
+;; binsl, binsr, insert, vshf, sld
+(define_insn_reservation "msa_long_logic" 2
+ (eq_attr "msa_execunit" "msa_eu_logic_l")
+ "p5600_fpu_long, p5600_fpu_logic")
+
+;; Float fclass, flog2
+(define_insn_reservation "msa_long_float2" 2
+ (eq_attr "msa_execunit" "msa_eu_float2_l")
+ "p5600_fpu_long, p5600_fpu_float_a")
+
+;; fadd, fsub
+(define_insn_reservation "msa_long_float4" 4
+ (eq_attr "msa_execunit" "msa_eu_float4")
+ "p5600_fpu_long, p5600_fpu_float_a, p5600_fpu_float_b")
+
+;; fmul
+(define_insn_reservation "msa_long_float5" 5
+ (eq_attr "msa_execunit" "msa_eu_float5")
+ "p5600_fpu_long, p5600_fpu_float_a, p5600_fpu_float_b, p5600_fpu_float_c")
+
+;; fmadd, fmsub
+(define_insn_reservation "msa_long_float8" 8
+ (eq_attr "msa_execunit" "msa_eu_float8")
+ "p5600_fpu_long, p5600_fpu_float_a,
+ p5600_fpu_float_b, p5600_fpu_float_c, p5600_fpu_float_d")
+
+;; Vector mul, dotp, madd, msub
+(define_insn_reservation "msa_long_mult" 5
+ (eq_attr "msa_execunit" "msa_eu_mult")
+ "p5600_fpu_long, p5600_fpu_mult")
+
+;; fdiv, fmod (semi-pipelined)
+(define_insn_reservation "msa_long_fdiv" 10
+ (eq_attr "msa_execunit" "msa_eu_fdiv")
+ "p5600_fpu_long, nothing, nothing, p5600_fpu_fdiv*8")
+
+;; div, mod (non-pipelined)
+(define_insn_reservation "msa_long_div" 10
+ (eq_attr "msa_execunit" "msa_eu_div")
+ "p5600_fpu_long, p5600_fpu_div*9, p5600_fpu_div + p5600_fpu_logic_a")
+
+;;
+;; FPU pipe
+;;
+
+;; fadd, fsub
+(define_insn_reservation "p5600_fpu_fadd" 4
+ (eq_attr "type" "fadd,fabs,fneg")
+ "p5600_fpu_long, p5600_fpu_apu")
+
+;; fabs, fneg, fcmp
+(define_insn_reservation "p5600_fpu_fabs" 2
+ (eq_attr "type" "fabs,fneg,fcmp,fmove")
+ "p5600_fpu_short, p5600_fpu_apu")
+
+;; fload
+(define_insn_reservation "p5600_fpu_fload" 8
+ (eq_attr "type" "fpload,fpidxload")
+ "p5600_fpu_long, p5600_fpu_apu")
+
+;; fstore
+(define_insn_reservation "p5600_fpu_fstore" 1
+ (eq_attr "type" "fpstore,fpidxstore")
+ "p5600_fpu_short, p5600_fpu_apu")
+
+;; fmadd
+(define_insn_reservation "p5600_fpu_fmadd" 9
+ (eq_attr "type" "fmadd")
+ "p5600_fpu_long, p5600_fpu_apu")
+
+;; fmul
+(define_insn_reservation "p5600_fpu_fmul" 5
+ (eq_attr "type" "fmul")
+ "p5600_fpu_long, p5600_fpu_apu")
+
+;; fdiv, fsqrt
+(define_insn_reservation "p5600_fpu_div" 17
+ (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt")
+ "p5600_fpu_long, p5600_fpu_apu*17")
+
+;; fcvt
+(define_insn_reservation "p5600_fpu_fcvt" 4
+ (eq_attr "type" "fcvt")
+ "p5600_fpu_long, p5600_fpu_apu")
+
+;; mtc
+(define_insn_reservation "p5600_fpu_fmtc" 7
+ (eq_attr "type" "mtc")
+ "p5600_fpu_short, p5600_fpu_store")
+
+;; mfc
+(define_insn_reservation "p5600_fpu_fmfc" 4
+ (eq_attr "type" "mfc")
+ "p5600_fpu_short, p5600_fpu_store")
+
+;; madd/msub feeding into the add source
+;; madd.fmt dst, x, y, z -> madd.fmt a, dst, b, c 5 cycles
+(define_bypass 5 "p5600_fpu_fmadd" "p5600_fpu_fmadd" "mips_fmadd_bypass")
+
+;;
+;; Integer pipe
+;;
+
+;; and
+(define_insn_reservation "p5600_int_and" 1
+ (eq_attr "move_type" "logical")
+ "p5600_alq_alu")
+
+;; lui
+(define_insn_reservation "p5600_int_lui" 1
+ (eq_attr "move_type" "const")
+ "p5600_alq_alu")
+
+;; Load lb, lbu, lh, lhu, lq, lw, lw_i2f, lwxs
+(define_insn_reservation "p5600_int_load" 4
+ (eq_attr "move_type" "load")
+ "p5600_agq_ldsta")
+
+;; store
+(define_insn_reservation "p5600_int_store" 3
+ (eq_attr "move_type" "store")
+ "p5600_agq_ldsta")
+
+;; andi, sll, srl, seb, seh
+(define_insn_reservation "p5600_int_arith_1" 1
+ (eq_attr "move_type" "andi,sll0,signext")
+ "p5600_agq_al2 | p5600_alq_alu")
+
+;; addi, addiu, ori, xori, add, addu
+(define_insn_reservation "p5600_int_arith_2" 1
+ (eq_attr "alu_type" "add,or,xor")
+ "p5600_agq_al2 | p5600_alq_alu")
+
+;; nor, sub
+(define_insn_reservation "p5600_int_arith_3" 1
+ (eq_attr "alu_type" "nor,sub")
+ "p5600_alq_alu")
+
+;; srl, sra, rotr, slt, sllv, srlv
+(define_insn_reservation "p5600_int_arith_4" 1
+ (eq_attr "type" "shift,slt,move")
+ "p5600_agq_al2 | p5600_alq_alu")
+
+;; nop
+(define_insn_reservation "p5600_int_nop" 0
+ (eq_attr "type" "nop")
+ "p5600_agq_al2")
+
+;; clo, clz
+(define_insn_reservation "p5600_int_countbits" 1
+ (eq_attr "type" "clz")
+ "p5600_agq_al2")
+
+;; Conditional moves
+(define_insn_reservation "p5600_int_condmove" 1
+ (eq_attr "type" "condmove")
+ "p5600_agq_al2")
+
+;; madd, msub
+(define_insn_reservation "p5600_dsp_mac" 5
+ (eq_attr "type" "imadd")
+ "p5600_agq_al2")
+
+;; mfhi/lo
+(define_insn_reservation "p5600_dsp_mfhilo" 1
+ (eq_attr "type" "mfhi,mflo")
+ "p5600_agq_al2")
+
+;; mthi/lo
+(define_insn_reservation "p5600_dsp_mthilo" 5
+ (eq_attr "type" "mthi,mtlo")
+ "p5600_agq_al2")
+
+;; mult, multu, mul
+(define_insn_reservation "p5600_dsp_mult" 5
+ (eq_attr "type" "imul3,imul")
+ "p5600_agq_al2")
+
+;; branch and jump
+(define_insn_reservation "p5600_int_branch" 1
+ (eq_attr "type" "branch,jump")
+ "p5600_agq_ctistd")
+
+;; prefetch
+(define_insn_reservation "p5600_int_prefetch" 3
+ (eq_attr "type" "prefetch,prefetchx")
+ "p5600_agq_ldsta")
+
+;; divide
+(define_insn_reservation "p5600_int_div" 8
+ (eq_attr "type" "idiv")
+ "p5600_agq_al2+p5600_gpdiv*8")
+
+;; arith
+(define_insn_reservation "p5600_int_arith_5" 2
+ (eq_attr "type" "arith")
+ "p5600_agq_al2")
+
+;; call
+(define_insn_reservation "p5600_int_call" 2
+ (eq_attr "jal" "indirect,direct")
+ "p5600_agq_ctistd")
diff --git a/gcc-4.9/gcc/config/mips/predicates.md b/gcc-4.9/gcc/config/mips/predicates.md
index 8ac8e0b6a..cdb989ff5 100644
--- a/gcc-4.9/gcc/config/mips/predicates.md
+++ b/gcc-4.9/gcc/config/mips/predicates.md
@@ -33,10 +33,38 @@
(ior (match_operand 0 "const_arith_operand")
(match_operand 0 "register_operand")))
+(define_predicate "const_immlsa_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 1, 4)")))
+
+(define_predicate "const_msa_branch_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), -1024, 1023)")))
+
+(define_predicate "const_uimm3_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
+
+(define_predicate "const_uimm4_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 15)")))
+
+(define_predicate "const_uimm5_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 31)")))
+
(define_predicate "const_uimm6_operand"
(and (match_code "const_int")
(match_test "UIMM6_OPERAND (INTVAL (op))")))
+(define_predicate "const_uimm8_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 255)")))
+
+(define_predicate "const_imm5_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), -16, 15)")))
+
(define_predicate "const_imm10_operand"
(and (match_code "const_int")
(match_test "IMM10_OPERAND (INTVAL (op))")))
@@ -45,6 +73,22 @@
(ior (match_operand 0 "const_imm10_operand")
(match_operand 0 "register_operand")))
+(define_predicate "aq10b_operand"
+ (and (match_code "const_int")
+ (match_test "mips_signed_immediate_p (INTVAL (op), 10, 0)")))
+
+(define_predicate "aq10h_operand"
+ (and (match_code "const_int")
+ (match_test "mips_signed_immediate_p (INTVAL (op), 10, 1)")))
+
+(define_predicate "aq10w_operand"
+ (and (match_code "const_int")
+ (match_test "mips_signed_immediate_p (INTVAL (op), 10, 2)")))
+
+(define_predicate "aq10d_operand"
+ (and (match_code "const_int")
+ (match_test "mips_signed_immediate_p (INTVAL (op), 10, 3)")))
+
(define_predicate "sle_operand"
(and (match_code "const_int")
(match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
@@ -57,6 +101,14 @@
(and (match_code "const_int,const_double,const_vector")
(match_test "op == CONST0_RTX (GET_MODE (op))")))
+(define_predicate "const_m1_operand"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op == CONSTM1_RTX (GET_MODE (op))")))
+
+(define_predicate "reg_or_m1_operand"
+ (ior (match_operand 0 "const_m1_operand")
+ (match_operand 0 "register_operand")))
+
(define_predicate "reg_or_0_operand"
(ior (and (match_operand 0 "const_0_operand")
(not (match_test "TARGET_MIPS16")))
@@ -492,3 +544,198 @@
(define_predicate "non_volatile_mem_operand"
(and (match_operand 0 "memory_operand")
(not (match_test "MEM_VOLATILE_P (op)"))))
+
+(define_predicate "const_vector_same_uimm3_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_same_int_p (op, mode, 0, 7);
+})
+
+(define_predicate "const_vector_same_v16qi_set_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_bitimm_set_p (op, mode);
+})
+
+(define_predicate "const_vector_same_v16qi_clr_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_bitimm_clr_p (op, mode);
+})
+
+(define_predicate "const_vector_same_cmpsimm4_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_same_int_p (op, mode, -16, 15);
+})
+
+(define_predicate "const_vector_same_cmpuimm4_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_same_int_p (op, mode, 0, 31);
+})
+
+(define_predicate "const_vector_same_simm10_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_same_int_p (op, mode, -1024, 1023);
+})
+
+(define_predicate "const_vector_same_uimm4_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_same_int_p (op, mode, 0, 15);
+})
+
+(define_predicate "const_vector_same_v8hi_set_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_bitimm_set_p (op, mode);
+})
+
+(define_predicate "const_vector_same_v8hi_clr_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_bitimm_clr_p (op, mode);
+})
+
+(define_predicate "const_vector_same_v4si_set_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_bitimm_set_p (op, mode);
+})
+
+(define_predicate "const_vector_same_v4si_clr_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_bitimm_clr_p (op, mode);
+})
+
+(define_predicate "const_vector_same_uimm6_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_same_int_p (op, mode, 0, 63);
+})
+
+(define_predicate "const_vector_same_v2di_set_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_bitimm_set_p (op, mode);
+})
+
+(define_predicate "const_vector_same_v2di_clr_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_bitimm_clr_p (op, mode);
+})
+
+(define_predicate "const_vector_same_ximm5_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_same_int_p (op, mode, -31, 31);
+})
+
+(define_predicate "const_vector_same_simm5_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_same_int_p (op, mode, -32, 0);
+})
+
+(define_predicate "const_vector_same_uimm5_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_same_int_p (op, mode, 0, 31);
+})
+
+(define_predicate "const_vector_same_uimm8_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_same_int_p (op, mode, 0, 255);
+})
+
+(define_predicate "const_vector_same_byte_operand"
+ (match_code "const_vector")
+{
+ return mips_const_vector_same_byte_p (op, mode);
+})
+
+(define_predicate "reg_or_vector_same_ximm5_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_ximm5_operand")))
+
+(define_predicate "reg_or_vector_same_simm5_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_simm5_operand")))
+
+(define_predicate "reg_or_vector_same_uimm5_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_uimm5_operand")))
+
+(define_predicate "reg_or_vector_same_uimm3_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_uimm3_operand")))
+
+(define_predicate "reg_or_vector_same_v16qi_set_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_v16qi_set_operand")))
+
+(define_predicate "reg_or_vector_same_v16qi_clr_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_v16qi_clr_operand")))
+
+(define_predicate "reg_or_vector_same_uimm4_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_uimm4_operand")))
+
+(define_predicate "reg_or_vector_same_v8hi_set_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_v8hi_set_operand")))
+
+(define_predicate "reg_or_vector_same_v8hi_clr_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_v8hi_clr_operand")))
+
+(define_predicate "reg_or_vector_same_v4si_set_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_v4si_set_operand")))
+
+(define_predicate "reg_or_vector_same_v4si_clr_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_v4si_clr_operand")))
+
+(define_predicate "reg_or_vector_same_uimm6_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_uimm6_operand")))
+
+(define_predicate "reg_or_vector_same_v2di_set_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_v2di_set_operand")))
+
+(define_predicate "reg_or_vector_same_v2di_clr_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_v2di_clr_operand")))
+
+(define_predicate "reg_or_vector_same_uimm8_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_uimm8_operand")))
+
+(define_predicate "reg_or_vector_same_byte_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_byte_operand")))
+
+(define_predicate "reg_or_vector_same_bitumm3_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_uimm3_operand")))
+
+(define_predicate "reg_or_vector_same_bituimm4_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_uimm4_operand")))
+
+(define_predicate "reg_or_vector_same_bituimm5_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_uimm5_operand")))
+
+(define_predicate "reg_or_vector_same_bituimm6_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_vector_same_uimm6_operand")))
+
diff --git a/gcc-4.9/gcc/config/mips/t-img-elf b/gcc-4.9/gcc/config/mips/t-img-elf
new file mode 100644
index 000000000..14733317e
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-img-elf
@@ -0,0 +1,36 @@
+# Copyright (C) 2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# The default build is mips32r6, hard-float big-endian.
+# A multilib for mips32r6+LE
+# A multilib for mips64r6
+# A multilib for mips64r6+LE
+# A multilib for mips32r6+LE+singlefloat+shortdouble
+
+MULTILIB_OPTIONS = mips64r6 EL msingle-float fshort-double
+MULTILIB_DIRNAMES = mips64r6 el sgl short
+MULTILIB_MATCHES = EL=mel EB=meb
+
+# Don't build 64r6 with single-float
+MULTILIB_EXCEPTIONS += mips64r6/*msingle-float*
+MULTILIB_EXCEPTIONS += mips64r6/*fshort-double*
+
+MULTILIB_EXCEPTIONS += msingle-float*
+MULTILIB_EXCEPTIONS += *msingle-float
+MULTILIB_EXCEPTIONS += fshort-double
+MULTILIB_EXCEPTIONS += EL/fshort-double
diff --git a/gcc-4.9/gcc/config/mips/t-img-linux b/gcc-4.9/gcc/config/mips/t-img-linux
new file mode 100644
index 000000000..5dbfbe45e
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-img-linux
@@ -0,0 +1,30 @@
+# Copyright (C) 2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# The default build is mips32r6, hard-float big-endian. Add mips64r6,
+# 64-bit ABI and little-endian variations.
+
+MULTILIB_OPTIONS = mips64r6 mabi=64 EL
+MULTILIB_DIRNAMES = mips64r6 64 el
+MULTILIB_MATCHES = EL=mel EB=meb
+
+# The 64 bit ABI is not supported on the mips32r6 architecture.
+# Because mips32r6 is the default we can't use that flag to trigger
+# the exception so we check for mabi=64 with no specific mips
+# architecture flag instead.
+MULTILIB_EXCEPTIONS += mabi=64*
diff --git a/gcc-4.9/gcc/config/mips/t-isa3264 b/gcc-4.9/gcc/config/mips/t-isa3264
index a5e001ef7..845574523 100644
--- a/gcc-4.9/gcc/config/mips/t-isa3264
+++ b/gcc-4.9/gcc/config/mips/t-isa3264
@@ -22,12 +22,12 @@ ifneq ($(filter MIPS_ABI_DEFAULT=ABI_EABI,$(tm_defines)),)
MULTILIB_OPTIONS = msoft-float EL/EB mips32/mips32r2/mips64/mips64r2
MULTILIB_DIRNAMES = soft-float el eb mips32 mips32r2 mips64 mips64r2
else
-MULTILIB_OPTIONS = msoft-float/mfp64 EL/EB mips32/mips32r2/mips64/mips64r2
-MULTILIB_DIRNAMES = soft-float fp64 el eb mips32 mips32r2 mips64 mips64r2
+MULTILIB_OPTIONS = msoft-float/mfp64 EL/EB mips32/mips32r2/mips32r6/mips64/mips64r2/mips64r6
+MULTILIB_DIRNAMES = soft-float fp64 el eb mips32 mips32r2 mips32r6 mips64 mips64r2 mips64r6
ifneq ($(filter MIPS_ISA_DEFAULT=33,$(tm_defines)),)
-MULTILIB_EXCLUSIONS = mips32/mfp64 mips64/mfp64 mips64r2/mfp64
+MULTILIB_EXCLUSIONS = mips32/mfp64 mips64/mfp64 mips64r2/mfp64 mips32r6/mfp64 mips64r6/mfp64
else
MULTILIB_EXCLUSIONS = !mips32r2/mfp64
endif
endif
-MULTILIB_MATCHES = EL=mel EB=meb
+MULTILIB_MATCHES = EL=mel EB=meb mips32r2=mips32r3 mips32r2=mips32r5 mips64r2=mips64r3 mips64r2=mips64r5
diff --git a/gcc-4.9/gcc/config/mips/t-linux-android b/gcc-4.9/gcc/config/mips/t-linux-android
index 298cad9d5..00b4e3ac2 100644
--- a/gcc-4.9/gcc/config/mips/t-linux-android
+++ b/gcc-4.9/gcc/config/mips/t-linux-android
@@ -1,3 +1,3 @@
-MULTILIB_OPTIONS = mips32r2
-MULTILIB_DIRNAMES = mips-r2
+MULTILIB_OPTIONS = mips32r2 mips32r6
+MULTILIB_DIRNAMES = mips-r2 mips-r6
MULTILIB_EXCLUSIONS :=
diff --git a/gcc-4.9/gcc/config/mips/t-mti-elf b/gcc-4.9/gcc/config/mips/t-mti-elf
index 1109ea716..75cf0da77 100644
--- a/gcc-4.9/gcc/config/mips/t-mti-elf
+++ b/gcc-4.9/gcc/config/mips/t-mti-elf
@@ -19,9 +19,9 @@
# The default build is mips32r2, hard-float big-endian. Add mips32,
# soft-float, and little-endian variations.
-MULTILIB_OPTIONS = mips32/mips64/mips64r2 mips16/mmicromips mabi=64 EL msoft-float/mfp64 mnan=2008
-MULTILIB_DIRNAMES = mips32 mips64 mips64r2 mips16 micromips 64 el sof fp64 nan2008
-MULTILIB_MATCHES = EL=mel EB=meb
+MULTILIB_OPTIONS = mips64r2 mmicromips mabi=64 EL msoft-float mnan=2008
+MULTILIB_DIRNAMES = mips64r2 micromips 64 el sof nan2008
+MULTILIB_MATCHES = EL=mel EB=meb mips32r2=mips32r3 mips32r2=mips32r5 mips64r2=mips64r3 mips64r2=mips64r5
# The 64 bit ABI is not supported on the mips32 architecture.
MULTILIB_EXCEPTIONS += *mips32*/*mabi=64*
@@ -44,7 +44,9 @@ MULTILIB_EXCEPTIONS += *mmicromips/mabi=64*
# We do not want nan2008 libraries for soft-float.
MULTILIB_EXCEPTIONS += *msoft-float*/*mnan=2008*
-# -mfp64 libraries are only built for mips32r2 and not in mips16 mode.
-MULTILIB_EXCEPTIONS += *mips32/*mfp64*
-MULTILIB_EXCEPTIONS += *mips64*/*mfp64*
-MULTILIB_EXCEPTIONS += *mips16*/*mfp64*
+# Cutbacks for released build
+MULTILIB_EXCEPTIONS += *mips64*/*mnan=2008*
+MULTILIB_EXCEPTIONS += *micromips/EL
+MULTILIB_EXCEPTIONS += *micromips/msoft-float
+MULTILIB_EXCEPTIONS += *micromips/mnan=2008*
+MULTILIB_EXCEPTIONS += *micromips
diff --git a/gcc-4.9/gcc/config/mips/t-mti-linux b/gcc-4.9/gcc/config/mips/t-mti-linux
index 1109ea716..850604304 100644
--- a/gcc-4.9/gcc/config/mips/t-mti-linux
+++ b/gcc-4.9/gcc/config/mips/t-mti-linux
@@ -19,9 +19,9 @@
# The default build is mips32r2, hard-float big-endian. Add mips32,
# soft-float, and little-endian variations.
-MULTILIB_OPTIONS = mips32/mips64/mips64r2 mips16/mmicromips mabi=64 EL msoft-float/mfp64 mnan=2008
-MULTILIB_DIRNAMES = mips32 mips64 mips64r2 mips16 micromips 64 el sof fp64 nan2008
-MULTILIB_MATCHES = EL=mel EB=meb
+MULTILIB_OPTIONS = muclibc mips64r2 mmicromips mabi=64 EL mnan=2008
+MULTILIB_DIRNAMES = uclibc mips64r2 micromips 64 el nan2008
+MULTILIB_MATCHES = EL=mel EB=meb mips32r2=mips32r3 mips32r2=mips32r5 mips64r2=mips64r3 mips64r2=mips64r5
# The 64 bit ABI is not supported on the mips32 architecture.
MULTILIB_EXCEPTIONS += *mips32*/*mabi=64*
@@ -44,7 +44,11 @@ MULTILIB_EXCEPTIONS += *mmicromips/mabi=64*
# We do not want nan2008 libraries for soft-float.
MULTILIB_EXCEPTIONS += *msoft-float*/*mnan=2008*
-# -mfp64 libraries are only built for mips32r2 and not in mips16 mode.
-MULTILIB_EXCEPTIONS += *mips32/*mfp64*
-MULTILIB_EXCEPTIONS += *mips64*/*mfp64*
-MULTILIB_EXCEPTIONS += *mips16*/*mfp64*
+# Cutbacks for released build
+MULTILIB_EXCEPTIONS += *uclibc*/*mips64*
+MULTILIB_EXCEPTIONS += *uclibc*/*mabi=64*
+MULTILIB_EXCEPTIONS += *uclibc*/*micromips*
+MULTILIB_EXCEPTIONS += *mips64*/*mnan=2008*
+MULTILIB_EXCEPTIONS += *micromips/EL
+MULTILIB_EXCEPTIONS += *micromips/mnan=2008*
+MULTILIB_EXCEPTIONS += *micromips
diff --git a/gcc-4.9/gcc/config/mips/t-sde b/gcc-4.9/gcc/config/mips/t-sde
index 229e3d644..c04b5f347 100644
--- a/gcc-4.9/gcc/config/mips/t-sde
+++ b/gcc-4.9/gcc/config/mips/t-sde
@@ -18,7 +18,7 @@
MULTILIB_OPTIONS = EL/EB mips32/mips32r2/mips64/mips64r2 mips16/mmicromips msoft-float/mfp64 mcode-readable=no
MULTILIB_DIRNAMES = el eb mips32 mips32r2 mips64 mips64r2 mips16 micromips sof f64 spram
-MULTILIB_MATCHES = EL=mel EB=meb
+MULTILIB_MATCHES = EL=mel EB=meb mips32r2=mips32r3 mips32r2=mips32r5 mips64r2=mips64r3 mips64r2=mips64r5
# The -mfp64 option is only valid in conjunction with -mips32r2.
ifneq ($(filter MIPS_ISA_DEFAULT=33,$(tm_defines)),)
diff --git a/gcc-4.9/gcc/config/mips/t-sdemtk b/gcc-4.9/gcc/config/mips/t-sdemtk
index 820faa305..2c1dea804 100644
--- a/gcc-4.9/gcc/config/mips/t-sdemtk
+++ b/gcc-4.9/gcc/config/mips/t-sdemtk
@@ -21,6 +21,7 @@
MULTILIB_OPTIONS = EL/EB mips32/mips32r2/mips64/mips64r2 mips16 msoft-float/mno-float/mfp64
MULTILIB_DIRNAMES = el eb mips32 mips32r2 mips64 mips64r2 mips16 sof nof f64
+MULTILIB_MATCHES = mips32r2=mips32r3 mips32r2=mips32r5 mips64r2=mips64r3 mips64r2=mips64r5
# Remove stdarg.h and stddef.h from USER_H.
USER_H = $(srcdir)/ginclude/float.h \
diff --git a/gcc-4.9/gcc/configure b/gcc-4.9/gcc/configure
index 5408d8fbe..0df58deb1 100755
--- a/gcc-4.9/gcc/configure
+++ b/gcc-4.9/gcc/configure
@@ -26118,6 +26118,41 @@ $as_echo "#define HAVE_AS_GNU_ATTRIBUTE 1" >>confdefs.h
fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for .module support" >&5
+$as_echo_n "checking assembler for .module support... " >&6; }
+if test "${gcc_cv_as_mips_module+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ gcc_cv_as_mips_module=no
+ if test x$gcc_cv_as != x; then
+ $as_echo '.module fp=32' > conftest.s
+ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -o conftest.o conftest.s >&5'
+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }
+ then
+ gcc_cv_as_mips_module=yes
+ else
+ echo "configure: failed program was" >&5
+ cat conftest.s >&5
+ fi
+ rm -f conftest.o conftest.s
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_mips_module" >&5
+$as_echo "$gcc_cv_as_mips_module" >&6; }
+if test $gcc_cv_as_mips_module = yes; then
+
+$as_echo "#define HAVE_AS_MODULE 1" >>confdefs.h
+
+fi
+ if test x$gcc_cv_as_mips_module = xno \
+ && test x$with_fp != x; then
+ as_fn_error "Requesting --with-fp= requires assembler support for .module." "$LINENO" 5
+ fi
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for .micromips support" >&5
$as_echo_n "checking assembler for .micromips support... " >&6; }
if test "${gcc_cv_as_micromips_support+set}" = set; then :
diff --git a/gcc-4.9/gcc/configure.ac b/gcc-4.9/gcc/configure.ac
index 011ec822e..a130db62e 100644
--- a/gcc-4.9/gcc/configure.ac
+++ b/gcc-4.9/gcc/configure.ac
@@ -4230,6 +4230,17 @@ LCF0:
[AC_DEFINE(HAVE_AS_GNU_ATTRIBUTE, 1,
[Define if your assembler supports .gnu_attribute.])])
+ gcc_GAS_CHECK_FEATURE([.module support],
+ gcc_cv_as_mips_module,,,
+ [.module fp=32],,
+ [AC_DEFINE(HAVE_AS_MODULE, 1,
+ [Define if your assembler supports .module.])])
+ if test x$gcc_cv_as_mips_module = xno \
+ && test x$with_fp != x; then
+ AC_MSG_ERROR(
+ [Requesting --with-fp= requires assembler support for .module.])
+ fi
+
gcc_GAS_CHECK_FEATURE([.micromips support],
gcc_cv_as_micromips_support,,[--fatal-warnings],
[.set micromips],,
diff --git a/gcc-4.9/gcc/doc/extend.texi b/gcc-4.9/gcc/doc/extend.texi
index 9780d9238..169857fc9 100644
--- a/gcc-4.9/gcc/doc/extend.texi
+++ b/gcc-4.9/gcc/doc/extend.texi
@@ -9122,6 +9122,7 @@ instructions, but allow the compiler to schedule those calls.
* MIPS DSP Built-in Functions::
* MIPS Paired-Single Support::
* MIPS Loongson Built-in Functions::
+* MIPS SIMD Architecture Functions::
* Other MIPS Built-in Functions::
* MSP430 Built-in Functions::
* NDS32 Built-in Functions::
@@ -12259,6 +12260,8 @@ value is the upper one. The opposite order applies to big-endian targets.
For example, the code above sets the lower half of @code{a} to
@code{1.5} on little-endian targets and @code{9.1} on big-endian targets.
+
+
@node MIPS Loongson Built-in Functions
@subsection MIPS Loongson Built-in Functions
@@ -12385,6 +12388,786 @@ int16x4_t punpcklhw_s (int16x4_t s, int16x4_t t);
int8x8_t punpcklbh_s (int8x8_t s, int8x8_t t);
@end smallexample
+@node MIPS SIMD Architecture Functions
+@subsection MIPS SIMD Architecture Functions
+
+GCC provides intrinsics to access the SIMD instructions provided by the
+MSA MIPS SIMD Architecture. The interface is made available by
+including @code{<msa.h>} and using @option{-mmsa -mhard-float -mfp64 -mnan=2008}.
+
+The following vector typedefs are included in @code{<msa.h>}:
+
+@itemize
+@item @code{v16i8}, a vector of sixteen signed 8-bit integers;
+@item @code{v16u8}, a vector of sixteen unsigned 8-bit integers;
+@item @code{v8i16}, a vector of eight signed 16-bit integers;
+@item @code{v8u16}, a vector of eight unsigned 16-bit integers;
+@item @code{v4i32}, a vector of four signed 32-bit integers;
+@item @code{v4u32}, a vector of four unsigned 32-bit integers;
+@item @code{v2i64}, a vector of two signed 64-bit integers;
+@item @code{v2u64}, a vector of two unsigned 64-bit integers;
+@item @code{v4f32}, a vector of four 32-bit floats;
+@item @code{v2f64}, a vector of two 64-bit doubles.
+@end itemize
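+
+As a brief, illustrative sketch (assuming, as in @code{<msa.h>}, that these
+typedefs are 128-bit GCC vector types), vectors of these types can be
+declared, initialized and indexed with the generic vector extensions:
+
+@smallexample
+#include <msa.h>
+
+v4i32 four_ints = @{1, 2, 3, 4@};
+v2f64 two_doubles = @{1.5, 9.1@};
+
+int
+first_lane (void)
+@{
+  return four_ints[0];   /* reads the first 32-bit element */
+@}
+@end smallexample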
+
+The following names are used below for immediate arguments, which must be
+integer literals in the stated ranges:
+
+@itemize
+@item @code{imm0_1}, an integer literal in range 0 to 1;
+@item @code{imm0_3}, an integer literal in range 0 to 3;
+@item @code{imm0_7}, an integer literal in range 0 to 7;
+@item @code{imm0_15}, an integer literal in range 0 to 15;
+@item @code{imm0_31}, an integer literal in range 0 to 31;
+@item @code{imm0_63}, an integer literal in range 0 to 63;
+@item @code{imm0_255}, an integer literal in range 0 to 255;
+@item @code{imm_n16_15}, an integer literal in range -16 to 15;
+@item @code{imm_n512_511}, an integer literal in range -512 to 511;
+@item @code{imm_n1024_1022}, an integer literal in range -512 to 511 left shifted by 1 bit, i.e., -1024, -1022, @dots{}, 1020, 1022;
+@item @code{imm_n2048_2044}, an integer literal in range -512 to 511 left shifted by 2 bits, i.e., -2048, -2044, @dots{}, 2040, 2044;
+@item @code{imm_n4096_4088}, an integer literal in range -512 to 511 left shifted by 3 bits, i.e., -4096, -4088, @dots{}, 4080, 4088;
+@item @code{imm1_4}, an integer literal in range 1 to 4.
+@end itemize
+
+The prototypes below also use the following scalar type names as shorthand:
+
+@smallexample
+@{
+typedef int i32;
+#if __LONG_MAX__ == __LONG_LONG_MAX__
+typedef long i64;
+#else
+typedef long long i64;
+#endif
+
+typedef unsigned int u32;
+#if __LONG_MAX__ == __LONG_LONG_MAX__
+typedef unsigned long u64;
+#else
+typedef unsigned long long u64;
+#endif
+
+typedef double f64;
+typedef float f32;
+@}
+@end smallexample
+
+The intrinsics provided are listed below; each is named after the
+machine instruction.
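+
+As a short, hypothetical sketch of their use (the function name is
+illustrative only), the following combines two of the built-ins documented
+below; note that the second argument of @code{__builtin_msa_addvi_w} must be
+an integer literal in its @code{imm0_31} range:
+
+@smallexample
+#include <msa.h>
+
+v4i32
+offset_and_scale (v4i32 x, v4i32 y)
+@{
+  v4i32 t = __builtin_msa_addvi_w (x, 7);   /* x[i] + 7 for each element */
+  return __builtin_msa_mulv_w (t, y);       /* (x[i] + 7) * y[i] */
+@}
+@end smallexample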
+
+@smallexample
+v16i8 __builtin_msa_add_a_b (v16i8, v16i8);
+v8i16 __builtin_msa_add_a_h (v8i16, v8i16);
+v4i32 __builtin_msa_add_a_w (v4i32, v4i32);
+v2i64 __builtin_msa_add_a_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_adds_a_b (v16i8, v16i8);
+v8i16 __builtin_msa_adds_a_h (v8i16, v8i16);
+v4i32 __builtin_msa_adds_a_w (v4i32, v4i32);
+v2i64 __builtin_msa_adds_a_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_adds_s_b (v16i8, v16i8);
+v8i16 __builtin_msa_adds_s_h (v8i16, v8i16);
+v4i32 __builtin_msa_adds_s_w (v4i32, v4i32);
+v2i64 __builtin_msa_adds_s_d (v2i64, v2i64);
+
+v16u8 __builtin_msa_adds_u_b (v16u8, v16u8);
+v8u16 __builtin_msa_adds_u_h (v8u16, v8u16);
+v4u32 __builtin_msa_adds_u_w (v4u32, v4u32);
+v2u64 __builtin_msa_adds_u_d (v2u64, v2u64);
+
+v16i8 __builtin_msa_addv_b (v16i8, v16i8);
+v8i16 __builtin_msa_addv_h (v8i16, v8i16);
+v4i32 __builtin_msa_addv_w (v4i32, v4i32);
+v2i64 __builtin_msa_addv_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_addvi_b (v16i8, imm0_31);
+v8i16 __builtin_msa_addvi_h (v8i16, imm0_31);
+v4i32 __builtin_msa_addvi_w (v4i32, imm0_31);
+v2i64 __builtin_msa_addvi_d (v2i64, imm0_31);
+
+v16u8 __builtin_msa_and_v (v16u8, v16u8);
+
+v16u8 __builtin_msa_andi_b (v16u8, imm0_255);
+
+v16i8 __builtin_msa_asub_s_b (v16i8, v16i8);
+v8i16 __builtin_msa_asub_s_h (v8i16, v8i16);
+v4i32 __builtin_msa_asub_s_w (v4i32, v4i32);
+v2i64 __builtin_msa_asub_s_d (v2i64, v2i64);
+
+v16u8 __builtin_msa_asub_u_b (v16u8, v16u8);
+v8u16 __builtin_msa_asub_u_h (v8u16, v8u16);
+v4u32 __builtin_msa_asub_u_w (v4u32, v4u32);
+v2u64 __builtin_msa_asub_u_d (v2u64, v2u64);
+
+v16i8 __builtin_msa_ave_s_b (v16i8, v16i8);
+v8i16 __builtin_msa_ave_s_h (v8i16, v8i16);
+v4i32 __builtin_msa_ave_s_w (v4i32, v4i32);
+v2i64 __builtin_msa_ave_s_d (v2i64, v2i64);
+
+v16u8 __builtin_msa_ave_u_b (v16u8, v16u8);
+v8u16 __builtin_msa_ave_u_h (v8u16, v8u16);
+v4u32 __builtin_msa_ave_u_w (v4u32, v4u32);
+v2u64 __builtin_msa_ave_u_d (v2u64, v2u64);
+
+v16i8 __builtin_msa_aver_s_b (v16i8, v16i8);
+v8i16 __builtin_msa_aver_s_h (v8i16, v8i16);
+v4i32 __builtin_msa_aver_s_w (v4i32, v4i32);
+v2i64 __builtin_msa_aver_s_d (v2i64, v2i64);
+
+v16u8 __builtin_msa_aver_u_b (v16u8, v16u8);
+v8u16 __builtin_msa_aver_u_h (v8u16, v8u16);
+v4u32 __builtin_msa_aver_u_w (v4u32, v4u32);
+v2u64 __builtin_msa_aver_u_d (v2u64, v2u64);
+
+v16u8 __builtin_msa_bclr_b (v16u8, v16u8);
+v8u16 __builtin_msa_bclr_h (v8u16, v8u16);
+v4u32 __builtin_msa_bclr_w (v4u32, v4u32);
+v2u64 __builtin_msa_bclr_d (v2u64, v2u64);
+
+v16u8 __builtin_msa_bclri_b (v16u8, imm0_7);
+v8u16 __builtin_msa_bclri_h (v8u16, imm0_15);
+v4u32 __builtin_msa_bclri_w (v4u32, imm0_31);
+v2u64 __builtin_msa_bclri_d (v2u64, imm0_63);
+
+v16u8 __builtin_msa_binsl_b (v16u8, v16u8, v16u8);
+v8u16 __builtin_msa_binsl_h (v8u16, v8u16, v8u16);
+v4u32 __builtin_msa_binsl_w (v4u32, v4u32, v4u32);
+v2u64 __builtin_msa_binsl_d (v2u64, v2u64, v2u64);
+
+v16u8 __builtin_msa_binsli_b (v16u8, v16u8, imm0_7);
+v8u16 __builtin_msa_binsli_h (v8u16, v8u16, imm0_15);
+v4u32 __builtin_msa_binsli_w (v4u32, v4u32, imm0_31);
+v2u64 __builtin_msa_binsli_d (v2u64, v2u64, imm0_63);
+
+v16u8 __builtin_msa_binsr_b (v16u8, v16u8, v16u8);
+v8u16 __builtin_msa_binsr_h (v8u16, v8u16, v8u16);
+v4u32 __builtin_msa_binsr_w (v4u32, v4u32, v4u32);
+v2u64 __builtin_msa_binsr_d (v2u64, v2u64, v2u64);
+
+v16u8 __builtin_msa_binsri_b (v16u8, v16u8, imm0_7);
+v8u16 __builtin_msa_binsri_h (v8u16, v8u16, imm0_15);
+v4u32 __builtin_msa_binsri_w (v4u32, v4u32, imm0_31);
+v2u64 __builtin_msa_binsri_d (v2u64, v2u64, imm0_63);
+
+v16u8 __builtin_msa_bmnz_v (v16u8, v16u8, v16u8);
+
+v16u8 __builtin_msa_bmnzi_b (v16u8, v16u8, imm0_255);
+
+v16u8 __builtin_msa_bmz_v (v16u8, v16u8, v16u8);
+
+v16u8 __builtin_msa_bmzi_b (v16u8, v16u8, imm0_255);
+
+v16u8 __builtin_msa_bneg_b (v16u8, v16u8);
+v8u16 __builtin_msa_bneg_h (v8u16, v8u16);
+v4u32 __builtin_msa_bneg_w (v4u32, v4u32);
+v2u64 __builtin_msa_bneg_d (v2u64, v2u64);
+
+v16u8 __builtin_msa_bnegi_b (v16u8, imm0_7);
+v8u16 __builtin_msa_bnegi_h (v8u16, imm0_15);
+v4u32 __builtin_msa_bnegi_w (v4u32, imm0_31);
+v2u64 __builtin_msa_bnegi_d (v2u64, imm0_63);
+
+i32 __builtin_msa_bnz_b (v16u8);
+i32 __builtin_msa_bnz_h (v8u16);
+i32 __builtin_msa_bnz_w (v4u32);
+i32 __builtin_msa_bnz_d (v2u64);
+
+i32 __builtin_msa_bnz_v (v16u8);
+
+v16u8 __builtin_msa_bsel_v (v16u8, v16u8, v16u8);
+
+v16u8 __builtin_msa_bseli_b (v16u8, v16u8, imm0_255);
+
+v16u8 __builtin_msa_bset_b (v16u8, v16u8);
+v8u16 __builtin_msa_bset_h (v8u16, v8u16);
+v4u32 __builtin_msa_bset_w (v4u32, v4u32);
+v2u64 __builtin_msa_bset_d (v2u64, v2u64);
+
+v16u8 __builtin_msa_bseti_b (v16u8, imm0_7);
+v8u16 __builtin_msa_bseti_h (v8u16, imm0_15);
+v4u32 __builtin_msa_bseti_w (v4u32, imm0_31);
+v2u64 __builtin_msa_bseti_d (v2u64, imm0_63);
+
+i32 __builtin_msa_bz_b (v16u8);
+i32 __builtin_msa_bz_h (v8u16);
+i32 __builtin_msa_bz_w (v4u32);
+i32 __builtin_msa_bz_d (v2u64);
+
+i32 __builtin_msa_bz_v (v16u8);
+
+v16i8 __builtin_msa_ceq_b (v16i8, v16i8);
+v8i16 __builtin_msa_ceq_h (v8i16, v8i16);
+v4i32 __builtin_msa_ceq_w (v4i32, v4i32);
+v2i64 __builtin_msa_ceq_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_ceqi_b (v16i8, imm_n16_15);
+v8i16 __builtin_msa_ceqi_h (v8i16, imm_n16_15);
+v4i32 __builtin_msa_ceqi_w (v4i32, imm_n16_15);
+v2i64 __builtin_msa_ceqi_d (v2i64, imm_n16_15);
+
+i32 __builtin_msa_cfcmsa (imm0_31);
+
+v16i8 __builtin_msa_cle_s_b (v16i8, v16i8);
+v8i16 __builtin_msa_cle_s_h (v8i16, v8i16);
+v4i32 __builtin_msa_cle_s_w (v4i32, v4i32);
+v2i64 __builtin_msa_cle_s_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_cle_u_b (v16u8, v16u8);
+v8i16 __builtin_msa_cle_u_h (v8u16, v8u16);
+v4i32 __builtin_msa_cle_u_w (v4u32, v4u32);
+v2i64 __builtin_msa_cle_u_d (v2u64, v2u64);
+
+v16i8 __builtin_msa_clei_s_b (v16i8, imm_n16_15);
+v8i16 __builtin_msa_clei_s_h (v8i16, imm_n16_15);
+v4i32 __builtin_msa_clei_s_w (v4i32, imm_n16_15);
+v2i64 __builtin_msa_clei_s_d (v2i64, imm_n16_15);
+
+v16i8 __builtin_msa_clei_u_b (v16u8, imm0_31);
+v8i16 __builtin_msa_clei_u_h (v8u16, imm0_31);
+v4i32 __builtin_msa_clei_u_w (v4u32, imm0_31);
+v2i64 __builtin_msa_clei_u_d (v2u64, imm0_31);
+
+v16i8 __builtin_msa_clt_s_b (v16i8, v16i8);
+v8i16 __builtin_msa_clt_s_h (v8i16, v8i16);
+v4i32 __builtin_msa_clt_s_w (v4i32, v4i32);
+v2i64 __builtin_msa_clt_s_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_clt_u_b (v16u8, v16u8);
+v8i16 __builtin_msa_clt_u_h (v8u16, v8u16);
+v4i32 __builtin_msa_clt_u_w (v4u32, v4u32);
+v2i64 __builtin_msa_clt_u_d (v2u64, v2u64);
+
+v16i8 __builtin_msa_clti_s_b (v16i8, imm_n16_15);
+v8i16 __builtin_msa_clti_s_h (v8i16, imm_n16_15);
+v4i32 __builtin_msa_clti_s_w (v4i32, imm_n16_15);
+v2i64 __builtin_msa_clti_s_d (v2i64, imm_n16_15);
+
+v16i8 __builtin_msa_clti_u_b (v16u8, imm0_31);
+v8i16 __builtin_msa_clti_u_h (v8u16, imm0_31);
+v4i32 __builtin_msa_clti_u_w (v4u32, imm0_31);
+v2i64 __builtin_msa_clti_u_d (v2u64, imm0_31);
+
+i32 __builtin_msa_copy_s_b (v16i8, imm0_15);
+i32 __builtin_msa_copy_s_h (v8i16, imm0_7);
+i32 __builtin_msa_copy_s_w (v4i32, imm0_3);
+i64 __builtin_msa_copy_s_d (v2i64, imm0_1);
+
+u32 __builtin_msa_copy_u_b (v16i8, imm0_15);
+u32 __builtin_msa_copy_u_h (v8i16, imm0_7);
+u32 __builtin_msa_copy_u_w (v4i32, imm0_3);
+u64 __builtin_msa_copy_u_d (v2i64, imm0_1);
+
+void __builtin_msa_ctcmsa (imm0_31, i32);
+
+v16i8 __builtin_msa_div_s_b (v16i8, v16i8);
+v8i16 __builtin_msa_div_s_h (v8i16, v8i16);
+v4i32 __builtin_msa_div_s_w (v4i32, v4i32);
+v2i64 __builtin_msa_div_s_d (v2i64, v2i64);
+
+v16u8 __builtin_msa_div_u_b (v16u8, v16u8);
+v8u16 __builtin_msa_div_u_h (v8u16, v8u16);
+v4u32 __builtin_msa_div_u_w (v4u32, v4u32);
+v2u64 __builtin_msa_div_u_d (v2u64, v2u64);
+
+v8i16 __builtin_msa_dotp_s_h (v16i8, v16i8);
+v4i32 __builtin_msa_dotp_s_w (v8i16, v8i16);
+v2i64 __builtin_msa_dotp_s_d (v4i32, v4i32);
+
+v8u16 __builtin_msa_dotp_u_h (v16u8, v16u8);
+v4u32 __builtin_msa_dotp_u_w (v8u16, v8u16);
+v2u64 __builtin_msa_dotp_u_d (v4u32, v4u32);
+
+v8i16 __builtin_msa_dpadd_s_h (v8i16, v16i8, v16i8);
+v4i32 __builtin_msa_dpadd_s_w (v4i32, v8i16, v8i16);
+v2i64 __builtin_msa_dpadd_s_d (v2i64, v4i32, v4i32);
+
+v8u16 __builtin_msa_dpadd_u_h (v8u16, v16u8, v16u8);
+v4u32 __builtin_msa_dpadd_u_w (v4u32, v8u16, v8u16);
+v2u64 __builtin_msa_dpadd_u_d (v2u64, v4u32, v4u32);
+
+v8i16 __builtin_msa_dpsub_s_h (v8i16, v16i8, v16i8);
+v4i32 __builtin_msa_dpsub_s_w (v4i32, v8i16, v8i16);
+v2i64 __builtin_msa_dpsub_s_d (v2i64, v4i32, v4i32);
+
+v8i16 __builtin_msa_dpsub_u_h (v8i16, v16u8, v16u8);
+v4i32 __builtin_msa_dpsub_u_w (v4i32, v8u16, v8u16);
+v2i64 __builtin_msa_dpsub_u_d (v2i64, v4u32, v4u32);
+
+v4f32 __builtin_msa_fadd_w (v4f32, v4f32);
+v2f64 __builtin_msa_fadd_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fcaf_w (v4f32, v4f32);
+v2i64 __builtin_msa_fcaf_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fceq_w (v4f32, v4f32);
+v2i64 __builtin_msa_fceq_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fclass_w (v4f32);
+v2i64 __builtin_msa_fclass_d (v2f64);
+
+v4i32 __builtin_msa_fcle_w (v4f32, v4f32);
+v2i64 __builtin_msa_fcle_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fclt_w (v4f32, v4f32);
+v2i64 __builtin_msa_fclt_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fcne_w (v4f32, v4f32);
+v2i64 __builtin_msa_fcne_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fcor_w (v4f32, v4f32);
+v2i64 __builtin_msa_fcor_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fcueq_w (v4f32, v4f32);
+v2i64 __builtin_msa_fcueq_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fcule_w (v4f32, v4f32);
+v2i64 __builtin_msa_fcule_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fcult_w (v4f32, v4f32);
+v2i64 __builtin_msa_fcult_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fcun_w (v4f32, v4f32);
+v2i64 __builtin_msa_fcun_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fcune_w (v4f32, v4f32);
+v2i64 __builtin_msa_fcune_d (v2f64, v2f64);
+
+v4f32 __builtin_msa_fdiv_w (v4f32, v4f32);
+v2f64 __builtin_msa_fdiv_d (v2f64, v2f64);
+
+v8i16 __builtin_msa_fexdo_h (v4f32, v4f32);
+v4f32 __builtin_msa_fexdo_w (v2f64, v2f64);
+
+v4f32 __builtin_msa_fexp2_w (v4f32, v4i32);
+v2f64 __builtin_msa_fexp2_d (v2f64, v2i64);
+
+v4f32 __builtin_msa_fexupl_w (v8i16);
+v2f64 __builtin_msa_fexupl_d (v4f32);
+
+v4f32 __builtin_msa_fexupr_w (v8i16);
+v2f64 __builtin_msa_fexupr_d (v4f32);
+
+v4f32 __builtin_msa_ffint_s_w (v4i32);
+v2f64 __builtin_msa_ffint_s_d (v2i64);
+
+v4f32 __builtin_msa_ffint_u_w (v4u32);
+v2f64 __builtin_msa_ffint_u_d (v2u64);
+
+v4f32 __builtin_msa_ffql_w (v8i16);
+v2f64 __builtin_msa_ffql_d (v4i32);
+
+v4f32 __builtin_msa_ffqr_w (v8i16);
+v2f64 __builtin_msa_ffqr_d (v4i32);
+
+v16i8 __builtin_msa_fill_b (i32);
+v8i16 __builtin_msa_fill_h (i32);
+v4i32 __builtin_msa_fill_w (i32);
+v2i64 __builtin_msa_fill_d (i64);
+
+v4f32 __builtin_msa_flog2_w (v4f32);
+v2f64 __builtin_msa_flog2_d (v2f64);
+
+v4f32 __builtin_msa_fmadd_w (v4f32, v4f32, v4f32);
+v2f64 __builtin_msa_fmadd_d (v2f64, v2f64, v2f64);
+
+v4f32 __builtin_msa_fmax_w (v4f32, v4f32);
+v2f64 __builtin_msa_fmax_d (v2f64, v2f64);
+
+v4f32 __builtin_msa_fmax_a_w (v4f32, v4f32);
+v2f64 __builtin_msa_fmax_a_d (v2f64, v2f64);
+
+v4f32 __builtin_msa_fmin_w (v4f32, v4f32);
+v2f64 __builtin_msa_fmin_d (v2f64, v2f64);
+
+v4f32 __builtin_msa_fmin_a_w (v4f32, v4f32);
+v2f64 __builtin_msa_fmin_a_d (v2f64, v2f64);
+
+v4f32 __builtin_msa_fmsub_w (v4f32, v4f32, v4f32);
+v2f64 __builtin_msa_fmsub_d (v2f64, v2f64, v2f64);
+
+v4f32 __builtin_msa_fmul_w (v4f32, v4f32);
+v2f64 __builtin_msa_fmul_d (v2f64, v2f64);
+
+v4f32 __builtin_msa_frint_w (v4f32);
+v2f64 __builtin_msa_frint_d (v2f64);
+
+v4f32 __builtin_msa_frcp_w (v4f32);
+v2f64 __builtin_msa_frcp_d (v2f64);
+
+v4f32 __builtin_msa_frsqrt_w (v4f32);
+v2f64 __builtin_msa_frsqrt_d (v2f64);
+
+v4i32 __builtin_msa_fsaf_w (v4f32, v4f32);
+v2i64 __builtin_msa_fsaf_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fseq_w (v4f32, v4f32);
+v2i64 __builtin_msa_fseq_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fsle_w (v4f32, v4f32);
+v2i64 __builtin_msa_fsle_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fslt_w (v4f32, v4f32);
+v2i64 __builtin_msa_fslt_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fsne_w (v4f32, v4f32);
+v2i64 __builtin_msa_fsne_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fsor_w (v4f32, v4f32);
+v2i64 __builtin_msa_fsor_d (v2f64, v2f64);
+
+v4f32 __builtin_msa_fsqrt_w (v4f32);
+v2f64 __builtin_msa_fsqrt_d (v2f64);
+
+v4f32 __builtin_msa_fsub_w (v4f32, v4f32);
+v2f64 __builtin_msa_fsub_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fsueq_w (v4f32, v4f32);
+v2i64 __builtin_msa_fsueq_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fsule_w (v4f32, v4f32);
+v2i64 __builtin_msa_fsule_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fsult_w (v4f32, v4f32);
+v2i64 __builtin_msa_fsult_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fsun_w (v4f32, v4f32);
+v2i64 __builtin_msa_fsun_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_fsune_w (v4f32, v4f32);
+v2i64 __builtin_msa_fsune_d (v2f64, v2f64);
+
+v4i32 __builtin_msa_ftint_s_w (v4f32);
+v2i64 __builtin_msa_ftint_s_d (v2f64);
+
+v4u32 __builtin_msa_ftint_u_w (v4f32);
+v2u64 __builtin_msa_ftint_u_d (v2f64);
+
+v8i16 __builtin_msa_ftq_h (v4f32, v4f32);
+v4i32 __builtin_msa_ftq_w (v2f64, v2f64);
+
+v4i32 __builtin_msa_ftrunc_s_w (v4f32);
+v2i64 __builtin_msa_ftrunc_s_d (v2f64);
+
+v4u32 __builtin_msa_ftrunc_u_w (v4f32);
+v2u64 __builtin_msa_ftrunc_u_d (v2f64);
+
+v8i16 __builtin_msa_hadd_s_h (v16i8, v16i8);
+v4i32 __builtin_msa_hadd_s_w (v8i16, v8i16);
+v2i64 __builtin_msa_hadd_s_d (v4i32, v4i32);
+
+v8u16 __builtin_msa_hadd_u_h (v16u8, v16u8);
+v4u32 __builtin_msa_hadd_u_w (v8u16, v8u16);
+v2u64 __builtin_msa_hadd_u_d (v4u32, v4u32);
+
+v8i16 __builtin_msa_hsub_s_h (v16i8, v16i8);
+v4i32 __builtin_msa_hsub_s_w (v8i16, v8i16);
+v2i64 __builtin_msa_hsub_s_d (v4i32, v4i32);
+
+v8i16 __builtin_msa_hsub_u_h (v16u8, v16u8);
+v4i32 __builtin_msa_hsub_u_w (v8u16, v8u16);
+v2i64 __builtin_msa_hsub_u_d (v4u32, v4u32);
+
+v16i8 __builtin_msa_ilvev_b (v16i8, v16i8);
+v8i16 __builtin_msa_ilvev_h (v8i16, v8i16);
+v4i32 __builtin_msa_ilvev_w (v4i32, v4i32);
+v2i64 __builtin_msa_ilvev_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_ilvl_b (v16i8, v16i8);
+v8i16 __builtin_msa_ilvl_h (v8i16, v8i16);
+v4i32 __builtin_msa_ilvl_w (v4i32, v4i32);
+v2i64 __builtin_msa_ilvl_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_ilvod_b (v16i8, v16i8);
+v8i16 __builtin_msa_ilvod_h (v8i16, v8i16);
+v4i32 __builtin_msa_ilvod_w (v4i32, v4i32);
+v2i64 __builtin_msa_ilvod_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_ilvr_b (v16i8, v16i8);
+v8i16 __builtin_msa_ilvr_h (v8i16, v8i16);
+v4i32 __builtin_msa_ilvr_w (v4i32, v4i32);
+v2i64 __builtin_msa_ilvr_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_insert_b (v16i8, imm0_15, i32);
+v8i16 __builtin_msa_insert_h (v8i16, imm0_7, i32);
+v4i32 __builtin_msa_insert_w (v4i32, imm0_3, i32);
+v2i64 __builtin_msa_insert_d (v2i64, imm0_1, i64);
+
+v16i8 __builtin_msa_insve_b (v16i8, imm0_15, v16i8);
+v8i16 __builtin_msa_insve_h (v8i16, imm0_7, v8i16);
+v4i32 __builtin_msa_insve_w (v4i32, imm0_3, v4i32);
+v2i64 __builtin_msa_insve_d (v2i64, imm0_1, v2i64);
+
+v16i8 __builtin_msa_ld_b (void *, imm_n512_511);
+v8i16 __builtin_msa_ld_h (void *, imm_n1024_1022);
+v4i32 __builtin_msa_ld_w (void *, imm_n2048_2044);
+v2i64 __builtin_msa_ld_d (void *, imm_n4096_4088);
+
+v16i8 __builtin_msa_ldi_b (imm_n512_511);
+v8i16 __builtin_msa_ldi_h (imm_n512_511);
+v4i32 __builtin_msa_ldi_w (imm_n512_511);
+v2i64 __builtin_msa_ldi_d (imm_n512_511);
+
+v8i16 __builtin_msa_madd_q_h (v8i16, v8i16, v8i16);
+v4i32 __builtin_msa_madd_q_w (v4i32, v4i32, v4i32);
+
+v8i16 __builtin_msa_maddr_q_h (v8i16, v8i16, v8i16);
+v4i32 __builtin_msa_maddr_q_w (v4i32, v4i32, v4i32);
+
+v16i8 __builtin_msa_maddv_b (v16i8, v16i8, v16i8);
+v8i16 __builtin_msa_maddv_h (v8i16, v8i16, v8i16);
+v4i32 __builtin_msa_maddv_w (v4i32, v4i32, v4i32);
+v2i64 __builtin_msa_maddv_d (v2i64, v2i64, v2i64);
+
+v16i8 __builtin_msa_max_a_b (v16i8, v16i8);
+v8i16 __builtin_msa_max_a_h (v8i16, v8i16);
+v4i32 __builtin_msa_max_a_w (v4i32, v4i32);
+v2i64 __builtin_msa_max_a_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_max_s_b (v16i8, v16i8);
+v8i16 __builtin_msa_max_s_h (v8i16, v8i16);
+v4i32 __builtin_msa_max_s_w (v4i32, v4i32);
+v2i64 __builtin_msa_max_s_d (v2i64, v2i64);
+
+v16u8 __builtin_msa_max_u_b (v16u8, v16u8);
+v8u16 __builtin_msa_max_u_h (v8u16, v8u16);
+v4u32 __builtin_msa_max_u_w (v4u32, v4u32);
+v2u64 __builtin_msa_max_u_d (v2u64, v2u64);
+
+v16i8 __builtin_msa_maxi_s_b (v16i8, imm_n16_15);
+v8i16 __builtin_msa_maxi_s_h (v8i16, imm_n16_15);
+v4i32 __builtin_msa_maxi_s_w (v4i32, imm_n16_15);
+v2i64 __builtin_msa_maxi_s_d (v2i64, imm_n16_15);
+
+v16u8 __builtin_msa_maxi_u_b (v16u8, imm0_31);
+v8u16 __builtin_msa_maxi_u_h (v8u16, imm0_31);
+v4u32 __builtin_msa_maxi_u_w (v4u32, imm0_31);
+v2u64 __builtin_msa_maxi_u_d (v2u64, imm0_31);
+
+v16i8 __builtin_msa_min_a_b (v16i8, v16i8);
+v8i16 __builtin_msa_min_a_h (v8i16, v8i16);
+v4i32 __builtin_msa_min_a_w (v4i32, v4i32);
+v2i64 __builtin_msa_min_a_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_min_s_b (v16i8, v16i8);
+v8i16 __builtin_msa_min_s_h (v8i16, v8i16);
+v4i32 __builtin_msa_min_s_w (v4i32, v4i32);
+v2i64 __builtin_msa_min_s_d (v2i64, v2i64);
+
+v16u8 __builtin_msa_min_u_b (v16u8, v16u8);
+v8u16 __builtin_msa_min_u_h (v8u16, v8u16);
+v4u32 __builtin_msa_min_u_w (v4u32, v4u32);
+v2u64 __builtin_msa_min_u_d (v2u64, v2u64);
+
+v16i8 __builtin_msa_mini_s_b (v16i8, imm_n16_15);
+v8i16 __builtin_msa_mini_s_h (v8i16, imm_n16_15);
+v4i32 __builtin_msa_mini_s_w (v4i32, imm_n16_15);
+v2i64 __builtin_msa_mini_s_d (v2i64, imm_n16_15);
+
+v16u8 __builtin_msa_mini_u_b (v16u8, imm0_31);
+v8u16 __builtin_msa_mini_u_h (v8u16, imm0_31);
+v4u32 __builtin_msa_mini_u_w (v4u32, imm0_31);
+v2u64 __builtin_msa_mini_u_d (v2u64, imm0_31);
+
+v16i8 __builtin_msa_mod_s_b (v16i8, v16i8);
+v8i16 __builtin_msa_mod_s_h (v8i16, v8i16);
+v4i32 __builtin_msa_mod_s_w (v4i32, v4i32);
+v2i64 __builtin_msa_mod_s_d (v2i64, v2i64);
+
+v16u8 __builtin_msa_mod_u_b (v16u8, v16u8);
+v8u16 __builtin_msa_mod_u_h (v8u16, v8u16);
+v4u32 __builtin_msa_mod_u_w (v4u32, v4u32);
+v2u64 __builtin_msa_mod_u_d (v2u64, v2u64);
+
+v16i8 __builtin_msa_move_v (v16i8);
+
+v8i16 __builtin_msa_msub_q_h (v8i16, v8i16, v8i16);
+v4i32 __builtin_msa_msub_q_w (v4i32, v4i32, v4i32);
+
+v8i16 __builtin_msa_msubr_q_h (v8i16, v8i16, v8i16);
+v4i32 __builtin_msa_msubr_q_w (v4i32, v4i32, v4i32);
+
+v16i8 __builtin_msa_msubv_b (v16i8, v16i8, v16i8);
+v8i16 __builtin_msa_msubv_h (v8i16, v8i16, v8i16);
+v4i32 __builtin_msa_msubv_w (v4i32, v4i32, v4i32);
+v2i64 __builtin_msa_msubv_d (v2i64, v2i64, v2i64);
+
+v8i16 __builtin_msa_mul_q_h (v8i16, v8i16);
+v4i32 __builtin_msa_mul_q_w (v4i32, v4i32);
+
+v8i16 __builtin_msa_mulr_q_h (v8i16, v8i16);
+v4i32 __builtin_msa_mulr_q_w (v4i32, v4i32);
+
+v16i8 __builtin_msa_mulv_b (v16i8, v16i8);
+v8i16 __builtin_msa_mulv_h (v8i16, v8i16);
+v4i32 __builtin_msa_mulv_w (v4i32, v4i32);
+v2i64 __builtin_msa_mulv_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_nloc_b (v16i8);
+v8i16 __builtin_msa_nloc_h (v8i16);
+v4i32 __builtin_msa_nloc_w (v4i32);
+v2i64 __builtin_msa_nloc_d (v2i64);
+
+v16i8 __builtin_msa_nlzc_b (v16i8);
+v8i16 __builtin_msa_nlzc_h (v8i16);
+v4i32 __builtin_msa_nlzc_w (v4i32);
+v2i64 __builtin_msa_nlzc_d (v2i64);
+
+v16u8 __builtin_msa_nor_v (v16u8, v16u8);
+
+v16u8 __builtin_msa_nori_b (v16u8, imm0_255);
+
+v16u8 __builtin_msa_or_v (v16u8, v16u8);
+
+v16u8 __builtin_msa_ori_b (v16u8, imm0_255);
+
+v16i8 __builtin_msa_pckev_b (v16i8, v16i8);
+v8i16 __builtin_msa_pckev_h (v8i16, v8i16);
+v4i32 __builtin_msa_pckev_w (v4i32, v4i32);
+v2i64 __builtin_msa_pckev_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_pckod_b (v16i8, v16i8);
+v8i16 __builtin_msa_pckod_h (v8i16, v8i16);
+v4i32 __builtin_msa_pckod_w (v4i32, v4i32);
+v2i64 __builtin_msa_pckod_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_pcnt_b (v16i8);
+v8i16 __builtin_msa_pcnt_h (v8i16);
+v4i32 __builtin_msa_pcnt_w (v4i32);
+v2i64 __builtin_msa_pcnt_d (v2i64);
+
+v16i8 __builtin_msa_sat_s_b (v16i8, imm0_7);
+v8i16 __builtin_msa_sat_s_h (v8i16, imm0_15);
+v4i32 __builtin_msa_sat_s_w (v4i32, imm0_31);
+v2i64 __builtin_msa_sat_s_d (v2i64, imm0_63);
+
+v16u8 __builtin_msa_sat_u_b (v16u8, imm0_7);
+v8u16 __builtin_msa_sat_u_h (v8u16, imm0_15);
+v4u32 __builtin_msa_sat_u_w (v4u32, imm0_31);
+v2u64 __builtin_msa_sat_u_d (v2u64, imm0_63);
+
+v16i8 __builtin_msa_shf_b (v16i8, imm0_255);
+
+v8i16 __builtin_msa_shf_h (v8i16, imm0_255);
+
+v4i32 __builtin_msa_shf_w (v4i32, imm0_255);
+
+v16i8 __builtin_msa_sld_b (v16i8, v16i8, i32);
+v8i16 __builtin_msa_sld_h (v8i16, v8i16, i32);
+v4i32 __builtin_msa_sld_w (v4i32, v4i32, i32);
+v2i64 __builtin_msa_sld_d (v2i64, v2i64, i32);
+
+v16i8 __builtin_msa_sldi_b (v16i8, v16i8, imm0_15);
+v8i16 __builtin_msa_sldi_h (v8i16, v8i16, imm0_7);
+v4i32 __builtin_msa_sldi_w (v4i32, v4i32, imm0_3);
+v2i64 __builtin_msa_sldi_d (v2i64, v2i64, imm0_1);
+
+v16i8 __builtin_msa_sll_b (v16i8, v16i8);
+v8i16 __builtin_msa_sll_h (v8i16, v8i16);
+v4i32 __builtin_msa_sll_w (v4i32, v4i32);
+v2i64 __builtin_msa_sll_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_slli_b (v16i8, imm0_7);
+v8i16 __builtin_msa_slli_h (v8i16, imm0_15);
+v4i32 __builtin_msa_slli_w (v4i32, imm0_31);
+v2i64 __builtin_msa_slli_d (v2i64, imm0_63);
+
+v16i8 __builtin_msa_splat_b (v16i8, i32);
+v8i16 __builtin_msa_splat_h (v8i16, i32);
+v4i32 __builtin_msa_splat_w (v4i32, i32);
+v2i64 __builtin_msa_splat_d (v2i64, i32);
+
+v16i8 __builtin_msa_splati_b (v16i8, imm0_15);
+v8i16 __builtin_msa_splati_h (v8i16, imm0_7);
+v4i32 __builtin_msa_splati_w (v4i32, imm0_3);
+v2i64 __builtin_msa_splati_d (v2i64, imm0_1);
+
+v16i8 __builtin_msa_sra_b (v16i8, v16i8);
+v8i16 __builtin_msa_sra_h (v8i16, v8i16);
+v4i32 __builtin_msa_sra_w (v4i32, v4i32);
+v2i64 __builtin_msa_sra_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_srai_b (v16i8, imm0_7);
+v8i16 __builtin_msa_srai_h (v8i16, imm0_15);
+v4i32 __builtin_msa_srai_w (v4i32, imm0_31);
+v2i64 __builtin_msa_srai_d (v2i64, imm0_63);
+
+v16i8 __builtin_msa_srar_b (v16i8, v16i8);
+v8i16 __builtin_msa_srar_h (v8i16, v8i16);
+v4i32 __builtin_msa_srar_w (v4i32, v4i32);
+v2i64 __builtin_msa_srar_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_srari_b (v16i8, imm0_7);
+v8i16 __builtin_msa_srari_h (v8i16, imm0_15);
+v4i32 __builtin_msa_srari_w (v4i32, imm0_31);
+v2i64 __builtin_msa_srari_d (v2i64, imm0_63);
+
+v16i8 __builtin_msa_srl_b (v16i8, v16i8);
+v8i16 __builtin_msa_srl_h (v8i16, v8i16);
+v4i32 __builtin_msa_srl_w (v4i32, v4i32);
+v2i64 __builtin_msa_srl_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_srli_b (v16i8, imm0_7);
+v8i16 __builtin_msa_srli_h (v8i16, imm0_15);
+v4i32 __builtin_msa_srli_w (v4i32, imm0_31);
+v2i64 __builtin_msa_srli_d (v2i64, imm0_63);
+
+v16i8 __builtin_msa_srlr_b (v16i8, v16i8);
+v8i16 __builtin_msa_srlr_h (v8i16, v8i16);
+v4i32 __builtin_msa_srlr_w (v4i32, v4i32);
+v2i64 __builtin_msa_srlr_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_srlri_b (v16i8, imm0_7);
+v8i16 __builtin_msa_srlri_h (v8i16, imm0_15);
+v4i32 __builtin_msa_srlri_w (v4i32, imm0_31);
+v2i64 __builtin_msa_srlri_d (v2i64, imm0_63);
+
+void __builtin_msa_st_b (v16i8, void *, imm_n512_511);
+void __builtin_msa_st_h (v8i16, void *, imm_n1024_1022);
+void __builtin_msa_st_w (v4i32, void *, imm_n2048_2044);
+void __builtin_msa_st_d (v2i64, void *, imm_n4096_4088);
+
+v16i8 __builtin_msa_subs_s_b (v16i8, v16i8);
+v8i16 __builtin_msa_subs_s_h (v8i16, v8i16);
+v4i32 __builtin_msa_subs_s_w (v4i32, v4i32);
+v2i64 __builtin_msa_subs_s_d (v2i64, v2i64);
+
+v16u8 __builtin_msa_subs_u_b (v16u8, v16u8);
+v8u16 __builtin_msa_subs_u_h (v8u16, v8u16);
+v4u32 __builtin_msa_subs_u_w (v4u32, v4u32);
+v2u64 __builtin_msa_subs_u_d (v2u64, v2u64);
+
+v16u8 __builtin_msa_subsus_u_b (v16u8, v16i8);
+v8u16 __builtin_msa_subsus_u_h (v8u16, v8i16);
+v4u32 __builtin_msa_subsus_u_w (v4u32, v4i32);
+v2u64 __builtin_msa_subsus_u_d (v2u64, v2i64);
+
+v16i8 __builtin_msa_subsuu_s_b (v16u8, v16u8);
+v8i16 __builtin_msa_subsuu_s_h (v8u16, v8u16);
+v4i32 __builtin_msa_subsuu_s_w (v4u32, v4u32);
+v2i64 __builtin_msa_subsuu_s_d (v2u64, v2u64);
+
+v16i8 __builtin_msa_subv_b (v16i8, v16i8);
+v8i16 __builtin_msa_subv_h (v8i16, v8i16);
+v4i32 __builtin_msa_subv_w (v4i32, v4i32);
+v2i64 __builtin_msa_subv_d (v2i64, v2i64);
+
+v16i8 __builtin_msa_subvi_b (v16i8, imm0_31);
+v8i16 __builtin_msa_subvi_h (v8i16, imm0_31);
+v4i32 __builtin_msa_subvi_w (v4i32, imm0_31);
+v2i64 __builtin_msa_subvi_d (v2i64, imm0_31);
+
+v16i8 __builtin_msa_vshf_b (v16i8, v16i8, v16i8);
+v8i16 __builtin_msa_vshf_h (v8i16, v8i16, v8i16);
+v4i32 __builtin_msa_vshf_w (v4i32, v4i32, v4i32);
+v2i64 __builtin_msa_vshf_d (v2i64, v2i64, v2i64);
+
+v16u8 __builtin_msa_xor_v (v16u8, v16u8);
+
+v16u8 __builtin_msa_xori_b (v16u8, imm0_255);
+
+v4f32 __builtin_msa_cast_to_vector_float (f32);
+v2f64 __builtin_msa_cast_to_vector_double (f64);
+f32 __builtin_msa_cast_to_scalar_float (v4f32);
+f64 __builtin_msa_cast_to_scalar_double (v2f64);
+
+i32 __builtin_msa_lsa (i32, i32, imm1_4);
+i64 __builtin_msa_dlsa (i64, i64, imm1_4);
+@end smallexample
+
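For orientation, here is a minimal usage sketch combining a few of the built-ins listed above in ordinary C code. It is illustrative only and not part of the patch: the function name and the vector typedef are hypothetical, and it assumes a MIPS target built with -mmsa -mfp64 -mhard-float.

/* Illustrative sketch (not part of the patch): element-wise multiply,
   subtract and arithmetic-shift a v4i32 vector using the built-ins
   documented above.  Assumes -mmsa -mfp64 -mhard-float.  */
typedef int v4i32 __attribute__ ((vector_size (16)));

v4i32
mul_sub_shift (v4i32 a, v4i32 b)
{
  v4i32 prod = __builtin_msa_mulv_w (a, b);    /* per-element a * b */
  v4i32 diff = __builtin_msa_subv_w (prod, a); /* per-element prod - a */
  return __builtin_msa_srai_w (diff, 2);       /* per-element arithmetic shift right by 2
                                                  (shift amount must be a constant 0-31) */
}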
@menu
* Paired-Single Arithmetic::
* Paired-Single Built-in Functions::
diff --git a/gcc-4.9/gcc/doc/invoke.texi b/gcc-4.9/gcc/doc/invoke.texi
index 0324a8571..a74c6c54e 100644
--- a/gcc-4.9/gcc/doc/invoke.texi
+++ b/gcc-4.9/gcc/doc/invoke.texi
@@ -766,8 +766,8 @@ Objective-C and Objective-C++ Dialects}.
@emph{MIPS Options}
@gccoptlist{-EL -EB -march=@var{arch} -mtune=@var{arch} @gol
--mips1 -mips2 -mips3 -mips4 -mips32 -mips32r2 @gol
--mips64 -mips64r2 @gol
+-mips1 -mips2 -mips3 -mips4 -mips32 -mips32r2 -mips32r3 -mips32r5 @gol
+-mips32r6 -mips64 -mips64r2 -mips64r3 -mips64r5 -mips64r6 @gol
-mips16 -mno-mips16 -mflip-mips16 @gol
-minterlink-compressed -mno-interlink-compressed @gol
-minterlink-mips16 -mno-interlink-mips16 @gol
@@ -775,12 +775,15 @@ Objective-C and Objective-C++ Dialects}.
-mshared -mno-shared -mplt -mno-plt -mxgot -mno-xgot @gol
-mgp32 -mgp64 -mfp32 -mfp64 -mhard-float -msoft-float @gol
-mno-float -msingle-float -mdouble-float @gol
+-modd-spreg -mno-odd-spreg @gol
-mabs=@var{mode} -mnan=@var{encoding} @gol
-mdsp -mno-dsp -mdspr2 -mno-dspr2 @gol
-mmcu -mmno-mcu @gol
-meva -mno-eva @gol
-mvirt -mno-virt @gol
+-mxpa -mno-xpa @gol
-mmicromips -mno-micromips @gol
+-mmsa -mno-msa @gol
-mfpu=@var{fpu-type} @gol
-msmartmips -mno-smartmips @gol
-mpaired-single -mno-paired-single -mdmx -mno-mdmx @gol
@@ -17035,7 +17038,9 @@ Generate code that runs on @var{arch}, which can be the name of a
generic MIPS ISA, or the name of a particular processor.
The ISA names are:
@samp{mips1}, @samp{mips2}, @samp{mips3}, @samp{mips4},
-@samp{mips32}, @samp{mips32r2}, @samp{mips64} and @samp{mips64r2}.
+@samp{mips32}, @samp{mips32r2}, @samp{mips32r3}, @samp{mips32r5},
+@samp{mips32r6}, @samp{mips64}, @samp{mips64r2}, @samp{mips64r3},
+@samp{mips64r5} and @samp{mips64r6}.
The processor names are:
@samp{4kc}, @samp{4km}, @samp{4kp}, @samp{4ksc},
@samp{4kec}, @samp{4kem}, @samp{4kep}, @samp{4ksd},
@@ -17051,6 +17056,7 @@ The processor names are:
@samp{m14k}, @samp{m14kc}, @samp{m14ke}, @samp{m14kec},
@samp{octeon}, @samp{octeon+}, @samp{octeon2},
@samp{orion},
+@samp{p5600},
@samp{r2000}, @samp{r3000}, @samp{r3900}, @samp{r4000}, @samp{r4400},
@samp{r4600}, @samp{r4650}, @samp{r4700}, @samp{r6000}, @samp{r8000},
@samp{rm7000}, @samp{rm9000},
@@ -17133,9 +17139,17 @@ Equivalent to @option{-march=mips4}.
@opindex mips32
Equivalent to @option{-march=mips32}.
-@item -mips32r2
-@opindex mips32r2
-Equivalent to @option{-march=mips32r2}.
+@item -mips32r3
+@opindex mips32r3
+Equivalent to @option{-march=mips32r3}.
+
+@item -mips32r5
+@opindex mips32r5
+Equivalent to @option{-march=mips32r5}.
+
+@item -mips32r6
+@opindex mips32r6
+Equivalent to @option{-march=mips32r6}.
@item -mips64
@opindex mips64
@@ -17145,6 +17159,18 @@ Equivalent to @option{-march=mips64}.
@opindex mips64r2
Equivalent to @option{-march=mips64r2}.
+@item -mips64r3
+@opindex mips64r3
+Equivalent to @option{-march=mips64r3}.
+
+@item -mips64r5
+@opindex mips64r5
+Equivalent to @option{-march=mips64r5}.
+
+@item -mips64r6
+@opindex mips64r6
+Equivalent to @option{-march=mips64r6}.
+
@item -mips16
@itemx -mno-mips16
@opindex mips16
@@ -17205,7 +17231,7 @@ GCC supports a variant of the o32 ABI in which floating-point registers
are 64 rather than 32 bits wide. You can select this combination with
@option{-mabi=32} @option{-mfp64}. This ABI relies on the @code{mthc1}
and @code{mfhc1} instructions and is therefore only supported for
-MIPS32R2 processors.
+MIPS32R2, MIPS32R3 and MIPS32R5 processors.
The register assignments for arguments and return values remain the
same, but each scalar value is passed in a single 64-bit register
@@ -17332,6 +17358,15 @@ operations.
Assume that the floating-point coprocessor supports double-precision
operations. This is the default.
+@item -modd-spreg
+@itemx -mno-odd-spreg
+@opindex modd-spreg
+@opindex mno-odd-spreg
+Enable the use of odd-numbered single-precision floating-point registers
+for the O32 ABI. This is the default for specific processors that are
+known to support these registers. The O32 FPXX extension sets
+@code{-mno-odd-spreg} by default when targeting generic architectures.
+
@item -mabs=2008
@itemx -mabs=legacy
@opindex mabs=2008
@@ -17471,6 +17506,12 @@ Use (do not use) the MIPS Enhanced Virtual Addressing instructions.
@opindex mno-virt
Use (do not use) the MIPS Virtualization Application Specific instructions.
+@item -mxpa
+@itemx -mno-xpa
+@opindex mxpa
+@opindex mno-xpa
+Use (do not use) the MIPS eXtended Physical Address (XPA) instructions.
+
@item -mlong64
@opindex mlong64
Force @code{long} types to be 64 bits wide. See @option{-mlong32} for
diff --git a/gcc-4.9/gcc/doc/md.texi b/gcc-4.9/gcc/doc/md.texi
index 13e34b5e2..e2fe11b32 100644
--- a/gcc-4.9/gcc/doc/md.texi
+++ b/gcc-4.9/gcc/doc/md.texi
@@ -3013,10 +3013,8 @@ operands can be used for microMIPS instructions such as @code{ll} and
equivalent to @code{R}.
@item ZD
-When compiling microMIPS code, this constraint matches an address operand
-that is formed from a base register and a 12-bit offset. These operands
-can be used for microMIPS instructions such as @code{prefetch}. When
-not compiling for microMIPS code, @code{ZD} is equivalent to @code{p}.
+An address suitable for a @code{prefetch} instruction, or for any other
+instruction with the same addressing mode as @code{prefetch}.
@end table
@item Motorola 680x0---@file{config/m68k/constraints.md}
diff --git a/gcc-4.9/gcc/dwarf2cfi.c b/gcc-4.9/gcc/dwarf2cfi.c
index abcdeb344..3259efd75 100644
--- a/gcc-4.9/gcc/dwarf2cfi.c
+++ b/gcc-4.9/gcc/dwarf2cfi.c
@@ -252,6 +252,10 @@ init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
gen_int_mode (size, mode));
}
+#ifndef DWARF_REG_MODE
+#define DWARF_REG_MODE(REGNO, MODE) (MODE)
+#endif
+
/* Generate code to initialize the register size table. */
void
@@ -276,6 +280,7 @@ expand_builtin_init_dwarf_reg_sizes (tree address)
if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
save_mode = choose_hard_reg_mode (i, 1, true);
+ save_mode = DWARF_REG_MODE (i, save_mode);
if (dnum == DWARF_FRAME_RETURN_COLUMN)
{
if (save_mode == VOIDmode)
diff --git a/gcc-4.9/gcc/lra-constraints.c b/gcc-4.9/gcc/lra-constraints.c
index aac50876d..b7bfc57a5 100644
--- a/gcc-4.9/gcc/lra-constraints.c
+++ b/gcc-4.9/gcc/lra-constraints.c
@@ -317,6 +317,118 @@ in_mem_p (int regno)
return get_reg_class (regno) == NO_REGS;
}
+/* Return 1 if ADDR is a valid memory address for mode MODE in address
+ space AS, and check that each pseudo has the proper kind of hard
+ reg. */
+static int
+valid_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx addr, addr_space_t as)
+{
+#ifdef GO_IF_LEGITIMATE_ADDRESS
+ lra_assert (ADDR_SPACE_GENERIC_P (as));
+ GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
+ return 0;
+
+ win:
+ return 1;
+#else
+ return targetm.addr_space.legitimate_address_p (mode, addr, 0, as);
+#endif
+}
+
+namespace {
+ /* Temporarily eliminates registers in an address (for the lifetime of
+ the object). */
+ class address_eliminator {
+ public:
+ address_eliminator (struct address_info *ad);
+ ~address_eliminator ();
+
+ private:
+ struct address_info *m_ad;
+ rtx *m_base_loc;
+ rtx m_base_reg;
+ rtx *m_index_loc;
+ rtx m_index_reg;
+ };
+}
+
+address_eliminator::address_eliminator (struct address_info *ad)
+ : m_ad (ad),
+ m_base_loc (strip_subreg (ad->base_term)),
+ m_base_reg (NULL_RTX),
+ m_index_loc (strip_subreg (ad->index_term)),
+ m_index_reg (NULL_RTX)
+{
+ if (m_base_loc != NULL)
+ {
+ m_base_reg = *m_base_loc;
+ lra_eliminate_reg_if_possible (m_base_loc);
+ if (m_ad->base_term2 != NULL)
+ *m_ad->base_term2 = *m_ad->base_term;
+ }
+ if (m_index_loc != NULL)
+ {
+ m_index_reg = *m_index_loc;
+ lra_eliminate_reg_if_possible (m_index_loc);
+ }
+}
+
+address_eliminator::~address_eliminator ()
+{
+ if (m_base_loc && *m_base_loc != m_base_reg)
+ {
+ *m_base_loc = m_base_reg;
+ if (m_ad->base_term2 != NULL)
+ *m_ad->base_term2 = *m_ad->base_term;
+ }
+ if (m_index_loc && *m_index_loc != m_index_reg)
+ *m_index_loc = m_index_reg;
+}
+
+/* Return true if the eliminated form of AD is a legitimate target address. */
+static bool
+valid_address_p (struct address_info *ad)
+{
+ address_eliminator eliminator (ad);
+ return valid_address_p (ad->mode, *ad->outer, ad->as);
+}
+
+#ifdef EXTRA_CONSTRAINT_STR
+/* Return true if the eliminated form of memory reference OP satisfies
+ extra memory constraint CONSTRAINT. */
+static bool
+satisfies_memory_constraint_p (rtx op, const char *constraint)
+{
+ struct address_info ad;
+
+ decompose_mem_address (&ad, op);
+ address_eliminator eliminator (&ad);
+ return EXTRA_CONSTRAINT_STR (op, *constraint, constraint);
+}
+
+/* Return true if the eliminated form of address AD satisfies extra
+ address constraint CONSTRAINT. */
+static bool
+satisfies_address_constraint_p (struct address_info *ad,
+ const char *constraint)
+{
+ address_eliminator eliminator (ad);
+ return EXTRA_CONSTRAINT_STR (*ad->outer, *constraint, constraint);
+}
+
+/* Return true if the eliminated form of address OP satisfies extra
+ address constraint CONSTRAINT. */
+static bool
+satisfies_address_constraint_p (rtx op, const char *constraint)
+{
+ struct address_info ad;
+
+ decompose_lea_address (&ad, &op);
+ return satisfies_address_constraint_p (&ad, constraint);
+}
+#endif
+
/* Initiate equivalences for LRA. As we keep original equivalences
before any elimination, we need to make copies otherwise any change
in insns might change the equivalences. */
@@ -1941,7 +2053,8 @@ process_alt_operands (int only_alternative)
#ifdef EXTRA_CONSTRAINT_STR
if (EXTRA_MEMORY_CONSTRAINT (c, p))
{
- if (EXTRA_CONSTRAINT_STR (op, c, p))
+ if (MEM_P (op)
+ && satisfies_memory_constraint_p (op, p))
win = true;
else if (spilled_pseudo_p (op))
win = true;
@@ -1960,7 +2073,7 @@ process_alt_operands (int only_alternative)
}
if (EXTRA_ADDRESS_CONSTRAINT (c, p))
{
- if (EXTRA_CONSTRAINT_STR (op, c, p))
+ if (satisfies_address_constraint_p (op, p))
win = true;
/* If we didn't already win, we can reload
@@ -2576,58 +2689,37 @@ process_alt_operands (int only_alternative)
return ok_p;
}
-/* Return 1 if ADDR is a valid memory address for mode MODE in address
- space AS, and check that each pseudo has the proper kind of hard
- reg. */
-static int
-valid_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
- rtx addr, addr_space_t as)
+/* Make reload base reg from address AD. */
+static rtx
+base_to_reg (struct address_info *ad)
{
-#ifdef GO_IF_LEGITIMATE_ADDRESS
- lra_assert (ADDR_SPACE_GENERIC_P (as));
- GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
- return 0;
-
- win:
- return 1;
-#else
- return targetm.addr_space.legitimate_address_p (mode, addr, 0, as);
-#endif
-}
-
-/* Return whether address AD is valid. */
+ enum reg_class cl;
+ int code = -1;
+ rtx new_inner = NULL_RTX;
+ rtx new_reg = NULL_RTX;
+ rtx insn;
+ rtx last_insn = get_last_insn();
-static bool
-valid_address_p (struct address_info *ad)
-{
- /* Some ports do not check displacements for eliminable registers,
- so we replace them temporarily with the elimination target. */
- rtx saved_base_reg = NULL_RTX;
- rtx saved_index_reg = NULL_RTX;
- rtx *base_term = strip_subreg (ad->base_term);
- rtx *index_term = strip_subreg (ad->index_term);
- if (base_term != NULL)
- {
- saved_base_reg = *base_term;
- lra_eliminate_reg_if_possible (base_term);
- if (ad->base_term2 != NULL)
- *ad->base_term2 = *ad->base_term;
- }
- if (index_term != NULL)
- {
- saved_index_reg = *index_term;
- lra_eliminate_reg_if_possible (index_term);
- }
- bool ok_p = valid_address_p (ad->mode, *ad->outer, ad->as);
- if (saved_base_reg != NULL_RTX)
+ lra_assert (ad->base == ad->base_term && ad->disp == ad->disp_term);
+ cl = base_reg_class (ad->mode, ad->as, ad->base_outer_code,
+ get_index_code (ad));
+ new_reg = lra_create_new_reg (GET_MODE (*ad->base_term), NULL_RTX,
+ cl, "base");
+ new_inner = simplify_gen_binary (PLUS, GET_MODE (new_reg), new_reg,
+ ad->disp_term == NULL
+ ? gen_int_mode (0, ad->mode)
+ : *ad->disp_term);
+ if (!valid_address_p (ad->mode, new_inner, ad->as))
+ return NULL_RTX;
+ insn = emit_insn (gen_rtx_SET (ad->mode, new_reg, *ad->base_term));
+ code = recog_memoized (insn);
+ if (code < 0)
{
- *base_term = saved_base_reg;
- if (ad->base_term2 != NULL)
- *ad->base_term2 = *ad->base_term;
+ delete_insns_since (last_insn);
+ return NULL_RTX;
}
- if (saved_index_reg != NULL_RTX)
- *index_term = saved_index_reg;
- return ok_p;
+
+ return new_inner;
}
/* Make reload base reg + disp from address AD. Return the new pseudo. */
@@ -2832,7 +2924,7 @@ process_address (int nop, rtx *before, rtx *after)
EXTRA_CONSTRAINT_STR for the validation. */
if (constraint[0] != 'p'
&& EXTRA_ADDRESS_CONSTRAINT (constraint[0], constraint)
- && EXTRA_CONSTRAINT_STR (op, constraint[0], constraint))
+ && satisfies_address_constraint_p (&ad, constraint))
return change_p;
#endif
@@ -2847,6 +2939,8 @@ process_address (int nop, rtx *before, rtx *after)
3) the address is a frame address with an invalid offset.
+ 4) the address is a frame address with an invalid base.
+
All these cases involve a non-autoinc address, so there is no
point revalidating other types. */
if (ad.autoinc_p || valid_address_p (&ad))
@@ -2928,14 +3022,19 @@ process_address (int nop, rtx *before, rtx *after)
int regno;
enum reg_class cl;
rtx set, insns, last_insn;
+ /* Try to reload base into register only if the base is invalid
+ for the address but with valid offset, case (4) above. */
+ start_sequence ();
+ new_reg = base_to_reg (&ad);
+
/* base + disp => new base, cases (1) and (3) above. */
/* Another option would be to reload the displacement into an
index register. However, postreload has code to optimize
address reloads that have the same base and different
displacements, so reloading into an index register would
not necessarily be a win. */
- start_sequence ();
- new_reg = base_plus_disp_to_reg (&ad);
+ if (new_reg == NULL_RTX)
+ new_reg = base_plus_disp_to_reg (&ad);
insns = get_insns ();
last_insn = get_last_insn ();
/* If we generated at least two insns, try last insn source as
@@ -3539,7 +3638,7 @@ curr_insn_transform (void)
break;
#ifdef EXTRA_CONSTRAINT_STR
if (EXTRA_MEMORY_CONSTRAINT (c, constraint)
- && EXTRA_CONSTRAINT_STR (tem, c, constraint))
+ && satisfies_memory_constraint_p (tem, constraint))
break;
#endif
}
diff --git a/gcc-4.9/gcc/prefix.c b/gcc-4.9/gcc/prefix.c
index 71ddf5d23..3a7267208 100644
--- a/gcc-4.9/gcc/prefix.c
+++ b/gcc-4.9/gcc/prefix.c
@@ -67,6 +67,7 @@ License along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#if defined(_WIN32) && defined(ENABLE_WIN32_REGISTRY)
+#define CINTERFACE
#include <windows.h>
#endif
#include "prefix.h"
diff --git a/gcc-4.9/gcc/regcprop.c b/gcc-4.9/gcc/regcprop.c
index 101de76ef..5b63cb74d 100644
--- a/gcc-4.9/gcc/regcprop.c
+++ b/gcc-4.9/gcc/regcprop.c
@@ -1011,7 +1011,6 @@ copyprop_hardreg_forward_1 (basic_block bb, struct value_data *vd)
unsigned int set_nregs = 0;
unsigned int regno;
rtx exp;
- hard_reg_set_iterator hrsi;
for (exp = CALL_INSN_FUNCTION_USAGE (insn); exp; exp = XEXP (exp, 1))
{
@@ -1030,8 +1029,10 @@ copyprop_hardreg_forward_1 (basic_block bb, struct value_data *vd)
}
}
- EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, regno, hrsi)
- if (regno < set_regno || regno >= set_regno + set_nregs)
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)
+ || HARD_REGNO_CALL_PART_CLOBBERED (regno, vd->e[regno].mode))
+ && (regno < set_regno || regno >= set_regno + set_nregs))
kill_value_regno (regno, 1, vd);
/* If SET was seen in CALL_INSN_FUNCTION_USAGE, and SET_SRC
diff --git a/gcc-4.9/gcc/testsuite/gcc.dg/vect/tree-vect.h b/gcc-4.9/gcc/testsuite/gcc.dg/vect/tree-vect.h
index ed59d7976..600ef6c67 100644
--- a/gcc-4.9/gcc/testsuite/gcc.dg/vect/tree-vect.h
+++ b/gcc-4.9/gcc/testsuite/gcc.dg/vect/tree-vect.h
@@ -66,6 +66,8 @@ check_vect (void)
if (a != 1)
exit (0);
}
+#elif defined(__mips)
+ asm volatile ("or.v $w0,$w0,$w0");
#endif
signal (SIGILL, SIG_DFL);
}
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/args-1.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/args-1.c
index 3a132deaf..643df2426 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/args-1.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/args-1.c
@@ -5,7 +5,7 @@
const char *compiled_for = _MIPS_ARCH;
const char *optimized_for = _MIPS_TUNE;
-#if __mips_fpr != 32 && __mips_fpr != 64
+#if __mips_fpr != 32 && __mips_fpr != 64 && __mips_fpr != 0
#error Bad __mips_fpr
#endif
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/args-3.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/args-3.c
index 6a79ce674..5eddabf83 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/args-3.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/args-3.c
@@ -24,7 +24,7 @@ int foo (float inf, int64 in64, int32 in32)
abort ();
#endif
-#if (__mips == 4 || __mips == 32 || __mips == 64) && !defined (__mips16)
+#if (__mips == 4 || ((__mips == 32 || __mips == 64) && __mips_isa_rev < 6)) && !defined (__mips16)
__asm__ ("move %0,%.\n\tmovn %0,%1,%2"
: "=&r" (res32) : "r" (in32), "r" (in64 != 0));
if (res32 != 60)
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/branch-cost-2.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/branch-cost-2.c
index 3b2c4a13e..39e181fa6 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/branch-cost-2.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/branch-cost-2.c
@@ -1,4 +1,4 @@
-/* { dg-options "-mbranch-cost=10 isa>=4" } */
+/* { dg-options "-mbranch-cost=10 isa>=4 forbid_cpu=mips.*r6" } */
/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
NOMIPS16 int
foo (int x, int y, int z, int k)
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-1.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-1.c
new file mode 100644
index 000000000..ecb994f21
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-1.c
@@ -0,0 +1,21 @@
+/* Check that we handle call-clobbered FPRs correctly. */
+/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
+/* { dg-options "isa>=2 -mabi=32 -ffixed-f0 -ffixed-f1 -ffixed-f2 -ffixed-f3 -ffixed-f4 -ffixed-f5 -ffixed-f6 -ffixed-f7 -ffixed-f8 -ffixed-f9 -ffixed-f10 -ffixed-f11 -ffixed-f12 -ffixed-f13 -ffixed-f14 -ffixed-f15 -ffixed-f16 -ffixed-f17 -ffixed-f18 -ffixed-f19" } */
+
+void bar (void);
+double a;
+double
+foo ()
+{
+ double b = a + 1.0;
+ bar();
+ return b;
+}
+/* { dg-final { scan-assembler-not "lwc1" } } */
+/* { dg-final { scan-assembler-not "swc1" } } */
+/* { dg-final { scan-assembler-times "sdc1" 2 } } */
+/* { dg-final { scan-assembler-times "ldc1" 4 } } */
+/* { dg-final { scan-assembler-not "mtc" } } */
+/* { dg-final { scan-assembler-not "mfc" } } */
+/* { dg-final { scan-assembler-not "mthc" } } */
+/* { dg-final { scan-assembler-not "mfhc" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-2.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-2.c
new file mode 100644
index 000000000..7d9278e73
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-2.c
@@ -0,0 +1,21 @@
+/* Check that we handle call-clobbered FPRs correctly. */
+/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
+/* { dg-options "-mabi=32 -modd-spreg -mfp32 -ffixed-f0 -ffixed-f1 -ffixed-f2 -ffixed-f3 -ffixed-f4 -ffixed-f5 -ffixed-f6 -ffixed-f7 -ffixed-f8 -ffixed-f9 -ffixed-f10 -ffixed-f11 -ffixed-f12 -ffixed-f13 -ffixed-f14 -ffixed-f15 -ffixed-f16 -ffixed-f17 -ffixed-f18 -ffixed-f19 -ffixed-f20 -ffixed-f22 -ffixed-f24 -ffixed-f26 -ffixed-f28 -ffixed-f30" } */
+
+void bar (void);
+float a;
+float
+foo ()
+{
+ float b = a + 1.0f;
+ bar();
+ return b;
+}
+/* { dg-final { scan-assembler-times "lwc1" 2 } } */
+/* { dg-final { scan-assembler-not "swc1" } } */
+/* { dg-final { scan-assembler-times "sdc1" 2 } } */
+/* { dg-final { scan-assembler-times "ldc1" 2 } } */
+/* { dg-final { scan-assembler-not "mtc" } } */
+/* { dg-final { scan-assembler-not "mfc" } } */
+/* { dg-final { scan-assembler-not "mthc" } } */
+/* { dg-final { scan-assembler-not "mfhc" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-3.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-3.c
new file mode 100644
index 000000000..1cb763af5
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-3.c
@@ -0,0 +1,23 @@
+/* Check that we handle call-clobbered FPRs correctly. */
+/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
+/* Refer to call-clobbered-4.c to see the expected output from -Os builds. */
+/* { dg-skip-if "uses callee-saved GPR" { *-*-* } { "-Os" } { "" } } */
+/* { dg-options "-mabi=32 -modd-spreg -mfpxx -ffixed-f0 -ffixed-f1 -ffixed-f2 -ffixed-f3 -ffixed-f4 -ffixed-f5 -ffixed-f6 -ffixed-f7 -ffixed-f8 -ffixed-f9 -ffixed-f10 -ffixed-f11 -ffixed-f12 -ffixed-f13 -ffixed-f14 -ffixed-f15 -ffixed-f16 -ffixed-f17 -ffixed-f18 -ffixed-f19 -ffixed-f20 -ffixed-f22 -ffixed-f24 -ffixed-f26 -ffixed-f28 -ffixed-f30" } */
+
+void bar (void);
+float a;
+float
+foo ()
+{
+ float b = a + 1.0f;
+ bar();
+ return b;
+}
+/* { dg-final { scan-assembler-times "lwc1" 3 } } */
+/* { dg-final { scan-assembler-times "swc1" 1 } } */
+/* { dg-final { scan-assembler-times "sdc1" 2 } } */
+/* { dg-final { scan-assembler-times "ldc1" 2 } } */
+/* { dg-final { scan-assembler-not "mtc" } } */
+/* { dg-final { scan-assembler-not "mfc" } } */
+/* { dg-final { scan-assembler-not "mthc" } } */
+/* { dg-final { scan-assembler-not "mfhc" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-4.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-4.c
new file mode 100644
index 000000000..b498a054f
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-4.c
@@ -0,0 +1,22 @@
+/* Check that we handle call-clobbered FPRs correctly.
+   This test differs from call-clobbered-3.c because, when optimising for size,
+   a callee-saved GPR is used to carry 'b' across the call.  */
+/* { dg-skip-if "code quality test" { *-*-* } { "*" } { "-Os" } } */
+/* { dg-options "-mabi=32 -modd-spreg -mfpxx -ffixed-f0 -ffixed-f1 -ffixed-f2 -ffixed-f3 -ffixed-f4 -ffixed-f5 -ffixed-f6 -ffixed-f7 -ffixed-f8 -ffixed-f9 -ffixed-f10 -ffixed-f11 -ffixed-f12 -ffixed-f13 -ffixed-f14 -ffixed-f15 -ffixed-f16 -ffixed-f17 -ffixed-f18 -ffixed-f19 -ffixed-f20 -ffixed-f22 -ffixed-f24 -ffixed-f26 -ffixed-f28 -ffixed-f30" } */
+
+void bar (void);
+float a;
+float
+foo ()
+{
+ float b = a + 1.0f;
+ bar();
+ return b;
+}
+/* { dg-final { scan-assembler-times "lwc1" 2 } } */
+/* { dg-final { scan-assembler-times "sdc1" 2 } } */
+/* { dg-final { scan-assembler-times "ldc1" 2 } } */
+/* { dg-final { scan-assembler-times "mtc" 1 } } */
+/* { dg-final { scan-assembler-times "mfc" 1 } } */
+/* { dg-final { scan-assembler-not "mthc" } } */
+/* { dg-final { scan-assembler-not "mfhc" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-5.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-5.c
new file mode 100644
index 000000000..c7cd7cac7
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-clobbered-5.c
@@ -0,0 +1,21 @@
+/* Check that we handle call-clobbered FPRs correctly. */
+/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
+/* { dg-options "-mabi=32 -mfp64 -ffixed-f0 -ffixed-f1 -ffixed-f2 -ffixed-f3 -ffixed-f4 -ffixed-f5 -ffixed-f6 -ffixed-f7 -ffixed-f8 -ffixed-f9 -ffixed-f10 -ffixed-f11 -ffixed-f12 -ffixed-f13 -ffixed-f14 -ffixed-f15 -ffixed-f16 -ffixed-f17 -ffixed-f18 -ffixed-f19 -ffixed-f20 -ffixed-f22 -ffixed-f24 -ffixed-f26 -ffixed-f28 -ffixed-f30" } */
+
+void bar (void);
+float a;
+float
+foo ()
+{
+ float b = a + 1.0f;
+ bar();
+ return b;
+}
+/* { dg-final { scan-assembler-times "lwc1" 3 } } */
+/* { dg-final { scan-assembler-times "swc1" 1 } } */
+/* { dg-final { scan-assembler-not "sdc1" } } */
+/* { dg-final { scan-assembler-not "ldc1" } } */
+/* { dg-final { scan-assembler-not "mtc" } } */
+/* { dg-final { scan-assembler-not "mfc" } } */
+/* { dg-final { scan-assembler-not "mthc" } } */
+/* { dg-final { scan-assembler-not "mfhc" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-4.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-4.c
new file mode 100644
index 000000000..e12617558
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-4.c
@@ -0,0 +1,32 @@
+/* Check that we save the correct call-saved GPRs and FPRs. */
+/* { dg-options "isa>=2 -mabi=32 -mfp32" } */
+
+void bar (void);
+
+void
+foo (int x)
+{
+ __builtin_unwind_init ();
+ __builtin_eh_return (x, bar);
+}
+/* { dg-final { scan-assembler "\\\$16" } } */
+/* { dg-final { scan-assembler "\\\$17" } } */
+/* { dg-final { scan-assembler "\\\$18" } } */
+/* { dg-final { scan-assembler "\\\$19" } } */
+/* { dg-final { scan-assembler "\\\$20" } } */
+/* { dg-final { scan-assembler "\\\$21" } } */
+/* { dg-final { scan-assembler "\\\$22" } } */
+/* { dg-final { scan-assembler "\\\$23" } } */
+/* { dg-final { scan-assembler "\\\$(30|fp)" } } */
+/* { dg-final { scan-assembler "\\\$f20" } } */
+/* { dg-final { scan-assembler "\\\$f22" } } */
+/* { dg-final { scan-assembler "\\\$f24" } } */
+/* { dg-final { scan-assembler "\\\$f26" } } */
+/* { dg-final { scan-assembler "\\\$f28" } } */
+/* { dg-final { scan-assembler "\\\$f30" } } */
+/* { dg-final { scan-assembler-not "\\\$f21" } } */
+/* { dg-final { scan-assembler-not "\\\$f23" } } */
+/* { dg-final { scan-assembler-not "\\\$f25" } } */
+/* { dg-final { scan-assembler-not "\\\$f27" } } */
+/* { dg-final { scan-assembler-not "\\\$f29" } } */
+/* { dg-final { scan-assembler-not "\\\$f31" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-5.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-5.c
new file mode 100644
index 000000000..2937b316d
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-5.c
@@ -0,0 +1,32 @@
+/* Check that we save the correct call-saved GPRs and FPRs. */
+/* { dg-options "-mabi=32 -mfpxx" } */
+
+void bar (void);
+
+void
+foo (int x)
+{
+ __builtin_unwind_init ();
+ __builtin_eh_return (x, bar);
+}
+/* { dg-final { scan-assembler "\\\$16" } } */
+/* { dg-final { scan-assembler "\\\$17" } } */
+/* { dg-final { scan-assembler "\\\$18" } } */
+/* { dg-final { scan-assembler "\\\$19" } } */
+/* { dg-final { scan-assembler "\\\$20" } } */
+/* { dg-final { scan-assembler "\\\$21" } } */
+/* { dg-final { scan-assembler "\\\$22" } } */
+/* { dg-final { scan-assembler "\\\$23" } } */
+/* { dg-final { scan-assembler "\\\$(30|fp)" } } */
+/* { dg-final { scan-assembler "\\\$f20" } } */
+/* { dg-final { scan-assembler "\\\$f22" } } */
+/* { dg-final { scan-assembler "\\\$f24" } } */
+/* { dg-final { scan-assembler "\\\$f26" } } */
+/* { dg-final { scan-assembler "\\\$f28" } } */
+/* { dg-final { scan-assembler "\\\$f30" } } */
+/* { dg-final { scan-assembler-not "\\\$f21" } } */
+/* { dg-final { scan-assembler-not "\\\$f23" } } */
+/* { dg-final { scan-assembler-not "\\\$f25" } } */
+/* { dg-final { scan-assembler-not "\\\$f27" } } */
+/* { dg-final { scan-assembler-not "\\\$f29" } } */
+/* { dg-final { scan-assembler-not "\\\$f31" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-6.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-6.c
new file mode 100644
index 000000000..0d1a4c845
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/call-saved-6.c
@@ -0,0 +1,32 @@
+/* Check that we save the correct call-saved GPRs and FPRs. */
+/* { dg-options "-mabi=32 -mfp64" } */
+
+void bar (void);
+
+void
+foo (int x)
+{
+ __builtin_unwind_init ();
+ __builtin_eh_return (x, bar);
+}
+/* { dg-final { scan-assembler "\\\$16" } } */
+/* { dg-final { scan-assembler "\\\$17" } } */
+/* { dg-final { scan-assembler "\\\$18" } } */
+/* { dg-final { scan-assembler "\\\$19" } } */
+/* { dg-final { scan-assembler "\\\$20" } } */
+/* { dg-final { scan-assembler "\\\$21" } } */
+/* { dg-final { scan-assembler "\\\$22" } } */
+/* { dg-final { scan-assembler "\\\$23" } } */
+/* { dg-final { scan-assembler "\\\$(30|fp)" } } */
+/* { dg-final { scan-assembler "\\\$f20" } } */
+/* { dg-final { scan-assembler "\\\$f22" } } */
+/* { dg-final { scan-assembler "\\\$f24" } } */
+/* { dg-final { scan-assembler "\\\$f26" } } */
+/* { dg-final { scan-assembler "\\\$f28" } } */
+/* { dg-final { scan-assembler "\\\$f30" } } */
+/* { dg-final { scan-assembler-not "\\\$f21" } } */
+/* { dg-final { scan-assembler-not "\\\$f23" } } */
+/* { dg-final { scan-assembler-not "\\\$f25" } } */
+/* { dg-final { scan-assembler-not "\\\$f27" } } */
+/* { dg-final { scan-assembler-not "\\\$f29" } } */
+/* { dg-final { scan-assembler-not "\\\$f31" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/dmult-1.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/dmult-1.c
index f8c0b8b44..92573168d 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/dmult-1.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/dmult-1.c
@@ -1,4 +1,4 @@
-/* { dg-options "forbid_cpu=octeon.* -mgp64" } */
+/* { dg-options "isa_rev<=5 forbid_cpu=octeon.* -mgp64" } */
/* { dg-final { scan-assembler "\tdmult\t" } } */
/* { dg-final { scan-assembler "\tmflo\t" } } */
/* { dg-final { scan-assembler-not "\tdmul\t" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/fpcmp-1.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/fpcmp-1.c
index c0594ff35..03c2f7926 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/fpcmp-1.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/fpcmp-1.c
@@ -1,5 +1,5 @@
/* We used to use c.lt.fmt instead of c.ule.fmt here. */
-/* { dg-options "-mhard-float" } */
+/* { dg-options "isa_rev<=5 -mhard-float" } */
NOMIPS16 int f1 (float x, float y) { return __builtin_isless (x, y); }
NOMIPS16 int f2 (double x, double y) { return __builtin_isless (x, y); }
/* { dg-final { scan-assembler "\tc\\.ule\\.s\t" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/fpcmp-2.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/fpcmp-2.c
index 23d5cb0c4..6936b9009 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/fpcmp-2.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/fpcmp-2.c
@@ -1,5 +1,5 @@
/* We used to use c.le.fmt instead of c.ult.fmt here. */
-/* { dg-options "-mhard-float" } */
+/* { dg-options "isa_rev<=5 -mhard-float" } */
NOMIPS16 int f1 (float x, float y) { return __builtin_islessequal (x, y); }
NOMIPS16 int f2 (double x, double y) { return __builtin_islessequal (x, y); }
/* { dg-final { scan-assembler "\tc\\.ult\\.s\t" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/madd-3.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/madd-3.c
index 29f4c9b37..55e05e78d 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/madd-3.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/madd-3.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "isa_rev>=1 -mgp32" } */
+/* { dg-options "isa_rev>=1 -mgp32 forbid_cpu=mips.*r6" } */
/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
/* { dg-final { scan-assembler-times "\tmadd\t" 3 } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/madd-9.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/madd-9.c
index 28681a910..d89a9fd3e 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/madd-9.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/madd-9.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "isa_rev>=1 -mgp32 -mtune=4kc" } */
+/* { dg-options "isa_rev>=1 -mgp32 -mtune=4kc forbid_cpu=mips.*r6" } */
/* References to X within the loop need to have a higher frequency than
references to X outside the loop, otherwise there is no reason
to prefer multiply/accumulator registers over GPRs. */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/maddu-3.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/maddu-3.c
index 27a7350f0..14278f26a 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/maddu-3.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/maddu-3.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
/* This test requires widening_mul */
-/* { dg-options "isa_rev>=1 -mgp32 -fexpensive-optimizations" } */
+/* { dg-options "isa_rev>=1 -mgp32 -fexpensive-optimizations forbid_cpu=mips.*r6" } */
/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
/* { dg-final { scan-assembler-times "\tmaddu\t" 3 } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/mips-ps-type-2.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/mips-ps-type-2.c
index f52cf91e8..e3b441b01 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/mips-ps-type-2.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/mips-ps-type-2.c
@@ -1,7 +1,7 @@
/* Test v2sf calculations. The nmadd and nmsub patterns need
-ffinite-math-only. */
/* { dg-do compile } */
-/* { dg-options "isa_rev>=2 -mgp32 -mpaired-single -ffinite-math-only" } */
+/* { dg-options "isa_rev>=2 -mgp32 -mpaired-single -ffinite-math-only forbid_cpu=mips.*r6" } */
/* { dg-skip-if "nmadd and nmsub need combine" { *-*-* } { "-O0" } { "" } } */
/* { dg-final { scan-assembler "\tcvt.ps.s\t" } } */
/* { dg-final { scan-assembler "\tmov.ps\t" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/mips.exp b/gcc-4.9/gcc/testsuite/gcc.target/mips/mips.exp
index 8c72cff72..ec0b6f80c 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/mips.exp
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/mips.exp
@@ -235,7 +235,7 @@ set mips_option_groups {
endianness "-E(L|B)|-me(l|b)"
float "-m(hard|soft)-float"
forbid_cpu "forbid_cpu=.*"
- fp "-mfp(32|64)"
+ fp "-mfp(32|xx|64)"
gp "-mgp(32|64)"
long "-mlong(32|64)"
micromips "-mmicromips|-mno-micromips"
@@ -248,6 +248,10 @@ set mips_option_groups {
dump "-fdump-.*"
}
+for { set option 0 } { $option < 32 } { incr option } {
+ lappend mips_option_groups "fixed-f$option" "-ffixed-f$option"
+}
+
# Add -mfoo/-mno-foo options to mips_option_groups.
foreach option {
abicalls
@@ -270,6 +274,8 @@ foreach option {
synci
relax-pic-calls
mcount-ra-address
+ odd-spreg
+ msa
} {
lappend mips_option_groups $option "-m(no-|)$option"
}
@@ -722,8 +728,12 @@ proc mips-dg-init {} {
#if __mips_fpr == 64
"-mfp64",
#else
+ #if __mips_fpr == 0
+ "-mfpxx",
+ #else
"-mfp32",
#endif
+ #endif
#ifdef __mips64
"-mgp64",
@@ -755,6 +765,12 @@ proc mips-dg-init {} {
"-mno-paired-single",
#endif
+ #if _MIPS_SPFPSET == 32
+ "-modd-spreg",
+ #else
+ "-mno-odd-spreg",
+ #endif
+
#if __mips_abicalls
"-mabicalls",
#else
@@ -789,6 +805,12 @@ proc mips-dg-init {} {
"-mno-synci",
#endif
+ #ifdef __mips_msa
+	 "-mmsa",
+	 #else
+	 "-mno-msa",
+ #endif
+
0
};
}]
@@ -840,6 +862,8 @@ proc mips-dg-finish {} {
# | |
# -mfp64 -mfp32
# | |
+# -modd-spreg -mno-odd-spreg
+# | |
# -mabs=2008/-mabs=legacy <no option>
# | |
# -mhard-float -msoft-float
@@ -929,6 +953,7 @@ proc mips-dg-options { args } {
mips_option_dependency options "-mips3d" "-mpaired-single"
mips_option_dependency options "-mpaired-single" "-mfp64"
mips_option_dependency options "-mfp64" "-mhard-float"
+ mips_option_dependency options "-mfp64" "-modd-spreg"
mips_option_dependency options "-mabs=2008" "-mhard-float"
mips_option_dependency options "-mabs=legacy" "-mhard-float"
mips_option_dependency options "-mrelax-pic-calls" "-mno-plt"
@@ -973,20 +998,25 @@ proc mips-dg-options { args } {
set arch "-march=loongson2f"
}
} else {
- if { ![regexp {^(isa(?:|_rev))(=|<=|>=)([0-9]*)$} \
- $spec dummy prop relation value nocpus] } {
+ if { ![regexp {^(isa(?:|_rev))(=|<=|>=)([0-9]*)-?([0-9]*)$} \
+ $spec dummy prop relation minvalue maxvalue nocpus] } {
error "Unrecognized isa specification: $spec"
}
+ if { ![string equal $maxvalue ""] && ![string equal $relation "="] } {
+ error "Unsupported use of isa ranges: $spec"
+	    } elseif { [string equal $maxvalue ""] } {
+ set maxvalue $minvalue
+ }
set current [mips_arch_info $arch $prop]
if { $force_generic_isa_p
- || ($current < $value && ![string equal $relation "<="])
- || ($current > $value && ![string equal $relation ">="])
+ || ($current < $minvalue && ![string equal $relation "<="])
+ || ($current > $maxvalue && ![string equal $relation ">="])
|| ([mips_have_test_option_p options "-mgp64"]
&& [mips_32bit_arch_p $arch]) } {
# The current setting is out of range; it cannot
# possibly be used. Find a replacement that can.
if { [string equal $prop "isa"] } {
- set arch "-mips$value"
+ set arch "-mips$maxvalue"
} elseif { $value == 0 } {
set arch "-mips4"
} else {
@@ -995,8 +1025,8 @@ proc mips-dg-options { args } {
} else {
set arch "-mips64"
}
- if { $value > 1 } {
- append arch "r$value"
+ if { $maxvalue > 1 } {
+ append arch "r$maxvalue"
}
}
}
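The hunk above extends the isa/isa_rev specifiers in dg-options so that a test can name a revision window as well as a single value; ranges are only accepted with '=', matching the error check above. Purely as an illustration (no test in this patch uses a range), a hypothetical test that must run on r2 through r5 could say:

/* { dg-options "isa_rev=2-5" } */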
@@ -1045,10 +1075,13 @@ proc mips-dg-options { args } {
# We need a MIPS32 or MIPS64 ISA for:
#
# - paired-single instructions(*)
+ # - odd numbered single precision registers
#
# (*) Note that we don't support MIPS V at the moment.
} elseif { $isa_rev < 1
- && [mips_have_test_option_p options "-mpaired-single"] } {
+ && ([mips_have_test_option_p options "-mpaired-single"]
+ || ([mips_have_test_option_p options "-modd-spreg"]
+ && ![mips_have_test_option_p options "-mfp64"]))} {
if { $gp_size == 32 } {
mips_make_test_option options "-mips32"
} else {
@@ -1070,8 +1103,30 @@ proc mips-dg-options { args } {
# (*) needed by both -mbranch-likely and -mfix-r10000
} elseif { $isa < 2
&& ([mips_have_test_option_p options "-mbranch-likely"]
- || [mips_have_test_option_p options "-mfix-r10000"]) } {
+ || [mips_have_test_option_p options "-mfix-r10000"]
+ || ($gp_size == 32
+ && [mips_have_test_option_p options "-mfpxx"])) } {
mips_make_test_option options "-mips2"
+ # Check whether we need to switch from mips*r6 down to mips*r5 due
+ # to options that are incompatible with mips*r6. If we do, use
+ # -mnan=2008 because r6 is nan2008 by default and without this flag
+ # tests that include stdlib.h will fail due to not finding
+ # stubs-o32_hard.h (r6 compilers only have stubs-o32_hard_2008.h)
+ } elseif { $isa_rev > 5
+ && ([mips_have_test_option_p options "-mdsp"]
+ || [mips_have_test_option_p options "-mdspr2"]
+ || [mips_have_test_option_p options "-mips16"]
+ || [mips_have_test_option_p options "-mfp32"]
+ || [mips_have_test_option_p options "-mfix-r10000"]
+ || [mips_have_test_option_p options "-mpaired-single"]
+ || [mips_have_test_option_p options "-mnan=legacy"]
+ || [mips_have_test_option_p options "-mabs=legacy"]) } {
+ if { $gp_size == 32 } {
+ mips_make_test_option options "-mips32r5"
+ } else {
+ mips_make_test_option options "-mips64r5"
+ }
+ mips_make_test_option options "-mnan=2008"
# Check whether we need to switch from a 32-bit processor to the
# "nearest" 64-bit processor.
} elseif { $gp_size == 64 && [mips_32bit_arch_p $arch] } {
@@ -1096,6 +1151,10 @@ proc mips-dg-options { args } {
unset isa_rev
}
+ # Re-calculate the isa_rev for use in the abi handling code below
+ set arch [mips_option options arch]
+ set isa_rev [mips_arch_info $arch isa_rev]
+
# Set an appropriate ABI, handling dependencies between the pre-abi
# options and the abi options. This should mirror the abi and post-abi
# code below.
@@ -1121,6 +1180,9 @@ proc mips-dg-options { args } {
} elseif { [mips_have_test_option_p options "-mlong64"]
&& [mips_long32_abi_p $abi] } {
set force_abi 1
+ } elseif { [mips_have_test_option_p options "-mfpxx"]
+ && ![mips_same_option_p $abi "-mabi=32"] } {
+ set force_abi 1
} else {
set force_abi 0
}
@@ -1157,8 +1219,8 @@ proc mips-dg-options { args } {
if { $abi_test_option_p } {
if { $eabi_p } {
mips_make_test_option options "-mno-abicalls"
- if { $gp_size == 32 } {
- mips_make_test_option options "-mfp32"
+ if { $isa_rev < 6 && $gp_size == 32 } {
+ mips_make_test_option options "-mfp32"
}
}
if { [mips_using_mips16_p options]
@@ -1192,6 +1254,7 @@ proc mips-dg-options { args } {
}
if { $isa_rev < 1 } {
mips_make_test_option options "-mno-paired-single"
+ mips_make_test_option options "-mno-odd-spreg"
}
if { $isa_rev < 2 } {
if { $gp_size == 32 } {
@@ -1200,6 +1263,17 @@ proc mips-dg-options { args } {
mips_make_test_option options "-mno-dsp"
mips_make_test_option options "-mno-synci"
}
+ if { $isa_rev > 5 } {
+ mips_make_test_option options "-mno-dsp"
+ mips_make_test_option options "-mno-mips16"
+ if { [mips_have_test_option_p options "-mdsp"] } {
+ mips_make_test_option options "-mfp64"
+ }
+ mips_make_test_option options "-mno-fix-r10000"
+ mips_make_test_option options "-mno-paired-single"
+ mips_make_test_option options "-mnan=2008"
+ mips_make_test_option options "-mabs=2008"
+ }
unset arch
unset isa
unset isa_rev
@@ -1222,6 +1296,7 @@ proc mips-dg-options { args } {
mips_option_dependency options "-mplt" "-mno-shared"
mips_option_dependency options "-mno-shared" "-fno-pic"
mips_option_dependency options "-mfp32" "-mno-paired-single"
+ mips_option_dependency options "-mfpxx" "-mno-paired-single"
mips_option_dependency options "-msoft-float" "-mno-paired-single"
mips_option_dependency options "-mno-paired-single" "-mno-mips3d"
@@ -1243,7 +1318,9 @@ proc mips-dg-options { args } {
foreach group $mips_abi_groups {
set old_option [mips_original_option $group]
set new_option [mips_option options $group]
- if { ![mips_same_option_p $old_option $new_option] } {
+ if { ![mips_same_option_p $old_option $new_option]
+ && ![mips_same_option_p $old_option "-mfpxx"]
+ && ![mips_same_option_p $new_option "-mfpxx"] } {
switch -- [lindex $do_what 0] {
link -
run {
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-1.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-1.c
index b3fe188d2..435e5fefc 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-1.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "isa>=4" } */
+/* { dg-options "isa>=4 forbid_cpu=mips.*r6" } */
/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
/* { dg-final { scan-assembler "\tmovz\t" } } */
/* { dg-final { scan-assembler "\tmovn\t" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-2.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-2.c
index 2638d51fd..95130eb03 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-2.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "isa>=4" } */
+/* { dg-options "isa>=4 forbid_cpu=mips.*r6" } */
/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
/* { dg-final { scan-assembler "\tmovz\t" } } */
/* { dg-final { scan-assembler "\tmovn\t" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-3.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-3.c
index f356465c8..07f06b42c 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-3.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/movcc-3.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "isa>=4 -mhard-float" } */
+/* { dg-options "isa>=4 -mhard-float forbid_cpu=mips.*r6" } */
/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
/* { dg-final { scan-assembler "\tmovt\t" } } */
/* { dg-final { scan-assembler "\tmovf\t" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-1.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-1.c
new file mode 100644
index 000000000..54a4634f7
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-1.c
@@ -0,0 +1,13 @@
+/* Check that we move DFmode values via memory between FP and GP. */
+/* { dg-options "-mabi=32 -mfpxx isa=2" } */
+
+void bar (void);
+
+double
+foo (int x, double a)
+{
+ return a;
+}
+/* { dg-final { scan-assembler-not "mthc1" } } */
+/* { dg-final { scan-assembler-not "mtc1" } } */
+/* { dg-final { scan-assembler-times "ldc1" 1 } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-2.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-2.c
new file mode 100644
index 000000000..0390843de
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-2.c
@@ -0,0 +1,13 @@
+/* Check that we move DFmode values using mthc between FP and GP. */
+/* { dg-options "-mabi=32 -mfpxx isa_rev=2" } */
+
+void bar (void);
+
+double
+foo (int x, double a)
+{
+ return a;
+}
+/* { dg-final { scan-assembler "mthc1" } } */
+/* { dg-final { scan-assembler "mtc1" } } */
+/* { dg-final { scan-assembler-not "ldc1" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-3.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-3.c
new file mode 100644
index 000000000..f89747378
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/movdf-3.c
@@ -0,0 +1,12 @@
+/* Check that we move DFmode values using mtc1 between FP and GP. */
+/* { dg-options "-mabi=32 -mfp32 isa=2" } */
+
+void bar (void);
+
+double
+foo (int x, double a)
+{
+ return a;
+}
+/* { dg-final { scan-assembler-times "mtc1" 2 } } */
+/* { dg-final { scan-assembler-not "ldc1" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/msa-type.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/msa-type.c
new file mode 100644
index 000000000..1d4817f0a
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/msa-type.c
@@ -0,0 +1,254 @@
+/* Test MIPS MSA ASE instructions */
+/* { dg-do compile } */
+/* { dg-options "-mfp64 -mhard-float -mmsa" } */
+/* { dg-skip-if "madd and msub need combine" { *-*-* } { "-O0" } { "" } } */
+/* { dg-final { scan-assembler-times "\taddv.b\t" 2 } } */
+/* { dg-final { scan-assembler-times "\taddv.h\t" 2 } } */
+/* { dg-final { scan-assembler-times "\taddv.w\t" 2 } } */
+/* { dg-final { scan-assembler-times "\taddv.d\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tfadd.w\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tfadd.d\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tsubv.b\t" 4 } } */
+/* { dg-final { scan-assembler-times "\tsubv.h\t" 4 } } */
+/* { dg-final { scan-assembler-times "\tsubv.w\t" 4 } } */
+/* { dg-final { scan-assembler-times "\tsubv.d\t" 4 } } */
+/* { dg-final { scan-assembler-times "\tfsub.w\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tfsub.d\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmulv.b\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmulv.h\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmulv.w\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmulv.d\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tfmul.w\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tfmul.d\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tdiv_s.b\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tdiv_s.h\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tdiv_s.w\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tdiv_s.d\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tfdiv.w\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tfdiv.d\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tdiv_u.b\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tdiv_u.h\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tdiv_u.w\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tdiv_u.d\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tmod_s.b\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tmod_s.h\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tmod_s.w\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tmod_s.d\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tmod_u.b\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tmod_u.h\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tmod_u.w\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tmod_u.d\t" 1 } } */
+/* { dg-final { scan-assembler-times "\txor.v\t" 8 } } */
+/* { dg-final { scan-assembler-times "\tor.v\t" 8 } } */
+/* { dg-final { scan-assembler-times "\tand.v\t" 8 } } */
+/* { dg-final { scan-assembler-times "\tsra.b\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tsra.h\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tsra.w\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tsra.d\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tsrl.b\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tsrl.h\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tsrl.w\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tsrl.d\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tsll.b\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tsll.h\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tsll.w\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tsll.d\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tldi.b\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tldi.h\t" 4 } } */
+/* { dg-final { scan-assembler-times "\tldi.w\t" 5 } } */
+/* { dg-final { scan-assembler-times "\tldi.d\t" 5 } } */
+/* { dg-final { scan-assembler-times "\tnor.v\t" 6 } } */
+/* { dg-final { scan-assembler-times "\tnori.b\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmaddv.b\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmaddv.h\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmaddv.w\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmaddv.d\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmove.v\t" 40 } } */
+/* { dg-final { scan-assembler-times "\tfmadd.w\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tfmadd.d\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tmsubv.b\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmsubv.h\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmsubv.w\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tmsubv.d\t" 2 } } */
+/* { dg-final { scan-assembler-times "\tfmsub.w\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tfmsub.d\t" 1 } } */
+/* { dg-final { scan-assembler-times "\tvshf.b\t" 4 } } */
+/* { dg-final { scan-assembler-times "\tvshf.h\t" 4 } } */
+/* { dg-final { scan-assembler-times "\tvshf.w\t" 6 } } */
+/* { dg-final { scan-assembler-times "\tvshf.d\t" 6 } } */
+
+typedef signed char v16i8 __attribute__ ((vector_size(16)));
+typedef short v8i16 __attribute__ ((vector_size(16)));
+typedef int v4i32 __attribute__ ((vector_size(16)));
+typedef long long v2i64 __attribute__ ((vector_size(16)));
+typedef unsigned char v16u8 __attribute__ ((vector_size(16)));
+typedef unsigned short v8u16 __attribute__ ((vector_size(16)));
+typedef unsigned int v4u32 __attribute__ ((vector_size(16)));
+typedef unsigned long long v2u64 __attribute__ ((vector_size(16)));
+typedef float v4f32 __attribute__ ((vector_size(16)));
+typedef double v2f64 __attribute__ ((vector_size(16)));
+
+/*
+typedef signed char v8i8 __attribute__ ((vector_size(8)));
+typedef short v4i16 __attribute__ ((vector_size(8)));
+typedef int v2i32 __attribute__ ((vector_size(8)));
+typedef float v2f32 __attribute__ ((vector_size(8)));
+
+typedef signed char v4i8 __attribute__ ((vector_size(4)));
+typedef short v2i16 __attribute__ ((vector_size(4)));
+*/
+
+typedef long long i64;
+typedef int i32;
+typedef short i16;
+typedef signed char i8;
+typedef double f64;
+typedef float f32;
+
+#define DECLARE(TYPE) TYPE TYPE ## _0, TYPE ## _1, TYPE ## _2;
+#define RETURN(TYPE) NOMIPS16 TYPE test0_ ## TYPE () { return TYPE ## _0; }
+#define ASSIGN(TYPE) NOMIPS16 void test1_ ## TYPE (TYPE i) { TYPE ## _1 = i; }
+#define ADD(TYPE) NOMIPS16 TYPE test2_ ## TYPE (TYPE i, TYPE j) { return i + j; }
+#define SUB(TYPE) NOMIPS16 TYPE test3_ ## TYPE (TYPE i, TYPE j) { return i - j; }
+#define MUL(TYPE) NOMIPS16 TYPE test4_ ## TYPE (TYPE i, TYPE j) { return i * j; }
+#define DIV(TYPE) TYPE test5_ ## TYPE (TYPE i, TYPE j) { return i / j; }
+#define MOD(TYPE) TYPE test6_ ## TYPE (TYPE i, TYPE j) { return i % j; }
+#define MINUS(TYPE) TYPE test7_ ## TYPE (TYPE i) { return -i; }
+#define XOR(TYPE) TYPE test8_ ## TYPE (TYPE i, TYPE j) { return i ^ j; }
+#define OR(TYPE) TYPE test9_ ## TYPE (TYPE i, TYPE j) { return i | j; }
+#define AND(TYPE) TYPE test10_ ## TYPE (TYPE i, TYPE j) { return i & j; }
+#define BIT_COMPLEMENT(TYPE) TYPE test11_ ## TYPE (TYPE i) { return ~i; }
+#define SHIFT_RIGHT(TYPE) TYPE test12_ ## TYPE (TYPE i, TYPE j) { return i >> j; }
+#define SHIFT_LEFT(TYPE) TYPE test13_ ## TYPE (TYPE i, TYPE j) { return i << j; }
+#define EQ(TYPE) TYPE test14_ ## TYPE (TYPE i, TYPE j) { return i == j; }
+#define NEQ(TYPE) TYPE test15_ ## TYPE (TYPE i, TYPE j) { return i != j; }
+#define LT(TYPE) TYPE test16_ ## TYPE (TYPE i, TYPE j) { return i < j; }
+#define LEQ(TYPE) TYPE test17_ ## TYPE (TYPE i, TYPE j) { return i <= j; }
+#define GT(TYPE) TYPE test18_ ## TYPE (TYPE i, TYPE j) { return i > j; }
+#define GEQ(TYPE) TYPE test19_ ## TYPE (TYPE i, TYPE j) { return i >= j; }
+
+#define ADD_I(TYPE) TYPE test20_ ## TYPE (TYPE i) { return i + 37; }
+#define SUB_I(TYPE) TYPE test21_ ## TYPE (TYPE i) { return i - 37; }
+#define MUL_I(TYPE) TYPE test22_ ## TYPE (TYPE i) { return i * 37; }
+#define DIV_I(TYPE) TYPE test23_ ## TYPE (TYPE i) { return i / 37; }
+#define MOD_I(TYPE) TYPE test24_ ## TYPE (TYPE i) { return i % 37; }
+#define XOR_I(TYPE) TYPE test25_ ## TYPE (TYPE i) { return i ^ 37; }
+#define OR_I(TYPE) TYPE test26_ ## TYPE (TYPE i) { return i | 37; }
+#define AND_I(TYPE) TYPE test27_ ## TYPE (TYPE i) { return i & 37; }
+#define SHIFT_RIGHT_I(TYPE) TYPE test28_ ## TYPE (TYPE i) { return i >> 3; }
+#define SHIFT_LEFT_I(TYPE) TYPE test29_ ## TYPE (TYPE i) { return i << 3; }
+
+#define ADD_F(TYPE) TYPE test30_ ## TYPE (TYPE i) { return i + 37.0; }
+#define SUB_F(TYPE) TYPE test31_ ## TYPE (TYPE i) { return i - 37.0; }
+#define MUL_F(TYPE) TYPE test32_ ## TYPE (TYPE i) { return i * 37.0; }
+#define DIV_F(TYPE) TYPE test33_ ## TYPE (TYPE i) { return i / 37.0; }
+
+#define SHUFFLE1(TYPE) TYPE test34_ ## TYPE (TYPE i, TYPE mask) { return __builtin_shuffle (i, mask); }
+#define SHUFFLE2(TYPE) TYPE test35_ ## TYPE (TYPE i, TYPE j, TYPE mask) { return __builtin_shuffle (i, j, mask); }
+
+#define REAL_SHUFFLE1(TYPE, MASK_TYPE) TYPE test36_ ## TYPE (TYPE i, MASK_TYPE mask) { return __builtin_shuffle (i, mask); }
+#define REAL_SHUFFLE2(TYPE, MASK_TYPE) TYPE test37_ ## TYPE (TYPE i, TYPE j, MASK_TYPE mask) { return __builtin_shuffle (i, j, mask); }
+
+#define MADD(TYPE) TYPE test38_ ## TYPE (TYPE i, TYPE j, TYPE k) { return i * j + k; }
+#define MSUB(TYPE) TYPE test39_ ## TYPE (TYPE i, TYPE j, TYPE k) { return k - i * j; }
+
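+/* For illustration: ADD (v16i8) above defines
+     NOMIPS16 v16i8 test2_v16i8 (v16i8 i, v16i8 j) { return i + j; }
+   (NOMIPS16 is supplied by the MIPS testsuite harness), and the ITERATE_*
+   macros below instantiate one such function per enabled vector type.  */
+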
+#define ITERATE_FOR_ALL_INT_VECTOR_TYPES(FUNC) \
+ FUNC (v16i8) \
+ FUNC (v8i16) \
+ FUNC (v4i32) \
+ FUNC (v2i64) \
+ FUNC (v16u8) \
+ FUNC (v8u16) \
+ FUNC (v4u32) \
+ FUNC (v2u64)
+
+/*
+ FUNC (v8i8) \
+ FUNC (v4i16) \
+ FUNC (v2i32) \
+ FUNC (v4i8) \
+ FUNC (v2i16)
+*/
+
+#define ITERATE_FOR_ALL_INT_SCALAR_TYPES(FUNC) \
+ FUNC (i64) \
+ FUNC (i32) \
+ FUNC (i16) \
+ FUNC (i8)
+
+#define ITERATE_FOR_ALL_INT_TYPES(FUNC) \
+ ITERATE_FOR_ALL_INT_VECTOR_TYPES(FUNC) \
+
+/*
+ ITERATE_FOR_ALL_INT_SCALAR_TYPES(FUNC)
+*/
+
+#define ITERATE_FOR_ALL_REAL_VECTOR_TYPES(FUNC) \
+ FUNC (v4f32) \
+ FUNC (v2f64) \
+
+/*
+ FUNC (v2f32)
+*/
+
+#define ITERATE_FOR_ALL_REAL_SCALAR_TYPES(FUNC) \
+ FUNC (f64) \
+ FUNC (f32)
+
+#define ITERATE_FOR_ALL_REAL_TYPES(FUNC) \
+ ITERATE_FOR_ALL_REAL_VECTOR_TYPES(FUNC) \
+
+/*
+ ITERATE_FOR_ALL_REAL_SCALAR_TYPES(FUNC)
+*/
+
+#define ITERATE_FOR_ALL_TYPES(FUNC) \
+ ITERATE_FOR_ALL_INT_TYPES(FUNC) \
+ ITERATE_FOR_ALL_REAL_TYPES(FUNC)
+
+ITERATE_FOR_ALL_TYPES (ADD)
+ITERATE_FOR_ALL_TYPES (SUB)
+ITERATE_FOR_ALL_TYPES (MUL)
+ITERATE_FOR_ALL_TYPES (DIV)
+ITERATE_FOR_ALL_INT_TYPES (MOD)
+ITERATE_FOR_ALL_INT_TYPES (XOR)
+ITERATE_FOR_ALL_INT_TYPES (OR)
+ITERATE_FOR_ALL_INT_TYPES (AND)
+ITERATE_FOR_ALL_INT_TYPES (SHIFT_RIGHT)
+ITERATE_FOR_ALL_INT_TYPES (SHIFT_LEFT)
+ITERATE_FOR_ALL_TYPES (MINUS)
+ITERATE_FOR_ALL_INT_TYPES (BIT_COMPLEMENT)
+ITERATE_FOR_ALL_TYPES (MADD)
+ITERATE_FOR_ALL_TYPES (MSUB)
+ITERATE_FOR_ALL_INT_VECTOR_TYPES (SHUFFLE1)
+ITERATE_FOR_ALL_INT_VECTOR_TYPES (SHUFFLE2)
+REAL_SHUFFLE1 (v2f64, v2i64)
+REAL_SHUFFLE2 (v2f64, v2i64)
+REAL_SHUFFLE1 (v4f32, v4i32)
+REAL_SHUFFLE2 (v4f32, v4i32)
+
+/*
+ITERATE_FOR_ALL_TYPES (DECLARE)
+ITERATE_FOR_ALL_TYPES (RETURN)
+ITERATE_FOR_ALL_TYPES (ASSIGN)
+ITERATE_FOR_ALL_INT_TYPES (ADD_I)
+ITERATE_FOR_ALL_INT_TYPES (SUB_I)
+ITERATE_FOR_ALL_INT_TYPES (MUL_I)
+ITERATE_FOR_ALL_INT_TYPES (DIV_I)
+ITERATE_FOR_ALL_INT_TYPES (MOD_I)
+ITERATE_FOR_ALL_INT_TYPES (XOR_I)
+ITERATE_FOR_ALL_INT_TYPES (OR_I)
+ITERATE_FOR_ALL_INT_TYPES (AND_I)
+ITERATE_FOR_ALL_INT_TYPES (SHIFT_RIGHT_I)
+ITERATE_FOR_ALL_INT_TYPES (SHIFT_LEFT_I)
+ITERATE_FOR_ALL_REAL_TYPES (ADD_F)
+ITERATE_FOR_ALL_REAL_TYPES (SUB_F)
+ITERATE_FOR_ALL_REAL_TYPES (MUL_F)
+ITERATE_FOR_ALL_REAL_TYPES (DIV_F)
+ITERATE_FOR_ALL_TYPES (EQ)
+ITERATE_FOR_ALL_TYPES (NEQ)
+ITERATE_FOR_ALL_TYPES (LT)
+ITERATE_FOR_ALL_TYPES (LEQ)
+ITERATE_FOR_ALL_TYPES (GT)
+ITERATE_FOR_ALL_TYPES (GEQ)
+*/
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/msa.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/msa.c
new file mode 100644
index 000000000..19ecfe828
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/msa.c
@@ -0,0 +1,151 @@
+/* Test MIPS MSA ASE instructions */
+/* { dg-do compile } */
+/* { dg-options "-mips32r2 -mfp64 -mhard-float -mmsa" } */
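+/* The test pairs -mmsa with a MIPS32r2 base ISA and the 64-bit FPU
+   register file (-mfp64 -mhard-float) that MSA requires.  */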
+
+typedef signed char v16i8 __attribute__ ((vector_size(16)));
+typedef short v8i16 __attribute__ ((vector_size(16)));
+typedef int v4i32 __attribute__ ((vector_size(16)));
+typedef long long v2i64 __attribute__ ((vector_size(16)));
+typedef float v4f32 __attribute__ ((vector_size(16)));
+typedef double v2f64 __attribute__ ((vector_size(16)));
+
+typedef signed char v8i8 __attribute__ ((vector_size(8)));
+typedef short v4i16 __attribute__ ((vector_size(8)));
+typedef int v2i32 __attribute__ ((vector_size(8)));
+typedef float v2f32 __attribute__ ((vector_size(8)));
+
+typedef signed char v4i8 __attribute__ ((vector_size(4)));
+typedef short v2i16 __attribute__ ((vector_size(4)));
+
+typedef long long i64;
+typedef int i32;
+typedef short i16;
+typedef signed char i8;
+typedef double f64;
+typedef float f32;
+
+#define DECLARE(TYPE) TYPE TYPE ## _0, TYPE ## _1, TYPE ## _2;
+#define RETURN(TYPE) TYPE test0_ ## TYPE () { return TYPE ## _0; }
+#define ASSIGN(TYPE) void test1_ ## TYPE (TYPE i) { TYPE ## _1 = i; }
+#define ADD(TYPE) TYPE test2_ ## TYPE (TYPE i, TYPE j) { return i + j; }
+#define SUB(TYPE) TYPE test3_ ## TYPE (TYPE i, TYPE j) { return i - j; }
+#define MUL(TYPE) TYPE test4_ ## TYPE (TYPE i, TYPE j) { return i * j; }
+#define DIV(TYPE) TYPE test5_ ## TYPE (TYPE i, TYPE j) { return i / j; }
+#define MOD(TYPE) TYPE test6_ ## TYPE (TYPE i, TYPE j) { return i % j; }
+#define MINUS(TYPE) TYPE test7_ ## TYPE (TYPE i) { return -i; }
+#define XOR(TYPE) TYPE test8_ ## TYPE (TYPE i, TYPE j) { return i ^ j; }
+#define OR(TYPE) TYPE test9_ ## TYPE (TYPE i, TYPE j) { return i | j; }
+#define AND(TYPE) TYPE test10_ ## TYPE (TYPE i, TYPE j) { return i & j; }
+#define BIT_COMPLEMENT(TYPE) TYPE test11_ ## TYPE (TYPE i) { return ~i; }
+#define SHIFT_RIGHT(TYPE) TYPE test12_ ## TYPE (TYPE i, TYPE j) { return i >> j; }
+#define SHIFT_LEFT(TYPE) TYPE test13_ ## TYPE (TYPE i, TYPE j) { return i << j; }
+#define EQ(TYPE) TYPE test14_ ## TYPE (TYPE i, TYPE j) { return i == j; }
+#define NEQ(TYPE) TYPE test15_ ## TYPE (TYPE i, TYPE j) { return i != j; }
+#define LT(TYPE) TYPE test16_ ## TYPE (TYPE i, TYPE j) { return i < j; }
+#define LEQ(TYPE) TYPE test17_ ## TYPE (TYPE i, TYPE j) { return i <= j; }
+#define GT(TYPE) TYPE test18_ ## TYPE (TYPE i, TYPE j) { return i > j; }
+#define GEQ(TYPE) TYPE test19_ ## TYPE (TYPE i, TYPE j) { return i >= j; }
+
+#define ADD_I(TYPE) TYPE test20_ ## TYPE (TYPE i) { return i + 37; }
+#define SUB_I(TYPE) TYPE test21_ ## TYPE (TYPE i) { return i - 37; }
+#define MUL_I(TYPE) TYPE test22_ ## TYPE (TYPE i) { return i * 37; }
+#define DIV_I(TYPE) TYPE test23_ ## TYPE (TYPE i) { return i / 37; }
+#define MOD_I(TYPE) TYPE test24_ ## TYPE (TYPE i) { return i % 37; }
+#define XOR_I(TYPE) TYPE test25_ ## TYPE (TYPE i) { return i ^ 37; }
+#define OR_I(TYPE) TYPE test26_ ## TYPE (TYPE i) { return i | 37; }
+#define AND_I(TYPE) TYPE test27_ ## TYPE (TYPE i) { return i & 37; }
+#define SHIFT_RIGHT_I(TYPE) TYPE test28_ ## TYPE (TYPE i) { return i >> 3; }
+#define SHIFT_LEFT_I(TYPE) TYPE test29_ ## TYPE (TYPE i) { return i << 3; }
+
+#define ADD_F(TYPE) TYPE test30_ ## TYPE (TYPE i) { return i + 37.0; }
+#define SUB_F(TYPE) TYPE test31_ ## TYPE (TYPE i) { return i - 37.0; }
+#define MUL_F(TYPE) TYPE test32_ ## TYPE (TYPE i) { return i * 37.0; }
+#define DIV_F(TYPE) TYPE test33_ ## TYPE (TYPE i) { return i / 37.0; }
+
+#define SHUFFLE1(TYPE) TYPE test34_ ## TYPE (TYPE i, TYPE mask) { return __builtin_shuffle (i, mask); }
+#define SHUFFLE2(TYPE) TYPE test35_ ## TYPE (TYPE i, TYPE j, TYPE mask) { return __builtin_shuffle (i, j, mask); }
+
+#define REAL_SHUFFLE1(TYPE, MASK_TYPE) TYPE test36_ ## TYPE (TYPE i, MASK_TYPE mask) { return __builtin_shuffle (i, mask); }
+#define REAL_SHUFFLE2(TYPE, MASK_TYPE) TYPE test37_ ## TYPE (TYPE i, TYPE j, MASK_TYPE mask) { return __builtin_shuffle (i, j, mask); }
+
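+/* For illustration: SHUFFLE2 (v16i8) defines
+     v16i8 test35_v16i8 (v16i8 i, v16i8 j, v16i8 mask) { return __builtin_shuffle (i, j, mask); }
+   exercising the two-input form of __builtin_shuffle for each vector type.  */
+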
+#define ITERATE_FOR_ALL_INT_VECTOR_TYPES(FUNC) \
+ FUNC (v16i8) \
+ FUNC (v8i16) \
+ FUNC (v4i32) \
+ FUNC (v2i64) \
+ FUNC (v8i8) \
+ FUNC (v4i16) \
+ FUNC (v2i32) \
+ FUNC (v4i8) \
+ FUNC (v2i16)
+
+#define ITERATE_FOR_ALL_INT_SCALAR_TYPES(FUNC) \
+ FUNC (i64) \
+ FUNC (i32) \
+ FUNC (i16) \
+ FUNC (i8)
+
+#define ITERATE_FOR_ALL_INT_TYPES(FUNC) \
+ ITERATE_FOR_ALL_INT_VECTOR_TYPES(FUNC) \
+ ITERATE_FOR_ALL_INT_SCALAR_TYPES(FUNC)
+
+#define ITERATE_FOR_ALL_REAL_VECTOR_TYPES(FUNC) \
+ FUNC (v4f32) \
+ FUNC (v2f64) \
+ FUNC (v2f32)
+
+#define ITERATE_FOR_ALL_REAL_SCALAR_TYPES(FUNC) \
+ FUNC (f64) \
+ FUNC (f32)
+
+#define ITERATE_FOR_ALL_REAL_TYPES(FUNC) \
+ ITERATE_FOR_ALL_REAL_VECTOR_TYPES(FUNC) \
+ ITERATE_FOR_ALL_REAL_SCALAR_TYPES(FUNC)
+
+#define ITERATE_FOR_ALL_TYPES(FUNC) \
+ ITERATE_FOR_ALL_INT_TYPES(FUNC) \
+ ITERATE_FOR_ALL_REAL_TYPES(FUNC)
+
+ITERATE_FOR_ALL_TYPES (DECLARE)
+ITERATE_FOR_ALL_TYPES (RETURN)
+ITERATE_FOR_ALL_TYPES (ASSIGN)
+ITERATE_FOR_ALL_TYPES (ADD)
+ITERATE_FOR_ALL_TYPES (SUB)
+ITERATE_FOR_ALL_TYPES (MUL)
+ITERATE_FOR_ALL_TYPES (DIV)
+ITERATE_FOR_ALL_TYPES (MINUS)
+ITERATE_FOR_ALL_INT_TYPES (MOD)
+ITERATE_FOR_ALL_INT_TYPES (XOR)
+ITERATE_FOR_ALL_INT_TYPES (OR)
+ITERATE_FOR_ALL_INT_TYPES (AND)
+ITERATE_FOR_ALL_INT_TYPES (BIT_COMPLEMENT)
+ITERATE_FOR_ALL_INT_TYPES (SHIFT_RIGHT)
+ITERATE_FOR_ALL_INT_TYPES (SHIFT_LEFT)
+ITERATE_FOR_ALL_INT_TYPES (ADD_I)
+ITERATE_FOR_ALL_INT_TYPES (SUB_I)
+ITERATE_FOR_ALL_INT_TYPES (MUL_I)
+ITERATE_FOR_ALL_INT_TYPES (DIV_I)
+ITERATE_FOR_ALL_INT_TYPES (MOD_I)
+ITERATE_FOR_ALL_INT_TYPES (XOR_I)
+ITERATE_FOR_ALL_INT_TYPES (OR_I)
+ITERATE_FOR_ALL_INT_TYPES (AND_I)
+ITERATE_FOR_ALL_INT_TYPES (SHIFT_RIGHT_I)
+ITERATE_FOR_ALL_INT_TYPES (SHIFT_LEFT_I)
+ITERATE_FOR_ALL_REAL_TYPES (ADD_F)
+ITERATE_FOR_ALL_REAL_TYPES (SUB_F)
+ITERATE_FOR_ALL_REAL_TYPES (MUL_F)
+ITERATE_FOR_ALL_REAL_TYPES (DIV_F)
+ITERATE_FOR_ALL_INT_VECTOR_TYPES (SHUFFLE1)
+ITERATE_FOR_ALL_INT_VECTOR_TYPES (SHUFFLE2)
+REAL_SHUFFLE1 (v2f64, v2i64)
+REAL_SHUFFLE2 (v2f64, v2i64)
+REAL_SHUFFLE1 (v4f32, v4i32)
+REAL_SHUFFLE2 (v4f32, v4i32)
+REAL_SHUFFLE1 (v2f32, v2i32)
+REAL_SHUFFLE2 (v2f32, v2i32)
+ITERATE_FOR_ALL_TYPES (EQ)
+ITERATE_FOR_ALL_TYPES (NEQ)
+ITERATE_FOR_ALL_TYPES (LT)
+ITERATE_FOR_ALL_TYPES (LEQ)
+ITERATE_FOR_ALL_TYPES (GT)
+ITERATE_FOR_ALL_TYPES (GEQ)
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/msub-3.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/msub-3.c
index aedd04302..6284fe8f2 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/msub-3.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/msub-3.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
/* This test requires widening_mul */
-/* { dg-options "isa_rev>=1 -mgp32 -fexpensive-optimizations" } */
+/* { dg-options "isa_rev>=1 -mgp32 -fexpensive-optimizations forbid_cpu=mips.*r6" } */
/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
/* { dg-final { scan-assembler-times "\tmsub\t" 2 } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/msubu-3.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/msubu-3.c
index 2e936ebe0..e6fc5479c 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/msubu-3.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/msubu-3.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
/* This test requires widening_mul */
-/* { dg-options "isa_rev>=1 -mgp32 -fexpensive-optimizations" } */
+/* { dg-options "isa_rev>=1 -mgp32 -fexpensive-optimizations forbid_cpu=mips.*r6" } */
/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
/* { dg-final { scan-assembler-times "\tmsubu\t" 2 } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/neg-abs-2.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/neg-abs-2.c
index 435751e0c..59e797def 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/neg-abs-2.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/neg-abs-2.c
@@ -1,7 +1,7 @@
/* Make sure that we avoid abs.fmt and neg.fmt when the signs of NaNs
matter. */
/* { dg-do compile } */
-/* { dg-options "-mhard-float -fno-finite-math-only" } */
+/* { dg-options "isa_rev<=5 -mhard-float -fno-finite-math-only -mabs=legacy" } */
/* { dg-final { scan-assembler-not "\tneg.s\t" } } */
/* { dg-final { scan-assembler-not "\tneg.d\t" } } */
/* { dg-final { scan-assembler-not "\tabs.s\t" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-1.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-1.c
new file mode 100644
index 000000000..a9c695736
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-1.c
@@ -0,0 +1,13 @@
+/* Check that we enable odd-numbered single precision registers. */
+/* { dg-options "-mabi=32 -modd-spreg -mhard-float" } */
+
+#if _MIPS_SPFPSET != 32
+#error "Incorrect number of single-precision registers reported"
+#endif
+
+void
+foo ()
+{
+ register float foo asm ("$f1");
+ asm volatile ("" : "=f" (foo));
+}
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-2.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-2.c
new file mode 100644
index 000000000..e2e0a2660
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-2.c
@@ -0,0 +1,10 @@
+/* Check that we disable odd-numbered single precision registers. */
+/* { dg-skip-if "needs asm output" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
+/* { dg-options "-mabi=32 -mno-odd-spreg -mhard-float" } */
+
+void
+foo ()
+{
+ register float foo asm ("$f1"); /* { dg-error "isn't suitable for" } */
+ asm volatile ("" : "=f" (foo));
+}
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-3.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-3.c
new file mode 100644
index 000000000..8a2eb63e9
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-3.c
@@ -0,0 +1,10 @@
+/* Check that we disable odd-numbered single precision registers. */
+/* { dg-skip-if "needs asm output" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
+/* { dg-options "-mabi=32 -march=loongson3a -mhard-float" } */
+
+void
+foo ()
+{
+ register float foo asm ("$f1"); /* { dg-error "isn't suitable for" } */
+ asm volatile ("" : "=f" (foo));
+}
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-4.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-4.c
new file mode 100644
index 000000000..723424a39
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-4.c
@@ -0,0 +1,15 @@
+/* Check that we disable odd-numbered single precision registers and can
+ still generate code. */
+/* { dg-options "-mabi=32 -mno-odd-spreg -mhard-float" } */
+
+#if _MIPS_SPFPSET != 16
+#error "Incorrect number of single-precision registers reported"
+#endif
+
+float a;
+float
+foo ()
+{
+ float b = a + 1.0f;
+ return b;
+}
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-5.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-5.c
new file mode 100644
index 000000000..2d1b12969
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-5.c
@@ -0,0 +1,15 @@
+/* Check that we disable odd-numbered single precision registers and can
+ still generate code. */
+/* { dg-options "-mabi=64 -mno-odd-spreg -mhard-float" } */
+
+#if _MIPS_SPFPSET != 32
+#error "Incorrect number of single-precision registers reported"
+#endif
+
+float a;
+float
+foo ()
+{
+ float b = a + 1.0f;
+ return b;
+}
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-6.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-6.c
new file mode 100644
index 000000000..b1e79c1fa
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-6.c
@@ -0,0 +1,13 @@
+/* Check that we enable odd-numbered single precision registers. */
+/* { dg-options "-mabi=32 -march=octeon -mhard-float" } */
+
+#if _MIPS_SPFPSET != 32
+#error "Incorrect number of single-precision registers reported"
+#endif
+
+void
+foo ()
+{
+ register float foo asm ("$f1");
+ asm volatile ("" : "=f" (foo));
+}
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-7.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-7.c
new file mode 100644
index 000000000..89480aba0
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/oddspreg-7.c
@@ -0,0 +1,10 @@
+/* Check that we disable odd-numbered single precision registers for FPXX. */
+/* { dg-skip-if "needs asm output" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
+/* { dg-options "-mabi=32 -mips32r2 -mfpxx -mhard-float" } */
+
+void
+foo ()
+{
+ register float foo asm ("$f1"); /* { dg-error "isn't suitable for" } */
+ asm volatile ("" : "=f" (foo));
+}
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/pr37362.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/pr37362.c
index 848d879d3..2ad4e8dbc 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/pr37362.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/pr37362.c
@@ -1,5 +1,5 @@
/* mips*-sde-elf doesn't have 128-bit long doubles. */
-/* { dg-do compile { target { ! { mips*-sde-elf mips*-mti-elf } } } } */
+/* { dg-do compile { target { ! { mips*-sde-elf mips*-mti-elf mips*-img-elf } } } } */
/* { dg-options "-march=mips64r2 -mabi=n32" } */
typedef float TFtype __attribute__((mode(TF)));
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/timode-1.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/timode-1.c
index 606fee0cb..be3d317cb 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/timode-1.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/timode-1.c
@@ -1,4 +1,4 @@
-/* { dg-options "-mgp64" } */
+/* { dg-options "isa_rev<=5 -mgp64" } */
/* { dg-skip-if "we deliberately use calls when optimizing for size" { *-*-* } { "-Os" } { "" } } */
typedef int int128_t __attribute__((mode(TI)));
typedef unsigned int uint128_t __attribute__((mode(TI)));
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/umips-store16-1.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/umips-store16-1.c
new file mode 100644
index 000000000..6377e8569
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/umips-store16-1.c
@@ -0,0 +1,30 @@
+/* { dg-options "(-mmicromips)" } */
+/* { dg-do assemble } */
+
+register unsigned int global asm ("$16");
+
+extern void exit (int) __attribute__((noreturn));
+
+MICROMIPS void
+test_sb (unsigned char *ptr, void (*f) (void))
+{
+ ptr[0] = global;
+ f ();
+ exit (0);
+}
+
+MICROMIPS void
+test_sh (unsigned short *ptr, void (*f) (void))
+{
+ ptr[0] = global;
+ f ();
+ exit (0);
+}
+
+MICROMIPS void
+test_sw (unsigned int *ptr, void (*f) (void))
+{
+ ptr[0] = global;
+ f ();
+ exit (0);
+}
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/umips-store16-2.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/umips-store16-2.c
new file mode 100644
index 000000000..0748edb56
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/umips-store16-2.c
@@ -0,0 +1,22 @@
+/* { dg-options "(-mmicromips) -dp" } */
+
+MICROMIPS void
+f1 (unsigned char *ptr)
+{
+ *ptr = 0;
+}
+
+MICROMIPS void
+f2 (unsigned short *ptr)
+{
+ *ptr = 0;
+}
+
+MICROMIPS void
+f3 (unsigned int *ptr)
+{
+ *ptr = 0;
+}
+/* { dg-final { scan-assembler "\tsb\t\\\$0,0\\(\\\$\[0-9\]+\\)\[^\n\]*length = 2" } } */
+/* { dg-final { scan-assembler "\tsh\t\\\$0,0\\(\\\$\[0-9\]+\\)\[^\n\]*length = 2" } } */
+/* { dg-final { scan-assembler "\tsw\t\\\$0,0\\(\\\$\[0-9\]+\\)\[^\n\]*length = 2" } } */
diff --git a/gcc-4.9/gcc/testsuite/gcc.target/mips/unaligned-1.c b/gcc-4.9/gcc/testsuite/gcc.target/mips/unaligned-1.c
index 938f52d21..4888ca8b5 100644
--- a/gcc-4.9/gcc/testsuite/gcc.target/mips/unaligned-1.c
+++ b/gcc-4.9/gcc/testsuite/gcc.target/mips/unaligned-1.c
@@ -1,4 +1,4 @@
-/* { dg-options "-mgp64" } */
+/* { dg-options "isa_rev<=5 -mgp64" } */
/* { dg-skip-if "code quality test" { *-*-* } { "-O0" } { "" } } */
/* { dg-final { scan-assembler-times "\tsdl\t" 1 } } */
/* { dg-final { scan-assembler-times "\tsdr\t" 1 } } */
diff --git a/gcc-4.9/gcc/testsuite/lib/target-supports.exp b/gcc-4.9/gcc/testsuite/lib/target-supports.exp
index 7d296d902..4f1325bde 100644
--- a/gcc-4.9/gcc/testsuite/lib/target-supports.exp
+++ b/gcc-4.9/gcc/testsuite/lib/target-supports.exp
@@ -918,6 +918,17 @@ proc check_effective_target_nomips16 { } {
}]
}
+# Return true if the target is a MIPS target that does not produce
+# micromips code.
+
+proc check_effective_target_nomicromips { } {
+ return [check_no_compiler_messages nomicromips object {
+ #ifdef __mips_micromips
+ #error MICROMIPS
+ #endif
+ }]
+}
+
# Add the options needed for MIPS16 function attributes. At the moment,
# we don't support MIPS16 PIC.
@@ -1287,6 +1298,32 @@ proc check_sse_hw_available { } {
}]
}
+# Return 1 if the target supports executing MSA instructions, 0
+# otherwise. Cache the result.
+
+proc check_msa_hw_available { } {
+ return [check_cached_effective_target msa_hw_available {
+ # If this is not the right target then we can skip the test.
+ if { !([istarget mips*-*-*]) } {
+ expr 0
+ } else {
+ check_runtime_nocache msa_hw_available {
+ #if !defined(__mips_msa)
+ #error "MSA NOT AVAIL"
+ #else
+ #include <msa.h>
+
+ int main()
+ {
+ v8i16 v = __builtin_msa_ldi_h (0);
+ return v[0];
+ }
+ #endif
+ } "-mmsa -mfp64 -mnan=2008 -mips32r2 -mhard-float"
+ }
+ }]
+}
+
# Return 1 if the target supports executing SSE2 instructions, 0
# otherwise. Cache the result.
@@ -1356,6 +1393,24 @@ proc check_effective_target_sse2_runtime { } {
return 0
}
+# Return 1 if the target supports running MSA executables, 0 otherwise.
+
+proc check_effective_target_msa_runtime { } {
+ if { [check_effective_target_mips_msa]
+ && [check_msa_hw_available] } {
+ return 1
+ }
+ return 0
+}
+
+# Return 1 if the target supports MSA and generates neither MIPS16 nor
+# microMIPS code.
+
+proc check_effective_target_mips_msa_nomips16_nomicromips { } {
+ if { [check_effective_target_mips_msa]
+ && [check_effective_target_nomips16]
+ && [check_effective_target_nomicromips] } {
+ return 1
+ }
+ return 0
+}
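+
+# For example, an MSA-specific test can then be gated with:
+#   { dg-require-effective-target mips_msa_nomips16_nomicromips }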
+
# Return 1 if the target supports running AVX executables, 0 otherwise.
proc check_effective_target_avx_runtime { } {
@@ -2068,15 +2123,16 @@ proc check_effective_target_vect_int { } {
if { [istarget i?86-*-*]
|| ([istarget powerpc*-*-*]
&& ![istarget powerpc-*-linux*paired*])
- || [istarget spu-*-*]
- || [istarget x86_64-*-*]
- || [istarget sparc*-*-*]
- || [istarget alpha*-*-*]
- || [istarget ia64-*-*]
- || [istarget aarch64*-*-*]
- || [check_effective_target_arm32]
- || ([istarget mips*-*-*]
- && [check_effective_target_mips_loongson]) } {
+ || [istarget spu-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget sparc*-*-*]
+ || [istarget alpha*-*-*]
+ || [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
+ || [check_effective_target_arm32]
+ || ([istarget mips*-*-*]
+ && ([check_effective_target_mips_msa_nomips16_nomicromips]
+ || [check_effective_target_mips_loongson])) } {
set et_vect_int_saved 1
}
}
@@ -2100,7 +2156,9 @@ proc check_effective_target_vect_intfloat_cvt { } {
&& ![istarget powerpc-*-linux*paired*])
|| [istarget x86_64-*-*]
|| ([istarget arm*-*-*]
- && [check_effective_target_arm_neon_ok])} {
+ && [check_effective_target_arm_neon_ok])
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips]) } {
set et_vect_intfloat_cvt_saved 1
}
}
@@ -2139,7 +2197,9 @@ proc check_effective_target_vect_uintfloat_cvt { } {
|| [istarget x86_64-*-*]
|| [istarget aarch64*-*-*]
|| ([istarget arm*-*-*]
- && [check_effective_target_arm_neon_ok])} {
+ && [check_effective_target_arm_neon_ok])
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips]) } {
set et_vect_uintfloat_cvt_saved 1
}
}
@@ -2164,7 +2224,9 @@ proc check_effective_target_vect_floatint_cvt { } {
&& ![istarget powerpc-*-linux*paired*])
|| [istarget x86_64-*-*]
|| ([istarget arm*-*-*]
- && [check_effective_target_arm_neon_ok])} {
+ && [check_effective_target_arm_neon_ok])
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips]) } {
set et_vect_floatint_cvt_saved 1
}
}
@@ -2186,7 +2248,9 @@ proc check_effective_target_vect_floatuint_cvt { } {
if { ([istarget powerpc*-*-*]
&& ![istarget powerpc-*-linux*paired*])
|| ([istarget arm*-*-*]
- && [check_effective_target_arm_neon_ok])} {
+ && [check_effective_target_arm_neon_ok])
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips]) } {
set et_vect_floatuint_cvt_saved 1
}
}
@@ -2895,6 +2959,24 @@ proc check_effective_target_mips_loongson { } {
}]
}
+# Return 1 if an MSA program can be compiled to an object file, 0 otherwise.
+proc check_effective_target_mips_msa { } {
+ return [check_no_compiler_messages msa object {
+ #if !defined(__mips_msa)
+ #error "MSA NOT AVAIL"
+ #else
+ #include <msa.h>
+
+ int main()
+ {
+ v8i16 v = __builtin_msa_ldi_h (1);
+
+ return v[0];
+ }
+ #endif
+ } "-mmsa -mfp64 -mnan=2008 -mips32r2 -mhard-float" ]
+}
+
# Return 1 if this is an ARM target that adheres to the ABI for the ARM
# Architecture.
@@ -3281,14 +3363,15 @@ proc check_effective_target_vect_shift { } {
} else {
set et_vect_shift_saved 0
if { ([istarget powerpc*-*-*]
- && ![istarget powerpc-*-linux*paired*])
- || [istarget ia64-*-*]
- || [istarget i?86-*-*]
- || [istarget x86_64-*-*]
- || [istarget aarch64*-*-*]
- || [check_effective_target_arm32]
- || ([istarget mips*-*-*]
- && [check_effective_target_mips_loongson]) } {
+ && ![istarget powerpc-*-linux*paired*])
+ || [istarget ia64-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget aarch64*-*-*]
+ || [check_effective_target_arm32]
+ || ([istarget mips*-*-*]
+ && ([check_effective_target_mips_msa_nomips16_nomicromips]
+ || [check_effective_target_mips_loongson])) } {
set et_vect_shift_saved 1
}
}
@@ -3307,8 +3390,10 @@ proc check_effective_target_vect_shift_char { } {
} else {
set et_vect_shift_char_saved 0
if { ([istarget powerpc*-*-*]
- && ![istarget powerpc-*-linux*paired*])
- || [check_effective_target_arm32] } {
+ && ![istarget powerpc-*-linux*paired*])
+ || [check_effective_target_arm32]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips]) } {
set et_vect_shift_char_saved 1
}
}
@@ -3328,7 +3413,9 @@ proc check_effective_target_vect_long { } {
&& [check_effective_target_ilp32])
|| [istarget x86_64-*-*]
|| [check_effective_target_arm32]
- || ([istarget sparc*-*-*] && [check_effective_target_ilp32]) } {
+ || ([istarget sparc*-*-*] && [check_effective_target_ilp32])
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips]) } {
set answer 1
} else {
set answer 0
@@ -3357,6 +3444,8 @@ proc check_effective_target_vect_float { } {
|| [istarget x86_64-*-*]
|| [istarget ia64-*-*]
|| [istarget aarch64*-*-*]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips])
|| [check_effective_target_arm32] } {
set et_vect_float_saved 1
}
@@ -3391,6 +3480,9 @@ proc check_effective_target_vect_double { } {
}
} elseif { [istarget spu-*-*] } {
set et_vect_double_saved 1
+ } elseif { [istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips] } {
+ set et_vect_double_saved 1
}
}
@@ -3410,7 +3502,9 @@ proc check_effective_target_vect_long_long { } {
} else {
set et_vect_long_long_saved 0
if { [istarget i?86-*-*]
- || [istarget x86_64-*-*] } {
+ || [istarget x86_64-*-*]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips]) } {
set et_vect_long_long_saved 1
}
}
@@ -3436,7 +3530,8 @@ proc check_effective_target_vect_no_int_max { } {
|| [istarget spu-*-*]
|| [istarget alpha*-*-*]
|| ([istarget mips*-*-*]
- && [check_effective_target_mips_loongson]) } {
+ && (![check_effective_target_mips_msa_nomips16_nomicromips])
+ && [check_effective_target_mips_loongson]) } {
set et_vect_no_int_max_saved 1
}
}
@@ -3498,12 +3593,13 @@ proc check_effective_target_vect_perm { } {
|| ([istarget aarch64*-*-*]
&& [is-effective-target aarch64_little_endian])
|| [istarget powerpc*-*-*]
- || [istarget spu-*-*]
+ || [istarget spu-*-*]
|| [istarget i?86-*-*]
|| [istarget x86_64-*-*]
|| ([istarget mips*-*-*]
- && [check_effective_target_mpaired_single]) } {
- set et_vect_perm_saved 1
+ && ([check_effective_target_mips_msa_nomips16_nomicromips]
+ || [check_effective_target_mpaired_single])) } {
+ set et_vect_perm_saved 1
}
}
verbose "check_effective_target_vect_perm: returning $et_vect_perm_saved" 2
@@ -3527,7 +3623,9 @@ proc check_effective_target_vect_perm_byte { } {
|| ([istarget aarch64*-*-*]
&& [is-effective-target aarch64_little_endian])
|| [istarget powerpc*-*-*]
- || [istarget spu-*-*] } {
+ || [istarget spu-*-*]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips]) } {
set et_vect_perm_byte_saved 1
}
}
@@ -3552,7 +3650,9 @@ proc check_effective_target_vect_perm_short { } {
|| ([istarget aarch64*-*-*]
&& [is-effective-target aarch64_little_endian])
|| [istarget powerpc*-*-*]
- || [istarget spu-*-*] } {
+ || [istarget spu-*-*]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips]) } {
set et_vect_perm_short_saved 1
}
}
@@ -3980,7 +4080,8 @@ proc check_effective_target_vect_no_align { } {
|| [istarget ia64-*-*]
|| [check_effective_target_arm_vect_no_misalign]
|| ([istarget mips*-*-*]
- && [check_effective_target_mips_loongson]) } {
+ && ![check_effective_target_mips_msa]
+ && [check_effective_target_mips_loongson]) } {
set et_vect_no_align_saved 1
}
}
@@ -4001,7 +4102,8 @@ proc check_effective_target_vect_hw_misalign { } {
set et_vect_hw_misalign_saved 0
if { ([istarget x86_64-*-*]
|| [istarget aarch64*-*-*]
- || [istarget i?86-*-*]) } {
+ || [istarget i?86-*-*])
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips]) } {
set et_vect_hw_misalign_saved 1
}
}
@@ -4245,7 +4347,8 @@ proc check_effective_target_vect_short_mult { } {
|| [istarget aarch64*-*-*]
|| [check_effective_target_arm32]
|| ([istarget mips*-*-*]
- && [check_effective_target_mips_loongson]) } {
+ && ([check_effective_target_mips_msa_nomips16_nomicromips]
+ || [check_effective_target_mips_loongson])) } {
set et_vect_short_mult_saved 1
}
}
@@ -4269,6 +4372,8 @@ proc check_effective_target_vect_int_mult { } {
|| [istarget x86_64-*-*]
|| [istarget ia64-*-*]
|| [istarget aarch64*-*-*]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips])
|| [check_effective_target_arm32] } {
set et_vect_int_mult_saved 1
}
@@ -4295,7 +4400,8 @@ proc check_effective_target_vect_extract_even_odd { } {
|| [istarget ia64-*-*]
|| [istarget spu-*-*]
|| ([istarget mips*-*-*]
- && [check_effective_target_mpaired_single]) } {
+ && ([check_effective_target_mips_msa_nomips16_nomicromips]
+ || [check_effective_target_mpaired_single])) } {
set et_vect_extract_even_odd_saved 1
}
}
@@ -4321,7 +4427,8 @@ proc check_effective_target_vect_interleave { } {
|| [istarget ia64-*-*]
|| [istarget spu-*-*]
|| ([istarget mips*-*-*]
- && [check_effective_target_mpaired_single]) } {
+ && ([check_effective_target_mips_msa_nomips16_nomicromips]
+ || [check_effective_target_mpaired_single])) } {
set et_vect_interleave_saved 1
}
}
@@ -5616,6 +5723,16 @@ proc check_vect_support_and_set_flags { } {
} else {
set dg-do-what-default compile
}
+ } elseif { [istarget mips*-*-*]
+ && [check_effective_target_mips_msa_nomips16_nomicromips] } {
+ lappend DEFAULT_VECTCFLAGS "-mmsa" "-mfp64" "-mnan=2008" "-mips32r2" "-mhard-float"
+
+ if { [check_effective_target_msa_runtime] } {
+ set dg-do-what-default run
+ } else {
+ set dg-do-what-default compile
+ }
} elseif { [istarget mips*-*-*]
&& ([check_effective_target_mpaired_single]
|| [check_effective_target_mips_loongson])
@@ -5629,7 +5746,7 @@ proc check_vect_support_and_set_flags { } {
if [check_effective_target_ultrasparc_hw] {
set dg-do-what-default run
} else {
- set dg-do-what-default compile
+ set dg-do-what-default compile
}
} elseif [istarget alpha*-*-*] {
# Alpha's vectorization capabilities are extremely limited.
diff --git a/gcc-4.9/gcc/testsuite/lib/target-supports.exp.orig b/gcc-4.9/gcc/testsuite/lib/target-supports.exp.orig
new file mode 100644
index 000000000..7d296d902
--- /dev/null
+++ b/gcc-4.9/gcc/testsuite/lib/target-supports.exp.orig
@@ -0,0 +1,5791 @@
+# Copyright (C) 1999-2014 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Please email any bugs, comments, and/or additions to this file to:
+# gcc-patches@gcc.gnu.org
+
+# This file defines procs for determining features supported by the target.
+
+# Try to compile the code given by CONTENTS into an output file of
+# type TYPE, where TYPE is as for target_compile. Return a list
+# whose first element contains the compiler messages and whose
+# second element is the name of the output file.
+#
+# BASENAME is a prefix to use for source and output files.
+# If ARGS is not empty, its first element is a string that
+# should be added to the command line.
+#
+# Assume by default that CONTENTS is C code.
+# Otherwise, code should contain:
+# "// C++" for c++,
+# "! Fortran" for Fortran code,
+# "/* ObjC", for ObjC
+# "// ObjC++" for ObjC++
+# and "// Go" for Go
+# If the tool is ObjC/ObjC++ then we override the extension to .m/.mm to
+# allow for ObjC/ObjC++ specific flags.
+proc check_compile {basename type contents args} {
+ global tool
+ verbose "check_compile tool: $tool for $basename"
+
+ if { [llength $args] > 0 } {
+ set options [list "additional_flags=[lindex $args 0]"]
+ } else {
+ set options ""
+ }
+ switch -glob -- $contents {
+ "*! Fortran*" { set src ${basename}[pid].f90 }
+ "*// C++*" { set src ${basename}[pid].cc }
+ "*// ObjC++*" { set src ${basename}[pid].mm }
+ "*/* ObjC*" { set src ${basename}[pid].m }
+ "*// Go*" { set src ${basename}[pid].go }
+ default {
+ switch -- $tool {
+ "objc" { set src ${basename}[pid].m }
+ "obj-c++" { set src ${basename}[pid].mm }
+ default { set src ${basename}[pid].c }
+ }
+ }
+ }
+
+ set compile_type $type
+ switch -glob $type {
+ assembly { set output ${basename}[pid].s }
+ object { set output ${basename}[pid].o }
+ executable { set output ${basename}[pid].exe }
+ "rtl-*" {
+ set output ${basename}[pid].s
+ lappend options "additional_flags=-fdump-$type"
+ set compile_type assembly
+ }
+ }
+ set f [open $src "w"]
+ puts $f $contents
+ close $f
+ set lines [${tool}_target_compile $src $output $compile_type "$options"]
+ file delete $src
+
+ set scan_output $output
+ # Don't try folding this into the switch above; calling "glob" before the
+ # file is created won't work.
+ if [regexp "rtl-(.*)" $type dummy rtl_type] {
+ set scan_output "[glob $src.\[0-9\]\[0-9\]\[0-9\]r.$rtl_type]"
+ file delete $output
+ }
+
+ return [list $lines $scan_output]
+}
+
+proc current_target_name { } {
+ global target_info
+ if [info exists target_info(target,name)] {
+ set answer $target_info(target,name)
+ } else {
+ set answer ""
+ }
+ return $answer
+}
+
+# Implement an effective-target check for property PROP by invoking
+# the Tcl command ARGS and seeing if it returns true.
+
+proc check_cached_effective_target { prop args } {
+ global et_cache
+
+ set target [current_target_name]
+ if {![info exists et_cache($prop,target)]
+ || $et_cache($prop,target) != $target} {
+ verbose "check_cached_effective_target $prop: checking $target" 2
+ set et_cache($prop,target) $target
+ set et_cache($prop,value) [uplevel eval $args]
+ }
+ set value $et_cache($prop,value)
+ verbose "check_cached_effective_target $prop: returning $value for $target" 2
+ return $value
+}
+
+# Like check_compile, but delete the output file and return true if the
+# compiler printed no messages.
+proc check_no_compiler_messages_nocache {args} {
+ set result [eval check_compile $args]
+ set lines [lindex $result 0]
+ set output [lindex $result 1]
+ remote_file build delete $output
+ return [string match "" $lines]
+}
+
+# Like check_no_compiler_messages_nocache, but cache the result.
+# PROP is the property we're checking, and doubles as a prefix for
+# temporary filenames.
+proc check_no_compiler_messages {prop args} {
+ return [check_cached_effective_target $prop {
+ eval [list check_no_compiler_messages_nocache $prop] $args
+ }]
+}
+
+# Like check_compile, but return true if the compiler printed no
+# messages and if the contents of the output file satisfy PATTERN.
+# If PATTERN has the form "!REGEXP", the contents satisfy it if they
+# don't match regular expression REGEXP, otherwise they satisfy it
+# if they do match regular expression PATTERN. (PATTERN can start
+# with something like "[!]" if the regular expression needs to match
+# "!" as the first character.)
+#
+# Delete the output file before returning. The other arguments are
+# as for check_compile.
+proc check_no_messages_and_pattern_nocache {basename pattern args} {
+ global tool
+
+ set result [eval [list check_compile $basename] $args]
+ set lines [lindex $result 0]
+ set output [lindex $result 1]
+
+ set ok 0
+ if { [string match "" $lines] } {
+ set chan [open "$output"]
+ set invert [regexp {^!(.*)} $pattern dummy pattern]
+ set ok [expr { [regexp $pattern [read $chan]] != $invert }]
+ close $chan
+ }
+
+ remote_file build delete $output
+ return $ok
+}
+
+# Like check_no_messages_and_pattern_nocache, but cache the result.
+# PROP is the property we're checking, and doubles as a prefix for
+# temporary filenames.
+proc check_no_messages_and_pattern {prop pattern args} {
+ return [check_cached_effective_target $prop {
+ eval [list check_no_messages_and_pattern_nocache $prop $pattern] $args
+ }]
+}
+
+# Try to compile and run an executable from code CONTENTS. Return true
+# if the compiler reports no messages and if execution "passes" in the
+# usual DejaGNU sense. The arguments are as for check_compile, with
+# TYPE implicitly being "executable".
+proc check_runtime_nocache {basename contents args} {
+ global tool
+
+ set result [eval [list check_compile $basename executable $contents] $args]
+ set lines [lindex $result 0]
+ set output [lindex $result 1]
+
+ set ok 0
+ if { [string match "" $lines] } {
+ # No error messages, everything is OK.
+ set result [remote_load target "./$output" "" ""]
+ set status [lindex $result 0]
+ verbose "check_runtime_nocache $basename: status is <$status>" 2
+ if { $status == "pass" } {
+ set ok 1
+ }
+ }
+ remote_file build delete $output
+ return $ok
+}
+
+# Like check_runtime_nocache, but cache the result. PROP is the
+# property we're checking, and doubles as a prefix for temporary
+# filenames.
+proc check_runtime {prop args} {
+ global tool
+
+ return [check_cached_effective_target $prop {
+ eval [list check_runtime_nocache $prop] $args
+ }]
+}
+
+###############################
+# proc check_weak_available { }
+###############################
+
+# weak symbols are only supported in some configs/object formats
+# this proc returns 1 if they're supported, 0 if they're not, or -1 if unsure
+
+proc check_weak_available { } {
+ global target_cpu
+
+ # All mips targets should support it
+
+ if { [ string first "mips" $target_cpu ] >= 0 } {
+ return 1
+ }
+
+ # All AIX targets should support it
+
+ if { [istarget *-*-aix*] } {
+ return 1
+ }
+
+ # All solaris2 targets should support it
+
+ if { [istarget *-*-solaris2*] } {
+ return 1
+ }
+
+ # Windows targets Cygwin and MingW32 support it
+
+ if { [istarget *-*-cygwin*] || [istarget *-*-mingw*] } {
+ return 1
+ }
+
+ # HP-UX 10.X doesn't support it
+
+ if { [istarget hppa*-*-hpux10*] } {
+ return 0
+ }
+
+ # ELF and ECOFF support it. a.out does with gas/gld but may also with
+ # other linkers, so we should try it
+
+ set objformat [gcc_target_object_format]
+
+ switch $objformat {
+ elf { return 1 }
+ ecoff { return 1 }
+ a.out { return 1 }
+ mach-o { return 1 }
+ som { return 1 }
+ unknown { return -1 }
+ default { return 0 }
+ }
+}
+
+###############################
+# proc check_weak_override_available { }
+###############################
+
+# Like check_weak_available, but return 0 if weak symbol definitions
+# cannot be overridden.
+
+proc check_weak_override_available { } {
+ if { [istarget *-*-mingw*] } {
+ return 0
+ }
+ return [check_weak_available]
+}
+
+###############################
+# proc check_visibility_available { what_kind }
+###############################
+
+# The visibility attribute is only supported in some object formats.
+# This proc returns 1 if it is supported, 0 if not.
+# The argument is the kind of visibility, default/protected/hidden/internal.
+
+proc check_visibility_available { what_kind } {
+ if [string match "" $what_kind] { set what_kind "hidden" }
+
+ return [check_no_compiler_messages visibility_available_$what_kind object "
+ void f() __attribute__((visibility(\"$what_kind\")));
+ void f() {}
+ "]
+}
+
+###############################
+# proc check_alias_available { }
+###############################
+
+# Determine if the target toolchain supports the alias attribute.
+
+# Returns 2 if the target supports aliases. Returns 1 if the target
+# only supports weak aliases. Returns 0 if the target does not
+# support aliases at all. Returns -1 if support for aliases could not
+# be determined.
+
+proc check_alias_available { } {
+ global alias_available_saved
+ global tool
+
+ if [info exists alias_available_saved] {
+ verbose "check_alias_available returning saved $alias_available_saved" 2
+ } else {
+ set src alias[pid].c
+ set obj alias[pid].o
+ verbose "check_alias_available compiling testfile $src" 2
+ set f [open $src "w"]
+ # Compile a small test program. The definition of "g" is
+ # necessary to keep the Solaris assembler from complaining
+ # about the program.
+ puts $f "#ifdef __cplusplus\nextern \"C\"\n#endif\n"
+ puts $f "void g() {} void f() __attribute__((alias(\"g\")));"
+ close $f
+ set lines [${tool}_target_compile $src $obj object ""]
+ file delete $src
+ remote_file build delete $obj
+
+ if [string match "" $lines] then {
+ # No error messages, everything is OK.
+ set alias_available_saved 2
+ } else {
+ if [regexp "alias definitions not supported" $lines] {
+ verbose "check_alias_available target does not support aliases" 2
+
+ set objformat [gcc_target_object_format]
+
+ if { $objformat == "elf" } {
+ verbose "check_alias_available but target uses ELF format, so it ought to" 2
+ set alias_available_saved -1
+ } else {
+ set alias_available_saved 0
+ }
+ } else {
+ if [regexp "only weak aliases are supported" $lines] {
+ verbose "check_alias_available target supports only weak aliases" 2
+ set alias_available_saved 1
+ } else {
+ set alias_available_saved -1
+ }
+ }
+ }
+
+ verbose "check_alias_available returning $alias_available_saved" 2
+ }
+
+ return $alias_available_saved
+}
+
+# Returns 1 if the target toolchain supports strong aliases, 0 otherwise.
+
+proc check_effective_target_alias { } {
+ if { [check_alias_available] < 2 } {
+ return 0
+ } else {
+ return 1
+ }
+}
+
+# Returns 1 if the target toolchain supports ifunc, 0 otherwise.
+
+proc check_ifunc_available { } {
+ return [check_no_compiler_messages ifunc_available object {
+ #ifdef __cplusplus
+ extern "C"
+ #endif
+ void g() {}
+ void f() __attribute__((ifunc("g")));
+ }]
+}
+
+# Returns true if --gc-sections is supported on the target.
+
+proc check_gc_sections_available { } {
+ global gc_sections_available_saved
+ global tool
+
+ if {![info exists gc_sections_available_saved]} {
+ # Some targets don't support gc-sections despite whatever's
+ # advertised by ld's options.
+ if { [istarget alpha*-*-*]
+ || [istarget ia64-*-*] } {
+ set gc_sections_available_saved 0
+ return 0
+ }
+
+ # elf2flt uses -q (--emit-relocs), which is incompatible with
+ # --gc-sections.
+ if { [board_info target exists ldflags]
+ && [regexp " -elf2flt\[ =\]" " [board_info target ldflags] "] } {
+ set gc_sections_available_saved 0
+ return 0
+ }
+
+ # VxWorks kernel modules are relocatable objects linked with -r,
+ # while RTP executables are linked with -q (--emit-relocs).
+ # Both of these options are incompatible with --gc-sections.
+ if { [istarget *-*-vxworks*] } {
+ set gc_sections_available_saved 0
+ return 0
+ }
+
+ # Check if the ld used by gcc supports --gc-sections.
+ set gcc_spec [${tool}_target_compile "-dumpspecs" "" "none" ""]
+ regsub ".*\n\\*linker:\[ \t\]*\n(\[^ \t\n\]*).*" "$gcc_spec" {\1} linker
+ set gcc_ld [lindex [${tool}_target_compile "-print-prog-name=$linker" "" "none" ""] 0]
+ set ld_output [remote_exec host "$gcc_ld" "--help"]
+ if { [ string first "--gc-sections" $ld_output ] >= 0 } {
+ set gc_sections_available_saved 1
+ } else {
+ set gc_sections_available_saved 0
+ }
+ }
+ return $gc_sections_available_saved
+}
+
+# Return 1 if according to target_info struct and explicit target list
+# target is supposed to support trampolines.
+
+proc check_effective_target_trampolines { } {
+ if [target_info exists no_trampolines] {
+ return 0
+ }
+ if { [istarget avr-*-*]
+ || [istarget msp430-*-*]
+ || [istarget hppa2.0w-hp-hpux11.23]
+ || [istarget hppa64-hp-hpux11.23] } {
+ return 0;
+ }
+ return 1
+}
+
+# Return 1 if according to target_info struct and explicit target list
+# target is supposed to keep null pointer checks. This could be due to
+# use of option fno-delete-null-pointer-checks or hardwired in target.
+
+proc check_effective_target_keeps_null_pointer_checks { } {
+ if [target_info exists keeps_null_pointer_checks] {
+ return 1
+ }
+ if { [istarget avr-*-*] } {
+ return 1;
+ }
+ return 0
+}
+
+# Return true if profiling is supported on the target.
+
+proc check_profiling_available { test_what } {
+ global profiling_available_saved
+
+ verbose "Profiling argument is <$test_what>" 1
+
+ # These conditions depend on the argument so examine them before
+ # looking at the cache variable.
+
+ # Tree profiling requires TLS runtime support.
+ if { $test_what == "-fprofile-generate" } {
+ if { ![check_effective_target_tls_runtime] } {
+ return 0
+ }
+ }
+
+ # Support for -p on solaris2 relies on mcrt1.o which comes with the
+ # vendor compiler. We cannot reliably predict the directory where the
+ # vendor compiler (and thus mcrt1.o) is installed so we can't
+ # necessarily find mcrt1.o even if we have it.
+ if { [istarget *-*-solaris2*] && $test_what == "-p" } {
+ return 0
+ }
+
+ # We don't yet support profiling for MIPS16.
+ if { [istarget mips*-*-*]
+ && ![check_effective_target_nomips16]
+ && ($test_what == "-p" || $test_what == "-pg") } {
+ return 0
+ }
+
+ # MinGW does not support -p.
+ if { [istarget *-*-mingw*] && $test_what == "-p" } {
+ return 0
+ }
+
+ # cygwin does not support -p.
+ if { [istarget *-*-cygwin*] && $test_what == "-p" } {
+ return 0
+ }
+
+ # uClibc does not have gcrt1.o.
+ if { [check_effective_target_uclibc]
+ && ($test_what == "-p" || $test_what == "-pg") } {
+ return 0
+ }
+
+ # Now examine the cache variable.
+ if {![info exists profiling_available_saved]} {
+ # Some targets don't have any implementation of __bb_init_func or are
+ # missing other needed machinery.
+ if { [istarget aarch64*-*-elf]
+ || [istarget am3*-*-linux*]
+ || [istarget arm*-*-eabi*]
+ || [istarget arm*-*-elf]
+ || [istarget arm*-*-symbianelf*]
+ || [istarget avr-*-*]
+ || [istarget bfin-*-*]
+ || [istarget cris-*-*]
+ || [istarget crisv32-*-*]
+ || [istarget fido-*-elf]
+ || [istarget h8300-*-*]
+ || [istarget lm32-*-*]
+ || [istarget m32c-*-elf]
+ || [istarget m68k-*-elf]
+ || [istarget m68k-*-uclinux*]
+ || [istarget mep-*-elf]
+ || [istarget mips*-*-elf*]
+ || [istarget mmix-*-*]
+ || [istarget mn10300-*-elf*]
+ || [istarget moxie-*-elf*]
+ || [istarget msp430-*-*]
+ || [istarget nds32*-*-elf]
+ || [istarget nios2-*-elf]
+ || [istarget picochip-*-*]
+ || [istarget powerpc-*-eabi*]
+ || [istarget powerpc-*-elf]
+ || [istarget rx-*-*]
+ || [istarget tic6x-*-elf]
+ || [istarget xstormy16-*]
+ || [istarget xtensa*-*-elf]
+ || [istarget *-*-rtems*]
+ || [istarget *-*-vxworks*] } {
+ set profiling_available_saved 0
+ } else {
+ set profiling_available_saved 1
+ }
+ }
+
+ return $profiling_available_saved
+}
+
+# Check to see if a target is "freestanding". This is as per the definition
+# in Section 4 of C99 standard. Effectively, it is a target which supports no
+# extra headers or libraries other than what is considered essential.
+proc check_effective_target_freestanding { } {
+ if { [istarget picochip-*-*] } then {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if target has packed layout of structure members by
+# default, 0 otherwise. Note that this is slightly different than
+# whether the target has "natural alignment": both attributes may be
+# false.
+
+proc check_effective_target_default_packed { } {
+ return [check_no_compiler_messages default_packed assembly {
+ struct x { char a; long b; } c;
+ int s[sizeof (c) == sizeof (char) + sizeof (long) ? 1 : -1];
+ }]
+}
+
+# Return 1 if target has PCC_BITFIELD_TYPE_MATTERS defined. See
+# documentation, where the test also comes from.
+
+proc check_effective_target_pcc_bitfield_type_matters { } {
+ # PCC_BITFIELD_TYPE_MATTERS isn't just about unnamed or empty
+ # bitfields, but let's stick to the example code from the docs.
+ return [check_no_compiler_messages pcc_bitfield_type_matters assembly {
+ struct foo1 { char x; char :0; char y; };
+ struct foo2 { char x; int :0; char y; };
+ int s[sizeof (struct foo1) != sizeof (struct foo2) ? 1 : -1];
+ }]
+}
+
+# Add to FLAGS all the target-specific flags needed to use thread-local storage.
+
+proc add_options_for_tls { flags } {
+ # On Solaris 9, __tls_get_addr/___tls_get_addr only lives in
+ # libthread, so always pass -pthread for native TLS. Same for AIX.
+ # Need to duplicate native TLS check from
+ # check_effective_target_tls_native to avoid recursion.
+ if { ([istarget *-*-solaris2.9*] || [istarget powerpc-ibm-aix*]) &&
+ [check_no_messages_and_pattern tls_native "!emutls" assembly {
+ __thread int i;
+ int f (void) { return i; }
+ void g (int j) { i = j; }
+ }] } {
+ return "$flags -pthread"
+ }
+ return $flags
+}
+
+# Return 1 if thread local storage (TLS) is supported, 0 otherwise.
+
+proc check_effective_target_tls {} {
+ return [check_no_compiler_messages tls assembly {
+ __thread int i;
+ int f (void) { return i; }
+ void g (int j) { i = j; }
+ }]
+}
+
+# Return 1 if *native* thread local storage (TLS) is supported, 0 otherwise.
+
+proc check_effective_target_tls_native {} {
+ # VxWorks uses emulated TLS machinery, but with non-standard helper
+ # functions, so we fail to automatically detect it.
+ if { [istarget *-*-vxworks*] } {
+ return 0
+ }
+
+ return [check_no_messages_and_pattern tls_native "!emutls" assembly {
+ __thread int i;
+ int f (void) { return i; }
+ void g (int j) { i = j; }
+ }]
+}
+
+# Return 1 if *emulated* thread local storage (TLS) is supported, 0 otherwise.
+
+proc check_effective_target_tls_emulated {} {
+ # VxWorks uses emulated TLS machinery, but with non-standard helper
+ # functions, so we fail to automatically detect it.
+ if { [istarget *-*-vxworks*] } {
+ return 1
+ }
+
+ return [check_no_messages_and_pattern tls_emulated "emutls" assembly {
+ __thread int i;
+ int f (void) { return i; }
+ void g (int j) { i = j; }
+ }]
+}
+
+# Return 1 if TLS executables can run correctly, 0 otherwise.
+
+proc check_effective_target_tls_runtime {} {
+ # MSP430 runtime does not have TLS support, but just
+ # running the test below is insufficient to show this.
+ if { [istarget msp430-*-*] } {
+ return 0
+ }
+ return [check_runtime tls_runtime {
+ __thread int thr = 0;
+ int main (void) { return thr; }
+ } [add_options_for_tls ""]]
+}
+
+# Return 1 if atomic compare-and-swap is supported on 'int'
+
+proc check_effective_target_cas_char {} {
+ return [check_no_compiler_messages cas_char assembly {
+ #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1
+ #error unsupported
+ #endif
+ } ""]
+}
+
+proc check_effective_target_cas_int {} {
+ return [check_no_compiler_messages cas_int assembly {
+ #if __INT_MAX__ == 0x7fff && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2
+ /* ok */
+ #elif __INT_MAX__ == 0x7fffffff && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+ /* ok */
+ #else
+ #error unsupported
+ #endif
+ } ""]
+}
+
+# Return 1 if -ffunction-sections is supported, 0 otherwise.
+
+proc check_effective_target_function_sections {} {
+ # Darwin has its own scheme and silently accepts -ffunction-sections.
+ if { [istarget *-*-darwin*] } {
+ return 0
+ }
+
+ return [check_no_compiler_messages functionsections assembly {
+ void foo (void) { }
+ } "-ffunction-sections"]
+}
+
+# Return 1 if instruction scheduling is available, 0 otherwise.
+
+proc check_effective_target_scheduling {} {
+ return [check_no_compiler_messages scheduling object {
+ void foo (void) { }
+ } "-fschedule-insns"]
+}
+
+# Return 1 if trapping arithmetic is available, 0 otherwise.
+
+proc check_effective_target_trapping {} {
+ return [check_no_compiler_messages scheduling object {
+ add (int a, int b) { return a + b; }
+ } "-ftrapv"]
+}
+
+# Return 1 if compilation with -fgraphite is error-free for trivial
+# code, 0 otherwise.
+
+proc check_effective_target_fgraphite {} {
+ return [check_no_compiler_messages fgraphite object {
+ void foo (void) { }
+ } "-O1 -fgraphite"]
+}
+
+# Return 1 if compilation with -fopenmp is error-free for trivial
+# code, 0 otherwise.
+
+proc check_effective_target_fopenmp {} {
+ return [check_no_compiler_messages fopenmp object {
+ void foo (void) { }
+ } "-fopenmp"]
+}
+
+# Return 1 if compilation with -fgnu-tm is error-free for trivial
+# code, 0 otherwise.
+
+proc check_effective_target_fgnu_tm {} {
+ return [check_no_compiler_messages fgnu_tm object {
+ void foo (void) { }
+ } "-fgnu-tm"]
+}
+
+# Return 1 if the target supports mmap, 0 otherwise.
+
+proc check_effective_target_mmap {} {
+ return [check_function_available "mmap"]
+}
+
+# Return 1 if the target supports dlopen, 0 otherwise.
+proc check_effective_target_dlopen {} {
+ return [check_no_compiler_messages dlopen executable {
+ #include <dlfcn.h>
+ int main(void) { dlopen ("dummy.so", RTLD_NOW); }
+ } [add_options_for_dlopen ""]]
+}
+
+proc add_options_for_dlopen { flags } {
+ return "$flags -ldl"
+}
+
+# Return 1 if the target supports clone, 0 otherwise.
+proc check_effective_target_clone {} {
+ return [check_function_available "clone"]
+}
+
+# Return 1 if the target supports setrlimit, 0 otherwise.
+proc check_effective_target_setrlimit {} {
+ # Darwin has non-posix compliant RLIMIT_AS
+ if { [istarget *-*-darwin*] } {
+ return 0
+ }
+ return [check_function_available "setrlimit"]
+}
+
+# Return 1 if the target supports swapcontext, 0 otherwise.
+proc check_effective_target_swapcontext {} {
+ return [check_no_compiler_messages swapcontext executable {
+ #include <ucontext.h>
+ int main (void)
+ {
+ ucontext_t orig_context,child_context;
+ if (swapcontext(&child_context, &orig_context) < 0) { }
+ }
+ }]
+}
+
+# Return 1 if compilation with -pthread is error-free for trivial
+# code, 0 otherwise.
+
+proc check_effective_target_pthread {} {
+ return [check_no_compiler_messages pthread object {
+ void foo (void) { }
+ } "-pthread"]
+}
+
+# Return 1 if compilation with -mpe-aligned-commons is error-free
+# for trivial code, 0 otherwise.
+
+proc check_effective_target_pe_aligned_commons {} {
+ if { [istarget *-*-cygwin*] || [istarget *-*-mingw*] } {
+ return [check_no_compiler_messages pe_aligned_commons object {
+ int foo;
+ } "-mpe-aligned-commons"]
+ }
+ return 0
+}
+
+# Return 1 if the target supports -static
+proc check_effective_target_static {} {
+ return [check_no_compiler_messages static executable {
+ int main (void) { return 0; }
+ } "-static"]
+}
+
+# Return 1 if the target supports -fstack-protector
+proc check_effective_target_fstack_protector {} {
+ return [check_runtime fstack_protector {
+ int main (void) { return 0; }
+ } "-fstack-protector"]
+}
+
+# Return 1 if compilation with -freorder-blocks-and-partition is error-free
+# for trivial code, 0 otherwise.
+
+proc check_effective_target_freorder {} {
+ return [check_no_compiler_messages freorder object {
+ void foo (void) { }
+ } "-freorder-blocks-and-partition"]
+}
+
+# Return 1 if -fpic and -fPIC are supported, as in no warnings or errors
+# emitted, 0 otherwise. Whether a shared library can actually be built is
+# out of scope for this test.
+
+proc check_effective_target_fpic { } {
+ # Note that M68K has a multilib that supports -fpic but not
+ # -fPIC, so we need to check both. We test with a program that
+ # requires GOT references.
+ foreach arg {fpic fPIC} {
+ if [check_no_compiler_messages $arg object {
+ extern int foo (void); extern int bar;
+ int baz (void) { return foo () + bar; }
+ } "-$arg"] {
+ return 1
+ }
+ }
+ return 0
+}
+
+# Return 1 if -pie, -fpie and -fPIE are supported, 0 otherwise.
+
+proc check_effective_target_pie { } {
+ if { [istarget *-*-darwin\[912\]*]
+ || [istarget *-*-linux*]
+ || [istarget *-*-gnu*] } {
+ return 1;
+ }
+ return 0
+}
+
+# Return true if the target supports -mpaired-single (as used on MIPS).
+
+proc check_effective_target_mpaired_single { } {
+ return [check_no_compiler_messages mpaired_single object {
+ void foo (void) { }
+ } "-mpaired-single"]
+}
+
+# Return true if the target has access to FPU instructions.
+
+proc check_effective_target_hard_float { } {
+ if { [istarget mips*-*-*] } {
+ return [check_no_compiler_messages hard_float assembly {
+ #if (defined __mips_soft_float || defined __mips16)
+ #error FOO
+ #endif
+ }]
+ }
+
+ # This proc is actually checking the availability of FPU
+ # support for doubles, so on the RX we must fail if the
+ # 64-bit double multilib has been selected.
+ if { [istarget rx-*-*] } {
+ return 0
+ # return [check_no_compiler_messages hard_float assembly {
+ #if defined __RX_64_BIT_DOUBLES__
+ #error FOO
+ #endif
+ # }]
+ }
+
+ # The generic test equates hard_float with "no call for adding doubles".
+ return [check_no_messages_and_pattern hard_float "!\\(call" rtl-expand {
+ double a (double b, double c) { return b + c; }
+ }]
+}
+
+# Return true if the target is a 64-bit MIPS target.
+
+proc check_effective_target_mips64 { } {
+ return [check_no_compiler_messages mips64 assembly {
+ #ifndef __mips64
+ #error FOO
+ #endif
+ }]
+}
+
+# Return true if the target is a MIPS target that does not produce
+# MIPS16 code.
+
+proc check_effective_target_nomips16 { } {
+ return [check_no_compiler_messages nomips16 object {
+ #ifndef __mips
+ #error FOO
+ #else
+ /* A cheap way of testing for -mflip-mips16. */
+ void foo (void) { asm ("addiu $20,$20,1"); }
+ void bar (void) { asm ("addiu $20,$20,1"); }
+ #endif
+ }]
+}
+
+# Add the options needed for MIPS16 function attributes. At the moment,
+# we don't support MIPS16 PIC.
+
+proc add_options_for_mips16_attribute { flags } {
+ return "$flags -mno-abicalls -fno-pic -DMIPS16=__attribute__((mips16))"
+}
+
+# Return true if we can force a mode that allows MIPS16 code generation.
+# We don't support MIPS16 PIC, and only support MIPS16 -mhard-float
+# for o32 and o64.
+
+proc check_effective_target_mips16_attribute { } {
+ return [check_no_compiler_messages mips16_attribute assembly {
+ #ifdef PIC
+ #error FOO
+ #endif
+ #if defined __mips_hard_float \
+ && (!defined _ABIO32 || _MIPS_SIM != _ABIO32) \
+ && (!defined _ABIO64 || _MIPS_SIM != _ABIO64)
+ #error FOO
+ #endif
+ } [add_options_for_mips16_attribute ""]]
+}
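+
+# Illustrative usage in a C test (the dg directives are the standard ones;
+# the function itself is a hypothetical example):
+#   /* { dg-require-effective-target mips16_attribute } */
+#   /* { dg-add-options mips16_attribute } */
+#   MIPS16 int foo (void) { return 1; }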
+
+# Return 1 if the target supports long double larger than double when
+# using the new ABI, 0 otherwise.
+
+proc check_effective_target_mips_newabi_large_long_double { } {
+ return [check_no_compiler_messages mips_newabi_large_long_double object {
+ int dummy[sizeof(long double) > sizeof(double) ? 1 : -1];
+ } "-mabi=64"]
+}
+
+# Return true if the target is a MIPS target that has access
+# to the LL and SC instructions.
+
+proc check_effective_target_mips_llsc { } {
+ if { ![istarget mips*-*-*] } {
+ return 0
+ }
+ # Assume that these instructions are always implemented for
+ # non-elf* targets, via emulation if necessary.
+ if { ![istarget *-*-elf*] } {
+ return 1
+ }
+ # Otherwise assume LL/SC support for everything but MIPS I.
+ return [check_no_compiler_messages mips_llsc assembly {
+ #if __mips == 1
+ #error FOO
+ #endif
+ }]
+}
+
+# Return true if the target is a MIPS target that uses in-place relocations.
+
+proc check_effective_target_mips_rel { } {
+ if { ![istarget mips*-*-*] } {
+ return 0
+ }
+ return [check_no_compiler_messages mips_rel object {
+ #if (defined _ABIN32 && _MIPS_SIM == _ABIN32) \
+ || (defined _ABI64 && _MIPS_SIM == _ABI64)
+ #error FOO
+ #endif
+ }]
+}
+
+# Return true if the target is a MIPS target that uses the EABI.
+
+proc check_effective_target_mips_eabi { } {
+ if { ![istarget mips*-*-*] } {
+ return 0
+ }
+ return [check_no_compiler_messages mips_eabi object {
+ #ifndef __mips_eabi
+ #error FOO
+ #endif
+ }]
+}
+
+# Return 1 if the current multilib does not generate PIC by default.
+
+proc check_effective_target_nonpic { } {
+ return [check_no_compiler_messages nonpic assembly {
+ #if __PIC__
+ #error FOO
+ #endif
+ }]
+}
+
+# Return 1 if the target does not use a status wrapper.
+
+proc check_effective_target_unwrapped { } {
+ if { [target_info needs_status_wrapper] != "" \
+ && [target_info needs_status_wrapper] != "0" } {
+ return 0
+ }
+ return 1
+}
+
+# Return true if iconv is supported on the target. In particular IBM1047.
+
+proc check_iconv_available { test_what } {
+ global libiconv
+
+ # If the tool configuration file has not set libiconv, try "-liconv"
+ if { ![info exists libiconv] } {
+ set libiconv "-liconv"
+ }
+ set test_what [lindex $test_what 1]
+ return [check_runtime_nocache $test_what [subst {
+ #include <iconv.h>
+ int main (void)
+ {
+ iconv_t cd;
+
+ cd = iconv_open ("$test_what", "UTF-8");
+ if (cd == (iconv_t) -1)
+ return 1;
+ return 0;
+ }
+ }] $libiconv]
+}
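+
+# A board or site configuration can preset libiconv when extra paths are
+# needed, for example (hypothetical paths):
+#   set libiconv "-L/opt/libiconv/lib -liconv"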
+
+# Return true if Cilk Library is supported on the target.
+proc check_libcilkrts_available { } {
+ return [ check_no_compiler_messages_nocache libcilkrts_available executable {
+ #ifdef __cplusplus
+ extern "C"
+ #endif
+ int __cilkrts_set_param (const char *, const char *);
+ int main (void) {
+ int x = __cilkrts_set_param ("nworkers", "0");
+ return x;
+ }
+ } "-fcilkplus -lcilkrts" ]
+}
+
+# Return 1 if an ASCII locale is supported on this host, 0 otherwise.
+
+proc check_ascii_locale_available { } {
+ return 1
+}
+
+# Return true if named sections are supported on this target.
+
+proc check_named_sections_available { } {
+ return [check_no_compiler_messages named_sections assembly {
+ int __attribute__ ((section("whatever"))) foo;
+ }]
+}
+
+# Return true if the "naked" function attribute is supported on this target.
+
+proc check_effective_target_naked_functions { } {
+ return [check_no_compiler_messages naked_functions assembly {
+ void f() __attribute__((naked));
+ }]
+}
+
+# Return 1 if the target supports Fortran real kinds larger than real(8),
+# 0 otherwise.
+#
+# When the target name changes, replace the cached result.
+
+proc check_effective_target_fortran_large_real { } {
+ return [check_no_compiler_messages fortran_large_real executable {
+ ! Fortran
+ integer,parameter :: k = selected_real_kind (precision (0.0_8) + 1)
+ real(kind=k) :: x
+ x = cos (x)
+ end
+ }]
+}
+
+# Return 1 if the target supports Fortran real kind real(16),
+# 0 otherwise.  Unlike check_effective_target_fortran_large_real,
+# this checks for real(16) only; the former selects real(10) if
+# both real(10) and real(16) are available.
+#
+# When the target name changes, replace the cached result.
+
+proc check_effective_target_fortran_real_16 { } {
+ return [check_no_compiler_messages fortran_real_16 executable {
+ ! Fortran
+ real(kind=16) :: x
+ x = cos (x)
+ end
+ }]
+}
+
+
+# Return 1 if the target supports SQRT for the largest floating-point
+# type. (Some targets lack the libm support for this FP type.)
+# On most targets, this check effectively checks either whether sqrtl is
+# available or on __float128 systems whether libquadmath is installed,
+# which provides sqrtq.
+#
+# When the target name changes, replace the cached result.
+
+proc check_effective_target_fortran_largest_fp_has_sqrt { } {
+ return [check_no_compiler_messages fortran_largest_fp_has_sqrt executable {
+ ! Fortran
+ use iso_fortran_env, only: real_kinds
+ integer,parameter:: maxFP = real_kinds(ubound(real_kinds,dim=1))
+ real(kind=maxFP), volatile :: x
+ x = 2.0_maxFP
+ x = sqrt (x)
+ end
+ }]
+}
+
+
+# Return 1 if the target supports Fortran integer kinds larger than
+# integer(8), 0 otherwise.
+#
+# When the target name changes, replace the cached result.
+
+proc check_effective_target_fortran_large_int { } {
+ return [check_no_compiler_messages fortran_large_int executable {
+ ! Fortran
+ integer,parameter :: k = selected_int_kind (range (0_8) + 1)
+ integer(kind=k) :: i
+ end
+ }]
+}
+
+# Return 1 if the target supports Fortran integer(16), 0 otherwise.
+#
+# When the target name changes, replace the cached result.
+
+proc check_effective_target_fortran_integer_16 { } {
+ return [check_no_compiler_messages fortran_integer_16 executable {
+ ! Fortran
+ integer(16) :: i
+ end
+ }]
+}
+
+# Return 1 if we can statically link libgfortran, 0 otherwise.
+#
+# When the target name changes, replace the cached result.
+
+proc check_effective_target_static_libgfortran { } {
+ return [check_no_compiler_messages static_libgfortran executable {
+ ! Fortran
+ print *, 'test'
+ end
+ } "-static"]
+}
+
+# Return 1 if cilk-plus is supported by the target, 0 otherwise.
+
+proc check_effective_target_cilkplus { } {
+ # Skip cilk-plus tests on int16 and size16 targets for now.
+ # The cilk-plus tests are not generic enough to cover these
+ # cases and would throw hundreds of FAILs.
+ if { [check_effective_target_int16]
+ || ![check_effective_target_size32plus] } {
+ return 0;
+ }
+
+ # Skip AVR, its RAM is too small and too many tests would fail.
+ if { [istarget avr-*-*] } {
+ return 0;
+ }
+ return 1
+}
+
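+# Return 1 if compiling and linking with -flto -fuse-linker-plugin
+# succeeds, 0 otherwise.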
+proc check_linker_plugin_available { } {
+ return [check_no_compiler_messages_nocache linker_plugin executable {
+ int main() { return 0; }
+ } "-flto -fuse-linker-plugin"]
+}
+
+# Return 1 if the target supports executing 750CL paired-single instructions, 0
+# otherwise. Cache the result.
+
+proc check_750cl_hw_available { } {
+ return [check_cached_effective_target 750cl_hw_available {
+ # If this is not the right target then we can skip the test.
+ if { ![istarget powerpc-*paired*] } {
+ expr 0
+ } else {
+ check_runtime_nocache 750cl_hw_available {
+ int main()
+ {
+ #ifdef __MACH__
+ asm volatile ("ps_mul v0,v0,v0");
+ #else
+ asm volatile ("ps_mul 0,0,0");
+ #endif
+ return 0;
+ }
+ } "-mpaired"
+ }
+ }]
+}
+
+# Return 1 if the target OS supports running SSE executables, 0
+# otherwise. Cache the result.
+
+proc check_sse_os_support_available { } {
+ return [check_cached_effective_target sse_os_support_available {
+ # If this is not the right target then we can skip the test.
+ if { !([istarget x86_64-*-*] || [istarget i?86-*-*]) } {
+ expr 0
+ } elseif { [istarget i?86-*-solaris2*] } {
+ # The Solaris 2 kernel doesn't save and restore SSE registers
+ # before Solaris 9 4/04. Before that, executables die with SIGILL.
+ check_runtime_nocache sse_os_support_available {
+ int main ()
+ {
+ asm volatile ("movaps %xmm0,%xmm0");
+ return 0;
+ }
+ } "-msse"
+ } else {
+ expr 1
+ }
+ }]
+}
+
+# Return 1 if the target OS supports running AVX executables, 0
+# otherwise. Cache the result.
+
+proc check_avx_os_support_available { } {
+ return [check_cached_effective_target avx_os_support_available {
+ # If this is not the right target then we can skip the test.
+ if { !([istarget x86_64-*-*] || [istarget i?86-*-*]) } {
+ expr 0
+ } else {
+ # Check that OS has AVX and SSE saving enabled.
+ check_runtime_nocache avx_os_support_available {
+ int main ()
+ {
+ unsigned int eax, edx;
+
+ asm ("xgetbv" : "=a" (eax), "=d" (edx) : "c" (0));
+ return (eax & 6) != 6;
+ }
+ } ""
+ }
+ }]
+}
+
+# Return 1 if the target supports executing SSE instructions, 0
+# otherwise. Cache the result.
+
+proc check_sse_hw_available { } {
+ return [check_cached_effective_target sse_hw_available {
+ # If this is not the right target then we can skip the test.
+ if { !([istarget x86_64-*-*] || [istarget i?86-*-*]) } {
+ expr 0
+ } else {
+ check_runtime_nocache sse_hw_available {
+ #include "cpuid.h"
+ int main ()
+ {
+ unsigned int eax, ebx, ecx, edx;
+ if (__get_cpuid (1, &eax, &ebx, &ecx, &edx))
+ return !(edx & bit_SSE);
+ return 1;
+ }
+ } ""
+ }
+ }]
+}
+
+# Return 1 if the target supports executing SSE2 instructions, 0
+# otherwise. Cache the result.
+
+proc check_sse2_hw_available { } {
+ return [check_cached_effective_target sse2_hw_available {
+ # If this is not the right target then we can skip the test.
+ if { !([istarget x86_64-*-*] || [istarget i?86-*-*]) } {
+ expr 0
+ } else {
+ check_runtime_nocache sse2_hw_available {
+ #include "cpuid.h"
+ int main ()
+ {
+ unsigned int eax, ebx, ecx, edx;
+ if (__get_cpuid (1, &eax, &ebx, &ecx, &edx))
+ return !(edx & bit_SSE2);
+ return 1;
+ }
+ } ""
+ }
+ }]
+}
+
+# Return 1 if the target supports executing AVX instructions, 0
+# otherwise. Cache the result.
+
+proc check_avx_hw_available { } {
+ return [check_cached_effective_target avx_hw_available {
+ # If this is not the right target then we can skip the test.
+ if { !([istarget x86_64-*-*] || [istarget i?86-*-*]) } {
+ expr 0
+ } else {
+ check_runtime_nocache avx_hw_available {
+ #include "cpuid.h"
+ int main ()
+ {
+ unsigned int eax, ebx, ecx, edx;
+ if (__get_cpuid (1, &eax, &ebx, &ecx, &edx))
+ return ((ecx & (bit_AVX | bit_OSXSAVE))
+ != (bit_AVX | bit_OSXSAVE));
+ return 1;
+ }
+ } ""
+ }
+ }]
+}
+
+# Return 1 if the target supports running SSE executables, 0 otherwise.
+
+proc check_effective_target_sse_runtime { } {
+ if { [check_effective_target_sse]
+ && [check_sse_hw_available]
+ && [check_sse_os_support_available] } {
+ return 1
+ }
+ return 0
+}
+
+# Return 1 if the target supports running SSE2 executables, 0 otherwise.
+
+proc check_effective_target_sse2_runtime { } {
+ if { [check_effective_target_sse2]
+ && [check_sse2_hw_available]
+ && [check_sse_os_support_available] } {
+ return 1
+ }
+ return 0
+}
+
+# Return 1 if the target supports running AVX executables, 0 otherwise.
+
+proc check_effective_target_avx_runtime { } {
+ if { [check_effective_target_avx]
+ && [check_avx_hw_available]
+ && [check_avx_os_support_available] } {
+ return 1
+ }
+ return 0
+}
+
+# Return 1 if the target supports executing power8 vector instructions, 0
+# otherwise. Cache the result.
+
+proc check_p8vector_hw_available { } {
+ return [check_cached_effective_target p8vector_hw_available {
+ # Some simulators are known to not support VSX/power8 instructions.
+ # For now, disable on Darwin
+ if { [istarget powerpc-*-eabi] || [istarget powerpc*-*-eabispe] || [istarget *-*-darwin*]} {
+ expr 0
+ } else {
+ set options "-mpower8-vector"
+ check_runtime_nocache p8vector_hw_available {
+ int main()
+ {
+ #ifdef __MACH__
+ asm volatile ("xxlorc vs0,vs0,vs0");
+ #else
+ asm volatile ("xxlorc 0,0,0");
+ #endif
+ return 0;
+ }
+ } $options
+ }
+ }]
+}
+
+# Return 1 if the target supports executing VSX instructions, 0
+# otherwise. Cache the result.
+
+proc check_vsx_hw_available { } {
+ return [check_cached_effective_target vsx_hw_available {
+ # Some simulators are known to not support VSX instructions.
+ # For now, disable on Darwin
+ if { [istarget powerpc-*-eabi] || [istarget powerpc*-*-eabispe] || [istarget *-*-darwin*]} {
+ expr 0
+ } else {
+ set options "-mvsx"
+ check_runtime_nocache vsx_hw_available {
+ int main()
+ {
+ #ifdef __MACH__
+ asm volatile ("xxlor vs0,vs0,vs0");
+ #else
+ asm volatile ("xxlor 0,0,0");
+ #endif
+ return 0;
+ }
+ } $options
+ }
+ }]
+}
+
+# Return 1 if the target supports executing AltiVec instructions, 0
+# otherwise. Cache the result.
+
+proc check_vmx_hw_available { } {
+ return [check_cached_effective_target vmx_hw_available {
+ # Some simulators are known to not support VMX instructions.
+ if { [istarget powerpc-*-eabi] || [istarget powerpc*-*-eabispe] } {
+ expr 0
+ } else {
+ # Most targets don't require special flags for this test case, but
+ # Darwin does. Just to be sure, make sure VSX is not enabled for
+ # the altivec tests.
+ if { [istarget *-*-darwin*]
+ || [istarget *-*-aix*] } {
+ set options "-maltivec -mno-vsx"
+ } else {
+ set options "-mno-vsx"
+ }
+ check_runtime_nocache vmx_hw_available {
+ int main()
+ {
+ #ifdef __MACH__
+ asm volatile ("vor v0,v0,v0");
+ #else
+ asm volatile ("vor 0,0,0");
+ #endif
+ return 0;
+ }
+ } $options
+ }
+ }]
+}
+
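+# Return 1 if the target supports executing the PowerPC reciprocal
+# estimate instructions (fres, fre, frsqrtes, frsqrte), 0 otherwise.
+# Cache the result.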
+proc check_ppc_recip_hw_available { } {
+ return [check_cached_effective_target ppc_recip_hw_available {
+ # Some simulators may not support FRE/FRES/FRSQRTE/FRSQRTES
+ # For now, disable on Darwin
+ if { [istarget powerpc-*-eabi] || [istarget powerpc*-*-eabispe] || [istarget *-*-darwin*]} {
+ expr 0
+ } else {
+ set options "-mpowerpc-gfxopt -mpowerpc-gpopt -mpopcntb"
+ check_runtime_nocache ppc_recip_hw_available {
+ volatile double d_recip, d_rsqrt, d_four = 4.0;
+ volatile float f_recip, f_rsqrt, f_four = 4.0f;
+ int main()
+ {
+ asm volatile ("fres %0,%1" : "=f" (f_recip) : "f" (f_four));
+ asm volatile ("fre %0,%1" : "=d" (d_recip) : "d" (d_four));
+ asm volatile ("frsqrtes %0,%1" : "=f" (f_rsqrt) : "f" (f_four));
+ asm volatile ("frsqrte %0,%1" : "=f" (d_rsqrt) : "d" (d_four));
+ return 0;
+ }
+ } $options
+ }
+ }]
+}
+
+# Return 1 if the target supports executing AltiVec and Cell PPU
+# instructions, 0 otherwise. Cache the result.
+
+proc check_effective_target_cell_hw { } {
+ return [check_cached_effective_target cell_hw_available {
+ # Some simulators are known to not support VMX and PPU instructions.
+ if { [istarget powerpc-*-eabi*] } {
+ expr 0
+ } else {
+ # Most targets don't require special flags for this test
+ # case, but Darwin and AIX do.
+ if { [istarget *-*-darwin*]
+ || [istarget *-*-aix*] } {
+ set options "-maltivec -mcpu=cell"
+ } else {
+ set options "-mcpu=cell"
+ }
+ check_runtime_nocache cell_hw_available {
+ int main()
+ {
+ #ifdef __MACH__
+ asm volatile ("vor v0,v0,v0");
+ asm volatile ("lvlx v0,r0,r0");
+ #else
+ asm volatile ("vor 0,0,0");
+ asm volatile ("lvlx 0,0,0");
+ #endif
+ return 0;
+ }
+ } $options
+ }
+ }]
+}
+
+# Return 1 if the target supports executing 64-bit instructions, 0
+# otherwise. Cache the result.
+
+proc check_effective_target_powerpc64 { } {
+ global powerpc64_available_saved
+ global tool
+
+ if [info exists powerpc64_available_saved] {
+ verbose "check_effective_target_powerpc64 returning saved $powerpc64_available_saved" 2
+ } else {
+ set powerpc64_available_saved 0
+
+ # Some simulators are known to not support powerpc64 instructions.
+ if { [istarget powerpc-*-eabi*] || [istarget powerpc-ibm-aix*] } {
+ verbose "check_effective_target_powerpc64 returning 0" 2
+ return $powerpc64_available_saved
+ }
+
+ # Set up, compile, and execute a test program containing a 64-bit
+ # instruction. Include the current process ID in the file
+ # names to prevent conflicts with invocations for multiple
+ # testsuites.
+ set src ppc[pid].c
+ set exe ppc[pid].x
+
+ set f [open $src "w"]
+ puts $f "int main() {"
+ puts $f "#ifdef __MACH__"
+ puts $f " asm volatile (\"extsw r0,r0\");"
+ puts $f "#else"
+ puts $f " asm volatile (\"extsw 0,0\");"
+ puts $f "#endif"
+ puts $f " return 0; }"
+ close $f
+
+ set opts "additional_flags=-mcpu=G5"
+
+ verbose "check_effective_target_powerpc64 compiling testfile $src" 2
+ set lines [${tool}_target_compile $src $exe executable "$opts"]
+ file delete $src
+
+ if [string match "" $lines] then {
+ # No error message, compilation succeeded.
+ set result [${tool}_load "./$exe" "" ""]
+ set status [lindex $result 0]
+ remote_file build delete $exe
+ verbose "check_effective_target_powerpc64 testfile status is <$status>" 2
+
+ if { $status == "pass" } then {
+ set powerpc64_available_saved 1
+ }
+ } else {
+ verbose "check_effective_target_powerpc64 testfile compilation failed" 2
+ }
+ }
+
+ return $powerpc64_available_saved
+}
+
+# GCC 3.4.0 for powerpc64-*-linux* included an ABI fix for passing
+# complex float arguments. This affects gfortran tests that call cabsf
+# in libm built by an earlier compiler. Return 1 if libm uses the same
+# argument passing as the compiler under test, 0 otherwise.
+#
+# When the target name changes, replace the cached result.
+
+proc check_effective_target_broken_cplxf_arg { } {
+ return [check_cached_effective_target broken_cplxf_arg {
+ # Skip the work for targets known not to be affected.
+ if { ![istarget powerpc64-*-linux*] } {
+ expr 0
+ } elseif { ![is-effective-target lp64] } {
+ expr 0
+ } else {
+ check_runtime_nocache broken_cplxf_arg {
+ #include <complex.h>
+ extern void abort (void);
+ float fabsf (float);
+ float cabsf (_Complex float);
+ int main ()
+ {
+ _Complex float cf;
+ float f;
+ cf = 3 + 4.0fi;
+ f = cabsf (cf);
+ if (fabsf (f - 5.0) > 0.0001)
+ abort ();
+ return 0;
+ }
+ } "-lm"
+ }
+ }]
+}
+
+# Return 1 if this is a TI C6X target supporting C67X instructions
+proc check_effective_target_ti_c67x { } {
+ return [check_no_compiler_messages ti_c67x assembly {
+ #if !defined(_TMS320C6700)
+ #error FOO
+ #endif
+ }]
+}
+
+# Return 1 if this is a TI C6X target supporting C64X+ instructions
+proc check_effective_target_ti_c64xp { } {
+ return [check_no_compiler_messages ti_c64xp assembly {
+ #if !defined(_TMS320C6400_PLUS)
+ #error FOO
+ #endif
+ }]
+}
+
+
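+# Return 1 if the target supports executing the Alpha MAX instructions,
+# 0 otherwise.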
+proc check_alpha_max_hw_available { } {
+ return [check_runtime alpha_max_hw_available {
+ int main() { return __builtin_alpha_amask(1<<8) != 0; }
+ }]
+}
+
+# Returns true iff the FUNCTION is available on the target system.
+# (This is essentially a Tcl implementation of Autoconf's
+# AC_CHECK_FUNC.)
+
+proc check_function_available { function } {
+ return [check_no_compiler_messages ${function}_available \
+ executable [subst {
+ #ifdef __cplusplus
+ extern "C"
+ #endif
+ char $function ();
+ int main () { $function (); }
+ }] "-fno-builtin" ]
+}
+
+# Returns true iff "fork" is available on the target system.
+
+proc check_fork_available {} {
+ return [check_function_available "fork"]
+}
+
+# Returns true iff "mkfifo" is available on the target system.
+
+proc check_mkfifo_available {} {
+ if { [istarget *-*-cygwin*] } {
+ # Cygwin has mkfifo, but support is incomplete.
+ return 0
+ }
+
+ return [check_function_available "mkfifo"]
+}
+
+# Returns true iff "__cxa_atexit" is used on the target system.
+
+proc check_cxa_atexit_available { } {
+ return [check_cached_effective_target cxa_atexit_available {
+ if { [istarget hppa*-*-hpux10*] } {
+ # HP-UX 10 doesn't have __cxa_atexit, but the runtime test below would pass anyway.
+ expr 0
+ } elseif { [istarget *-*-vxworks] } {
+ # VxWorks doesn't have __cxa_atexit, but the runtime test below would pass anyway.
+ expr 0
+ } else {
+ check_runtime_nocache cxa_atexit_available {
+ // C++
+ #include <stdlib.h>
+ static unsigned int count;
+ struct X
+ {
+ X() { count = 1; }
+ ~X()
+ {
+ if (count != 3)
+ exit(1);
+ count = 4;
+ }
+ };
+ void f()
+ {
+ static X x;
+ }
+ struct Y
+ {
+ Y() { f(); count = 2; }
+ ~Y()
+ {
+ if (count != 2)
+ exit(1);
+ count = 3;
+ }
+ };
+ Y y;
+ int main() { return 0; }
+ }
+ }
+ }]
+}
+
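+# Return 1 if the Objective-C 2.0 ABI is in use (__OBJC2__ is defined),
+# 0 otherwise.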
+proc check_effective_target_objc2 { } {
+ return [check_no_compiler_messages objc2 object {
+ #ifdef __OBJC2__
+ int dummy[1];
+ #else
+ #error
+ #endif
+ }]
+}
+
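+# Return 1 if the NeXT Objective-C runtime is in use (__NEXT_RUNTIME__
+# is defined), 0 otherwise.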
+proc check_effective_target_next_runtime { } {
+ return [check_no_compiler_messages next_runtime object {
+ #ifdef __NEXT_RUNTIME__
+ int dummy[1];
+ #else
+ #error
+ #endif
+ }]
+}
+
+# Return 1 if we're generating 32-bit code using default options, 0
+# otherwise.
+
+proc check_effective_target_ilp32 { } {
+ return [check_no_compiler_messages ilp32 object {
+ int dummy[sizeof (int) == 4
+ && sizeof (void *) == 4
+ && sizeof (long) == 4 ? 1 : -1];
+ }]
+}
+
+# Return 1 if we're generating ia32 code using default options, 0
+# otherwise.
+
+proc check_effective_target_ia32 { } {
+ return [check_no_compiler_messages ia32 object {
+ int dummy[sizeof (int) == 4
+ && sizeof (void *) == 4
+ && sizeof (long) == 4 ? 1 : -1] = { __i386__ };
+ }]
+}
+
+# Return 1 if we're generating x32 code using default options, 0
+# otherwise.
+
+proc check_effective_target_x32 { } {
+ return [check_no_compiler_messages x32 object {
+ int dummy[sizeof (int) == 4
+ && sizeof (void *) == 4
+ && sizeof (long) == 4 ? 1 : -1] = { __x86_64__ };
+ }]
+}
+
+# Return 1 if we're generating 32-bit integers using default
+# options, 0 otherwise.
+
+proc check_effective_target_int32 { } {
+ return [check_no_compiler_messages int32 object {
+ int dummy[sizeof (int) == 4 ? 1 : -1];
+ }]
+}
+
+# Return 1 if we're generating 32-bit or larger integers using default
+# options, 0 otherwise.
+
+proc check_effective_target_int32plus { } {
+ return [check_no_compiler_messages int32plus object {
+ int dummy[sizeof (int) >= 4 ? 1 : -1];
+ }]
+}
+
+# Return 1 if we're generating 32-bit or larger pointers using default
+# options, 0 otherwise.
+
+proc check_effective_target_ptr32plus { } {
+ # The msp430 has 16-bit or 20-bit pointers. The 20-bit pointer is stored
+ # in a 32-bit slot when in memory, so sizeof(void *) returns 4, but it
+ # cannot really hold a 32-bit address, so we always return false here.
+ if { [istarget msp430-*-*] } {
+ return 0
+ }
+
+ return [check_no_compiler_messages ptr32plus object {
+ int dummy[sizeof (void *) >= 4 ? 1 : -1];
+ }]
+}
+
+# Return 1 if we support 32-bit or larger array and structure sizes
+# using default options, 0 otherwise.
+
+proc check_effective_target_size32plus { } {
+ return [check_no_compiler_messages size32plus object {
+ char dummy[65537];
+ }]
+}
+
+# Returns 1 if we're generating 16-bit or smaller integers with the
+# default options, 0 otherwise.
+
+proc check_effective_target_int16 { } {
+ return [check_no_compiler_messages int16 object {
+ int dummy[sizeof (int) < 4 ? 1 : -1];
+ }]
+}
+
+# Return 1 if we're generating 64-bit code using default options, 0
+# otherwise.
+
+proc check_effective_target_lp64 { } {
+ return [check_no_compiler_messages lp64 object {
+ int dummy[sizeof (int) == 4
+ && sizeof (void *) == 8
+ && sizeof (long) == 8 ? 1 : -1];
+ }]
+}
+
+# Return 1 if we're generating 64-bit code using default llp64 options,
+# 0 otherwise.
+
+proc check_effective_target_llp64 { } {
+ return [check_no_compiler_messages llp64 object {
+ int dummy[sizeof (int) == 4
+ && sizeof (void *) == 8
+ && sizeof (long long) == 8
+ && sizeof (long) == 4 ? 1 : -1];
+ }]
+}
+
+# Return 1 if long and int have different sizes,
+# 0 otherwise.
+
+proc check_effective_target_long_neq_int { } {
+ return [check_no_compiler_messages long_ne_int object {
+ int dummy[sizeof (int) != sizeof (long) ? 1 : -1];
+ }]
+}
+
+# Return 1 if the target supports long double larger than double,
+# 0 otherwise.
+
+proc check_effective_target_large_long_double { } {
+ return [check_no_compiler_messages large_long_double object {
+ int dummy[sizeof(long double) > sizeof(double) ? 1 : -1];
+ }]
+}
+
+# Return 1 if the target supports double larger than float,
+# 0 otherwise.
+
+proc check_effective_target_large_double { } {
+ return [check_no_compiler_messages large_double object {
+ int dummy[sizeof(double) > sizeof(float) ? 1 : -1];
+ }]
+}
+
+# Return 1 if the target supports double of 64 bits,
+# 0 otherwise.
+
+proc check_effective_target_double64 { } {
+ return [check_no_compiler_messages double64 object {
+ int dummy[sizeof(double) == 8 ? 1 : -1];
+ }]
+}
+
+# Return 1 if the target supports double of at least 64 bits,
+# 0 otherwise.
+
+proc check_effective_target_double64plus { } {
+ return [check_no_compiler_messages double64plus object {
+ int dummy[sizeof(double) >= 8 ? 1 : -1];
+ }]
+}
+
+# Return 1 if the target supports the 'w' suffix on floating constants,
+# 0 otherwise.
+
+proc check_effective_target_has_w_floating_suffix { } {
+ set opts ""
+ if [check_effective_target_c++] {
+ append opts "-std=gnu++03"
+ }
+ return [check_no_compiler_messages w_fp_suffix object {
+ float dummy = 1.0w;
+ } "$opts"]
+}
+
+# Return 1 if the target supports the 'q' suffix on floating constants,
+# 0 otherwise.
+
+proc check_effective_target_has_q_floating_suffix { } {
+ set opts ""
+ if [check_effective_target_c++] {
+ append opts "-std=gnu++03"
+ }
+ return [check_no_compiler_messages q_fp_suffix object {
+ float dummy = 1.0q;
+ } "$opts"]
+}
+
+# Return 1 if the target supports compiling fixed-point,
+# 0 otherwise.
+
+proc check_effective_target_fixed_point { } {
+ return [check_no_compiler_messages fixed_point object {
+ _Sat _Fract x; _Sat _Accum y;
+ }]
+}
+
+# Return 1 if the target supports compiling decimal floating point,
+# 0 otherwise.
+
+proc check_effective_target_dfp_nocache { } {
+ verbose "check_effective_target_dfp_nocache: compiling source" 2
+ set ret [check_no_compiler_messages_nocache dfp object {
+ float x __attribute__((mode(DD)));
+ }]
+ verbose "check_effective_target_dfp_nocache: returning $ret" 2
+ return $ret
+}
+
+proc check_effective_target_dfprt_nocache { } {
+ return [check_runtime_nocache dfprt {
+ typedef float d64 __attribute__((mode(DD)));
+ d64 x = 1.2df, y = 2.3dd, z;
+ int main () { z = x + y; return 0; }
+ }]
+}
+
+# Return 1 if the target supports compiling Decimal Floating Point,
+# 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_dfp { } {
+ return [check_cached_effective_target dfp {
+ check_effective_target_dfp_nocache
+ }]
+}
+
+# Return 1 if the target supports linking and executing Decimal Floating
+# Point, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_dfprt { } {
+ return [check_cached_effective_target dfprt {
+ check_effective_target_dfprt_nocache
+ }]
+}
+
+# Return 1 if the target supports executing DFP hardware instructions,
+# 0 otherwise. Cache the result.
+
+proc check_dfp_hw_available { } {
+ return [check_cached_effective_target dfp_hw_available {
+ # For now, disable on Darwin
+ if { [istarget powerpc-*-eabi] || [istarget powerpc*-*-eabispe] || [istarget *-*-darwin*]} {
+ expr 0
+ } else {
+ check_runtime_nocache dfp_hw_available {
+ volatile _Decimal64 r;
+ volatile _Decimal64 a = 4.0DD;
+ volatile _Decimal64 b = 2.0DD;
+ int main()
+ {
+ asm volatile ("dadd %0,%1,%2" : "=d" (r) : "d" (a), "d" (b));
+ asm volatile ("dsub %0,%1,%2" : "=d" (r) : "d" (a), "d" (b));
+ asm volatile ("dmul %0,%1,%2" : "=d" (r) : "d" (a), "d" (b));
+ asm volatile ("ddiv %0,%1,%2" : "=d" (r) : "d" (a), "d" (b));
+ return 0;
+ }
+ } "-mcpu=power6 -mhard-float"
+ }
+ }]
+}
+
+# Return 1 if the target supports compiling and assembling UCN, 0 otherwise.
+
+proc check_effective_target_ucn_nocache { } {
+ # -std=c99 is only valid for C
+ if [check_effective_target_c] {
+ set ucnopts "-std=c99"
+ }
+ append ucnopts " -fextended-identifiers"
+ verbose "check_effective_target_ucn_nocache: compiling source" 2
+ set ret [check_no_compiler_messages_nocache ucn object {
+ int \u00C0;
+ } $ucnopts]
+ verbose "check_effective_target_ucn_nocache: returning $ret" 2
+ return $ret
+}
+
+# Return 1 if the target supports compiling and assembling UCN, 0 otherwise.
+#
+# This won't change for different subtargets, so cache the result.
+
+proc check_effective_target_ucn { } {
+ return [check_cached_effective_target ucn {
+ check_effective_target_ucn_nocache
+ }]
+}
+
+# Return 1 if the target needs a command line argument to enable a SIMD
+# instruction set.
+
+proc check_effective_target_vect_cmdline_needed { } {
+ global et_vect_cmdline_needed_saved
+ global et_vect_cmdline_needed_target_name
+
+ if { ![info exists et_vect_cmdline_needed_target_name] } {
+ set et_vect_cmdline_needed_target_name ""
+ }
+
+ # If the target has changed since we set the cached value, clear it.
+ set current_target [current_target_name]
+ if { $current_target != $et_vect_cmdline_needed_target_name } {
+ verbose "check_effective_target_vect_cmdline_needed: `$et_vect_cmdline_needed_target_name' `$current_target'" 2
+ set et_vect_cmdline_needed_target_name $current_target
+ if { [info exists et_vect_cmdline_needed_saved] } {
+ verbose "check_effective_target_vect_cmdline_needed: removing cached result" 2
+ unset et_vect_cmdline_needed_saved
+ }
+ }
+
+ if [info exists et_vect_cmdline_needed_saved] {
+ verbose "check_effective_target_vect_cmdline_needed: using cached result" 2
+ } else {
+ set et_vect_cmdline_needed_saved 1
+ if { [istarget alpha*-*-*]
+ || [istarget ia64-*-*]
+ || (([istarget x86_64-*-*] || [istarget i?86-*-*])
+ && ([check_effective_target_x32]
+ || [check_effective_target_lp64]))
+ || ([istarget powerpc*-*-*]
+ && ([check_effective_target_powerpc_spe]
+ || [check_effective_target_powerpc_altivec]))
+ || ([istarget sparc*-*-*] && [check_effective_target_sparc_vis])
+ || [istarget spu-*-*]
+ || ([istarget arm*-*-*] && [check_effective_target_arm_neon])
+ || [istarget aarch64*-*-*] } {
+ set et_vect_cmdline_needed_saved 0
+ }
+ }
+
+ verbose "check_effective_target_vect_cmdline_needed: returning $et_vect_cmdline_needed_saved" 2
+ return $et_vect_cmdline_needed_saved
+}
+
+# Return 1 if the target supports hardware vectors of int, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_int { } {
+ global et_vect_int_saved
+
+ if [info exists et_vect_int_saved] {
+ verbose "check_effective_target_vect_int: using cached result" 2
+ } else {
+ set et_vect_int_saved 0
+ if { [istarget i?86-*-*]
+ || ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || [istarget spu-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget sparc*-*-*]
+ || [istarget alpha*-*-*]
+ || [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
+ || [check_effective_target_arm32]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_loongson]) } {
+ set et_vect_int_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_int: returning $et_vect_int_saved" 2
+ return $et_vect_int_saved
+}
+
+# Return 1 if the target supports signed int->float conversion
+#
+
+proc check_effective_target_vect_intfloat_cvt { } {
+ global et_vect_intfloat_cvt_saved
+
+ if [info exists et_vect_intfloat_cvt_saved] {
+ verbose "check_effective_target_vect_intfloat_cvt: using cached result" 2
+ } else {
+ set et_vect_intfloat_cvt_saved 0
+ if { [istarget i?86-*-*]
+ || ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || [istarget x86_64-*-*]
+ || ([istarget arm*-*-*]
+ && [check_effective_target_arm_neon_ok])} {
+ set et_vect_intfloat_cvt_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_intfloat_cvt: returning $et_vect_intfloat_cvt_saved" 2
+ return $et_vect_intfloat_cvt_saved
+}
+
+# Return 1 if the target supports __int128, 0 otherwise.
+
+proc check_effective_target_int128 { } {
+ return [check_no_compiler_messages int128 object {
+ int dummy[
+ #ifndef __SIZEOF_INT128__
+ -1
+ #else
+ 1
+ #endif
+ ];
+ }]
+}
+
+# Return 1 if the target supports unsigned int->float conversion
+#
+
+proc check_effective_target_vect_uintfloat_cvt { } {
+ global et_vect_uintfloat_cvt_saved
+
+ if [info exists et_vect_uintfloat_cvt_saved] {
+ verbose "check_effective_target_vect_uintfloat_cvt: using cached result" 2
+ } else {
+ set et_vect_uintfloat_cvt_saved 0
+ if { [istarget i?86-*-*]
+ || ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || [istarget x86_64-*-*]
+ || [istarget aarch64*-*-*]
+ || ([istarget arm*-*-*]
+ && [check_effective_target_arm_neon_ok])} {
+ set et_vect_uintfloat_cvt_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_uintfloat_cvt: returning $et_vect_uintfloat_cvt_saved" 2
+ return $et_vect_uintfloat_cvt_saved
+}
+
+
+# Return 1 if the target supports signed float->int conversion
+#
+
+proc check_effective_target_vect_floatint_cvt { } {
+ global et_vect_floatint_cvt_saved
+
+ if [info exists et_vect_floatint_cvt_saved] {
+ verbose "check_effective_target_vect_floatint_cvt: using cached result" 2
+ } else {
+ set et_vect_floatint_cvt_saved 0
+ if { [istarget i?86-*-*]
+ || ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || [istarget x86_64-*-*]
+ || ([istarget arm*-*-*]
+ && [check_effective_target_arm_neon_ok])} {
+ set et_vect_floatint_cvt_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_floatint_cvt: returning $et_vect_floatint_cvt_saved" 2
+ return $et_vect_floatint_cvt_saved
+}
+
+# Return 1 if the target supports unsigned float->int conversion
+#
+
+proc check_effective_target_vect_floatuint_cvt { } {
+ global et_vect_floatuint_cvt_saved
+
+ if [info exists et_vect_floatuint_cvt_saved] {
+ verbose "check_effective_target_vect_floatuint_cvt: using cached result" 2
+ } else {
+ set et_vect_floatuint_cvt_saved 0
+ if { ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || ([istarget arm*-*-*]
+ && [check_effective_target_arm_neon_ok])} {
+ set et_vect_floatuint_cvt_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_floatuint_cvt: returning $et_vect_floatuint_cvt_saved" 2
+ return $et_vect_floatuint_cvt_saved
+}
+
+# Return 1 if the target supports #pragma omp declare simd, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_simd_clones { } {
+ global et_vect_simd_clones_saved
+
+ if [info exists et_vect_simd_clones_saved] {
+ verbose "check_effective_target_vect_simd_clones: using cached result" 2
+ } else {
+ set et_vect_simd_clones_saved 0
+ if { [istarget i?86-*-*] || [istarget x86_64-*-*] } {
+ # On i?86/x86_64, #pragma omp declare simd builds an SSE2, AVX and
+ # AVX2 clone.  Only the right clone for the specified arch will be
+ # chosen, but we still need to at least be able to assemble
+ # AVX2.
+ if { [check_effective_target_avx2] } {
+ set et_vect_simd_clones_saved 1
+ }
+ }
+ }
+
+ verbose "check_effective_target_vect_simd_clones: returning $et_vect_simd_clones_saved" 2
+ return $et_vect_simd_clones_saved
+}
+
+# Return 1 if this is an AArch64 target supporting big endian
+proc check_effective_target_aarch64_big_endian { } {
+ return [check_no_compiler_messages aarch64_big_endian assembly {
+ #if !defined(__aarch64__) || !defined(__AARCH64EB__)
+ #error FOO
+ #endif
+ }]
+}
+
+# Return 1 if this is an AArch64 target supporting little endian
+proc check_effective_target_aarch64_little_endian { } {
+ return [check_no_compiler_messages aarch64_little_endian assembly {
+ #if !defined(__aarch64__) || defined(__AARCH64EB__)
+ #error FOO
+ #endif
+ }]
+}
+
+# Return 1 if this is an ARM target using 32-bit instructions
+proc check_effective_target_arm32 { } {
+ return [check_no_compiler_messages arm32 assembly {
+ #if !defined(__arm__) || (defined(__thumb__) && !defined(__thumb2__))
+ #error FOO
+ #endif
+ }]
+}
+
+# Return 1 if this is an ARM target not using Thumb
+proc check_effective_target_arm_nothumb { } {
+ return [check_no_compiler_messages arm_nothumb assembly {
+ #if (defined(__thumb__) || defined(__thumb2__))
+ #error FOO
+ #endif
+ }]
+}
+
+# Return 1 if this is a little-endian ARM target
+proc check_effective_target_arm_little_endian { } {
+ return [check_no_compiler_messages arm_little_endian assembly {
+ #if !defined(__arm__) || !defined(__ARMEL__)
+ #error FOO
+ #endif
+ }]
+}
+
+# Return 1 if this is an ARM target that only supports aligned vector accesses
+proc check_effective_target_arm_vect_no_misalign { } {
+ return [check_no_compiler_messages arm_vect_no_misalign assembly {
+ #if !defined(__arm__) \
+ || (defined(__ARMEL__) \
+ && (!defined(__thumb__) || defined(__thumb2__)))
+ #error FOO
+ #endif
+ }]
+}
+
+
+# Return 1 if this is an ARM target supporting -mfpu=vfp
+# -mfloat-abi=softfp. Some multilibs may be incompatible with these
+# options.
+
+proc check_effective_target_arm_vfp_ok { } {
+ if { [check_effective_target_arm32] } {
+ return [check_no_compiler_messages arm_vfp_ok object {
+ int dummy;
+ } "-mfpu=vfp -mfloat-abi=softfp"]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is an ARM target supporting -mfpu=vfp3
+# -mfloat-abi=softfp.
+
+proc check_effective_target_arm_vfp3_ok { } {
+ if { [check_effective_target_arm32] } {
+ return [check_no_compiler_messages arm_vfp3_ok object {
+ int dummy;
+ } "-mfpu=vfp3 -mfloat-abi=softfp"]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is an ARM target supporting -mfpu=fp-armv8
+# -mfloat-abi=softfp.
+proc check_effective_target_arm_v8_vfp_ok {} {
+ if { [check_effective_target_arm32] } {
+ return [check_no_compiler_messages arm_v8_vfp_ok object {
+ int foo (void)
+ {
+ __asm__ volatile ("vrinta.f32.f32 s0, s0");
+ return 0;
+ }
+ } "-mfpu=fp-armv8 -mfloat-abi=softfp"]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is an ARM target supporting -mfpu=vfp
+# -mfloat-abi=hard. Some multilibs may be incompatible with these
+# options.
+
+proc check_effective_target_arm_hard_vfp_ok { } {
+ if { [check_effective_target_arm32]
+ && ! [check-flags [list "" { *-*-* } { "-mfloat-abi=*" } { "-mfloat-abi=hard" }]] } {
+ return [check_no_compiler_messages arm_hard_vfp_ok executable {
+ int main() { return 0;}
+ } "-mfpu=vfp -mfloat-abi=hard"]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is an ARM target that supports DSP multiply with
+# current multilib flags.
+
+proc check_effective_target_arm_dsp { } {
+ return [check_no_compiler_messages arm_dsp assembly {
+ #ifndef __ARM_FEATURE_DSP
+ #error not DSP
+ #endif
+ int i;
+ }]
+}
+
+# Return 1 if this is an ARM target that supports unaligned word/halfword
+# load/store instructions.
+
+proc check_effective_target_arm_unaligned { } {
+ return [check_no_compiler_messages arm_unaligned assembly {
+ #ifndef __ARM_FEATURE_UNALIGNED
+ #error no unaligned support
+ #endif
+ int i;
+ }]
+}
+
+# Return 1 if this is an ARM target supporting -mfpu=crypto-neon-fp-armv8
+# -mfloat-abi=softfp or equivalent options. Some multilibs may be
+# incompatible with these options. Also set et_arm_crypto_flags to the
+# best options to add.
+
+proc check_effective_target_arm_crypto_ok_nocache { } {
+ global et_arm_crypto_flags
+ set et_arm_crypto_flags ""
+ if { [check_effective_target_arm32] } {
+ foreach flags {"" "-mfloat-abi=softfp" "-mfpu=crypto-neon-fp-armv8" "-mfpu=crypto-neon-fp-armv8 -mfloat-abi=softfp"} {
+ if { [check_no_compiler_messages_nocache arm_crypto_ok object {
+ #include "arm_neon.h"
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vaeseq_u8 (a, b);
+ }
+ } "$flags"] } {
+ set et_arm_crypto_flags $flags
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+# Return 1 if this is an ARM target supporting -mfpu=crypto-neon-fp-armv8
+
+proc check_effective_target_arm_crypto_ok { } {
+ return [check_cached_effective_target arm_crypto_ok \
+ check_effective_target_arm_crypto_ok_nocache]
+}
+
+# Add options for crypto extensions.
+proc add_options_for_arm_crypto { flags } {
+ if { ! [check_effective_target_arm_crypto_ok] } {
+ return "$flags"
+ }
+ global et_arm_crypto_flags
+ return "$flags $et_arm_crypto_flags"
+}
+
+# Add the options needed for NEON. We need either -mfloat-abi=softfp
+# or -mfloat-abi=hard, but if one is already specified by the
+# multilib, use it. Similarly, if a -mfpu option already enables
+# NEON, do not add -mfpu=neon.
+
+proc add_options_for_arm_neon { flags } {
+ if { ! [check_effective_target_arm_neon_ok] } {
+ return "$flags"
+ }
+ global et_arm_neon_flags
+ return "$flags $et_arm_neon_flags"
+}
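+
+# Illustrative usage in a NEON test (hypothetical example):
+#   /* { dg-require-effective-target arm_neon_ok } */
+#   /* { dg-add-options arm_neon } */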
+
+proc add_options_for_arm_v8_vfp { flags } {
+ if { ! [check_effective_target_arm_v8_vfp_ok] } {
+ return "$flags"
+ }
+ return "$flags -mfpu=fp-armv8 -mfloat-abi=softfp"
+}
+
+proc add_options_for_arm_v8_neon { flags } {
+ if { ! [check_effective_target_arm_v8_neon_ok] } {
+ return "$flags"
+ }
+ global et_arm_v8_neon_flags
+ return "$flags $et_arm_v8_neon_flags -march=armv8-a"
+}
+
+proc add_options_for_arm_crc { flags } {
+ if { ! [check_effective_target_arm_crc_ok] } {
+ return "$flags"
+ }
+ global et_arm_crc_flags
+ return "$flags $et_arm_crc_flags"
+}
+
+# Add the options needed for NEONv2 (NEON with FMA).  We need either
+# -mfloat-abi=softfp or -mfloat-abi=hard, but if one is already
+# specified by the multilib, use it.  Similarly, if a -mfpu option
+# already enables NEONv2, do not add -mfpu=neon-vfpv4.
+
+proc add_options_for_arm_neonv2 { flags } {
+ if { ! [check_effective_target_arm_neonv2_ok] } {
+ return "$flags"
+ }
+ global et_arm_neonv2_flags
+ return "$flags $et_arm_neonv2_flags"
+}
+
+# Add the options needed for vfp3.
+proc add_options_for_arm_vfp3 { flags } {
+ if { ! [check_effective_target_arm_vfp3_ok] } {
+ return "$flags"
+ }
+ return "$flags -mfpu=vfp3 -mfloat-abi=softfp"
+}
+
+# Return 1 if this is an ARM target supporting -mfpu=neon
+# -mfloat-abi=softfp or equivalent options. Some multilibs may be
+# incompatible with these options. Also set et_arm_neon_flags to the
+# best options to add.
+
+proc check_effective_target_arm_neon_ok_nocache { } {
+ global et_arm_neon_flags
+ set et_arm_neon_flags ""
+ if { [check_effective_target_arm32] } {
+ foreach flags {"" "-mfloat-abi=softfp" "-mfpu=neon" "-mfpu=neon -mfloat-abi=softfp"} {
+ if { [check_no_compiler_messages_nocache arm_neon_ok object {
+ #include "arm_neon.h"
+ int dummy;
+ } "$flags"] } {
+ set et_arm_neon_flags $flags
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+proc check_effective_target_arm_neon_ok { } {
+ return [check_cached_effective_target arm_neon_ok \
+ check_effective_target_arm_neon_ok_nocache]
+}
+
+proc check_effective_target_arm_crc_ok_nocache { } {
+ global et_arm_crc_flags
+ set et_arm_crc_flags "-march=armv8-a+crc"
+ return [check_no_compiler_messages_nocache arm_crc_ok object {
+ #if !defined (__ARM_FEATURE_CRC32)
+ #error FOO
+ #endif
+ } "$et_arm_crc_flags"]
+}
+
+proc check_effective_target_arm_crc_ok { } {
+ return [check_cached_effective_target arm_crc_ok \
+ check_effective_target_arm_crc_ok_nocache]
+}
+
+# Return 1 if this is an ARM target supporting -mfpu=neon-fp16
+# -mfloat-abi=softfp or equivalent options. Some multilibs may be
+# incompatible with these options.  Also set et_arm_neon_fp16_flags to
+# the best options to add.
+
+proc check_effective_target_arm_neon_fp16_ok_nocache { } {
+ global et_arm_neon_fp16_flags
+ set et_arm_neon_fp16_flags ""
+ if { [check_effective_target_arm32] } {
+ foreach flags {"" "-mfloat-abi=softfp" "-mfpu=neon-fp16"
+ "-mfpu=neon-fp16 -mfloat-abi=softfp"} {
+ if { [check_no_compiler_messages_nocache arm_neon_fp_16_ok object {
+ #include "arm_neon.h"
+ float16x4_t
+ foo (float32x4_t arg)
+ {
+ return vcvt_f16_f32 (arg);
+ }
+ } "$flags"] } {
+ set et_arm_neon_fp16_flags $flags
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+proc check_effective_target_arm_neon_fp16_ok { } {
+ return [check_cached_effective_target arm_neon_fp16_ok \
+ check_effective_target_arm_neon_fp16_ok_nocache]
+}
+
+proc add_options_for_arm_neon_fp16 { flags } {
+ if { ! [check_effective_target_arm_neon_fp16_ok] } {
+ return "$flags"
+ }
+ global et_arm_neon_fp16_flags
+ return "$flags $et_arm_neon_fp16_flags"
+}
+
+# Return 1 if this is an ARM target supporting -mfpu=neon-fp-armv8
+# -mfloat-abi=softfp or equivalent options. Some multilibs may be
+# incompatible with these options. Also set et_arm_v8_neon_flags to the
+# best options to add.
+
+proc check_effective_target_arm_v8_neon_ok_nocache { } {
+ global et_arm_v8_neon_flags
+ set et_arm_v8_neon_flags ""
+ if { [check_effective_target_arm32] } {
+ foreach flags {"" "-mfloat-abi=softfp" "-mfpu=neon-fp-armv8" "-mfpu=neon-fp-armv8 -mfloat-abi=softfp"} {
+ if { [check_no_compiler_messages_nocache arm_v8_neon_ok object {
+ #include "arm_neon.h"
+ void
+ foo ()
+ {
+ __asm__ volatile ("vrintn.f32 q0, q0");
+ }
+ } "$flags"] } {
+ set et_arm_v8_neon_flags $flags
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+proc check_effective_target_arm_v8_neon_ok { } {
+ return [check_cached_effective_target arm_v8_neon_ok \
+ check_effective_target_arm_v8_neon_ok_nocache]
+}
+
+# Return 1 if this is an ARM target supporting -mfpu=neon-vfpv4
+# -mfloat-abi=softfp or equivalent options. Some multilibs may be
+# incompatible with these options. Also set et_arm_neonv2_flags to the
+# best options to add.
+
+proc check_effective_target_arm_neonv2_ok_nocache { } {
+ global et_arm_neonv2_flags
+ set et_arm_neonv2_flags ""
+ if { [check_effective_target_arm32] } {
+ foreach flags {"" "-mfloat-abi=softfp" "-mfpu=neon-vfpv4" "-mfpu=neon-vfpv4 -mfloat-abi=softfp"} {
+ if { [check_no_compiler_messages_nocache arm_neonv2_ok object {
+ #include "arm_neon.h"
+ float32x2_t
+ foo (float32x2_t a, float32x2_t b, float32x2_t c)
+ {
+ return vfma_f32 (a, b, c);
+ }
+ } "$flags"] } {
+ set et_arm_neonv2_flags $flags
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+proc check_effective_target_arm_neonv2_ok { } {
+ return [check_cached_effective_target arm_neonv2_ok \
+ check_effective_target_arm_neonv2_ok_nocache]
+}
+
+# Add the options needed for a VFP fp16 variant.  We need either
+# -mfloat-abi=softfp or -mfloat-abi=hard, but if one is already
+# specified by the multilib, use it.
+
+proc add_options_for_arm_fp16 { flags } {
+ if { ! [check_effective_target_arm_fp16_ok] } {
+ return "$flags"
+ }
+ global et_arm_fp16_flags
+ return "$flags $et_arm_fp16_flags"
+}
+
+# Return 1 if this is an ARM target that can support a VFP fp16 variant.
+# Skip multilibs that are incompatible with these options and set
+# et_arm_fp16_flags to the best options to add.
+
+proc check_effective_target_arm_fp16_ok_nocache { } {
+ global et_arm_fp16_flags
+ set et_arm_fp16_flags ""
+ if { ! [check_effective_target_arm32] } {
+ return 0;
+ }
+ if [check-flags [list "" { *-*-* } { "-mfpu=*" } { "-mfpu=*fp16*" "-mfpu=*fpv[4-9]*" "-mfpu=*fpv[1-9][0-9]*" } ]] {
+ # Multilib flags would override -mfpu.
+ return 0
+ }
+ if [check-flags [list "" { *-*-* } { "-mfloat-abi=soft" } { "" } ]] {
+ # Must generate floating-point instructions.
+ return 0
+ }
+ if [check_effective_target_arm_hf_eabi] {
+ # Use existing float-abi and force an fpu which supports fp16
+ set et_arm_fp16_flags "-mfpu=vfpv4"
+ return 1;
+ }
+ if [check-flags [list "" { *-*-* } { "-mfpu=*" } { "" } ]] {
+ # The existing -mfpu value is OK; use it, but add softfp.
+ set et_arm_fp16_flags "-mfloat-abi=softfp"
+ return 1;
+ }
+ # Add -mfpu for a VFP fp16 variant since there is no preprocessor
+ # macro to check for this support.
+ set flags "-mfpu=vfpv4 -mfloat-abi=softfp"
+ if { [check_no_compiler_messages_nocache arm_fp16_ok assembly {
+ int dummy;
+ } "$flags"] } {
+ set et_arm_fp16_flags "$flags"
+ return 1
+ }
+
+ return 0
+}
+
+proc check_effective_target_arm_fp16_ok { } {
+ return [check_cached_effective_target arm_fp16_ok \
+ check_effective_target_arm_fp16_ok_nocache]
+}
+
+# Creates a series of routines that return 1 if the given architecture
+# can be selected, and a routine to give the flags to select that architecture.
+# Note: Extra flags may be added to disable options from newer compilers
+# (Thumb in particular - but others may be added in the future)
+# Usage: /* { dg-require-effective-target arm_arch_v5_ok } */
+# /* { dg-add-options arm_arch_v5 } */
+# /* { dg-require-effective-target arm_arch_v5_multilib } */
+foreach { armfunc armflag armdef } { v4 "-march=armv4 -marm" __ARM_ARCH_4__
+ v4t "-march=armv4t" __ARM_ARCH_4T__
+ v5 "-march=armv5 -marm" __ARM_ARCH_5__
+ v5t "-march=armv5t" __ARM_ARCH_5T__
+ v5te "-march=armv5te" __ARM_ARCH_5TE__
+ v6 "-march=armv6" __ARM_ARCH_6__
+ v6k "-march=armv6k" __ARM_ARCH_6K__
+ v6t2 "-march=armv6t2" __ARM_ARCH_6T2__
+ v6z "-march=armv6z" __ARM_ARCH_6Z__
+ v6m "-march=armv6-m -mthumb" __ARM_ARCH_6M__
+ v7a "-march=armv7-a" __ARM_ARCH_7A__
+ v7ve "-march=armv7ve" __ARM_ARCH_7A__
+ v7r "-march=armv7-r" __ARM_ARCH_7R__
+ v7m "-march=armv7-m -mthumb" __ARM_ARCH_7M__
+ v7em "-march=armv7e-m -mthumb" __ARM_ARCH_7EM__
+ v8a "-march=armv8-a" __ARM_ARCH_8A__ } {
+ eval [string map [list FUNC $armfunc FLAG $armflag DEF $armdef ] {
+ proc check_effective_target_arm_arch_FUNC_ok { } {
+ if { [ string match "*-marm*" "FLAG" ] &&
+ ![check_effective_target_arm_arm_ok] } {
+ return 0
+ }
+ return [check_no_compiler_messages arm_arch_FUNC_ok assembly {
+ #if !defined (DEF)
+ #error FOO
+ #endif
+ } "FLAG" ]
+ }
+
+ proc add_options_for_arm_arch_FUNC { flags } {
+ return "$flags FLAG"
+ }
+
+ proc check_effective_target_arm_arch_FUNC_multilib { } {
+ return [check_runtime arm_arch_FUNC_multilib {
+ int
+ main (void)
+ {
+ return 0;
+ }
+ } [add_options_for_arm_arch_FUNC ""]]
+ }
+ }]
+}
+
+# Return 1 if this is an ARM target where -marm causes ARM to be
+# used (not Thumb)
+
+proc check_effective_target_arm_arm_ok { } {
+ return [check_no_compiler_messages arm_arm_ok assembly {
+ #if !defined (__arm__) || defined (__thumb__) || defined (__thumb2__)
+ #error FOO
+ #endif
+ } "-marm"]
+}
+
+
+# Return 1 if this is an ARM target where -mthumb causes Thumb-1 to be
+# used.
+
+proc check_effective_target_arm_thumb1_ok { } {
+ return [check_no_compiler_messages arm_thumb1_ok assembly {
+ #if !defined(__arm__) || !defined(__thumb__) || defined(__thumb2__)
+ #error FOO
+ #endif
+ } "-mthumb"]
+}
+
+# Return 1 if this is an ARM target where -mthumb causes Thumb-2 to be
+# used.
+
+proc check_effective_target_arm_thumb2_ok { } {
+ return [check_no_compiler_messages arm_thumb2_ok assembly {
+ #if !defined(__thumb2__)
+ #error FOO
+ #endif
+ } "-mthumb"]
+}
+
+# Return 1 if this is an ARM target where Thumb-1 is used without options
+# added by the test.
+
+proc check_effective_target_arm_thumb1 { } {
+ return [check_no_compiler_messages arm_thumb1 assembly {
+ #if !defined(__arm__) || !defined(__thumb__) || defined(__thumb2__)
+ #error not thumb1
+ #endif
+ int i;
+ } ""]
+}
+
+# Return 1 if this is an ARM target where Thumb-2 is used without options
+# added by the test.
+
+proc check_effective_target_arm_thumb2 { } {
+ return [check_no_compiler_messages arm_thumb2 assembly {
+ #if !defined(__thumb2__)
+ #error FOO
+ #endif
+ int i;
+ } ""]
+}
+
+# Return 1 if this is an ARM target where conditional execution is available.
+
+proc check_effective_target_arm_cond_exec { } {
+ return [check_no_compiler_messages arm_cond_exec assembly {
+ #if defined(__arm__) && defined(__thumb__) && !defined(__thumb2__)
+ #error FOO
+ #endif
+ int i;
+ } ""]
+}
+
+# Return 1 if this is an ARM Cortex-M profile CPU.
+
+proc check_effective_target_arm_cortex_m { } {
+ return [check_no_compiler_messages arm_cortex_m assembly {
+ #if !defined(__ARM_ARCH_7M__) \
+ && !defined (__ARM_ARCH_7EM__) \
+ && !defined (__ARM_ARCH_6M__)
+ #error FOO
+ #endif
+ int i;
+ } "-mthumb"]
+}
+
+# Return 1 if the target supports executing NEON instructions, 0
+# otherwise. Cache the result.
+
+proc check_effective_target_arm_neon_hw { } {
+ return [check_runtime arm_neon_hw_available {
+ int
+ main (void)
+ {
+ long long a = 0, b = 1;
+ asm ("vorr %P0, %P1, %P2"
+ : "=w" (a)
+ : "0" (a), "w" (b));
+ return (a != 1);
+ }
+ } [add_options_for_arm_neon ""]]
+}
+
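+# Return 1 if the target supports executing NEONv2 (fused multiply-add)
+# instructions, 0 otherwise.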
+proc check_effective_target_arm_neonv2_hw { } {
+ return [check_runtime arm_neon_hwv2_available {
+ #include "arm_neon.h"
+ int
+ main (void)
+ {
+ float32x2_t a, b, c;
+ asm ("vfma.f32 %P0, %P1, %P2"
+ : "=w" (a)
+ : "w" (b), "w" (c));
+ return 0;
+ }
+ } [add_options_for_arm_neonv2 ""]]
+}
+
+# Return 1 if the target supports executing ARMv8 NEON instructions, 0
+# otherwise.
+
+proc check_effective_target_arm_v8_neon_hw { } {
+ return [check_runtime arm_v8_neon_hw_available {
+ #include "arm_neon.h"
+ int
+ main (void)
+ {
+ float32x2_t a;
+ asm ("vrinta.f32 %P0, %P1"
+ : "=w" (a)
+ : "0" (a));
+ return 0;
+ }
+ } [add_options_for_arm_v8_neon ""]]
+}
+
+# Return 1 if this is an ARM target with NEON enabled.
+
+proc check_effective_target_arm_neon { } {
+ if { [check_effective_target_arm32] } {
+ return [check_no_compiler_messages arm_neon object {
+ #ifndef __ARM_NEON__
+ #error not NEON
+ #else
+ int dummy;
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
+proc check_effective_target_arm_neonv2 { } {
+ if { [check_effective_target_arm32] } {
+ return [check_no_compiler_messages arm_neon object {
+ #ifndef __ARM_NEON__
+ #error not NEON
+ #else
+ #ifndef __ARM_FEATURE_FMA
+ #error not NEONv2
+ #else
+ int dummy;
+ #endif
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a Loongson-2E or -2F target using an ABI that supports
+# the Loongson vector modes.
+
+proc check_effective_target_mips_loongson { } {
+ return [check_no_compiler_messages loongson assembly {
+ #if !defined(__mips_loongson_vector_rev)
+ #error FOO
+ #endif
+ }]
+}
+
+# Return 1 if this is an ARM target that adheres to the ABI for the ARM
+# Architecture.
+
+proc check_effective_target_arm_eabi { } {
+ return [check_no_compiler_messages arm_eabi object {
+ #ifndef __ARM_EABI__
+ #error not EABI
+ #else
+ int dummy;
+ #endif
+ }]
+}
+
+# Return 1 if this is an ARM target that adheres to the hard-float variant of
+# the ABI for the ARM Architecture (e.g. -mfloat-abi=hard).
+
+proc check_effective_target_arm_hf_eabi { } {
+ return [check_no_compiler_messages arm_hf_eabi object {
+ #if !defined(__ARM_EABI__) || !defined(__ARM_PCS_VFP)
+ #error not hard-float EABI
+ #else
+ int dummy;
+ #endif
+ }]
+}
+
+# Return 1 if this is an ARM target supporting -mcpu=iwmmxt.
+# Some multilibs may be incompatible with this option.
+
+proc check_effective_target_arm_iwmmxt_ok { } {
+ if { [check_effective_target_arm32] } {
+ return [check_no_compiler_messages arm_iwmmxt_ok object {
+ int dummy;
+ } "-mcpu=iwmmxt"]
+ } else {
+ return 0
+ }
+}
+
+# Return true if LDRD/STRD instructions are preferred over LDM/STM instructions
+# for an ARM target.
+proc check_effective_target_arm_prefer_ldrd_strd { } {
+ if { ![check_effective_target_arm32] } {
+ return 0;
+ }
+
+ return [check_no_messages_and_pattern arm_prefer_ldrd_strd "strd\tr" assembly {
+ void foo (int *p) { p[0] = 1; p[1] = 0;}
+ } "-O2 -mthumb" ]
+}
+
+# Return 1 if this is a PowerPC target supporting -meabi.
+
+proc check_effective_target_powerpc_eabi_ok { } {
+ if { [istarget powerpc*-*-*] } {
+ return [check_no_compiler_messages powerpc_eabi_ok object {
+ int dummy;
+ } "-meabi"]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC target with floating-point registers.
+
+proc check_effective_target_powerpc_fprs { } {
+ if { [istarget powerpc*-*-*]
+ || [istarget rs6000-*-*] } {
+ return [check_no_compiler_messages powerpc_fprs object {
+ #ifdef __NO_FPRS__
+ #error no FPRs
+ #else
+ int dummy;
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC target with hardware double-precision
+# floating point.
+
+proc check_effective_target_powerpc_hard_double { } {
+ if { [istarget powerpc*-*-*]
+ || [istarget rs6000-*-*] } {
+ return [check_no_compiler_messages powerpc_hard_double object {
+ #ifdef _SOFT_DOUBLE
+ #error soft double
+ #else
+ int dummy;
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC target supporting -maltivec.
+
+proc check_effective_target_powerpc_altivec_ok { } {
+ if { ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || [istarget rs6000-*-*] } {
+ # AltiVec is not supported on AIX before 5.3.
+ if { [istarget powerpc*-*-aix4*]
+ || [istarget powerpc*-*-aix5.1*]
+ || [istarget powerpc*-*-aix5.2*] } {
+ return 0
+ }
+ return [check_no_compiler_messages powerpc_altivec_ok object {
+ int dummy;
+ } "-maltivec"]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC target supporting -mpower8-vector
+
+proc check_effective_target_powerpc_p8vector_ok { } {
+ if { ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || [istarget rs6000-*-*] } {
+ # AltiVec is not supported on AIX before 5.3.
+ if { [istarget powerpc*-*-aix4*]
+ || [istarget powerpc*-*-aix5.1*]
+ || [istarget powerpc*-*-aix5.2*] } {
+ return 0
+ }
+ return [check_no_compiler_messages powerpc_p8vector_ok object {
+ int main (void) {
+#ifdef __MACH__
+ asm volatile ("xxlorc vs0,vs0,vs0");
+#else
+ asm volatile ("xxlorc 0,0,0");
+#endif
+ return 0;
+ }
+ } "-mpower8-vector"]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC target supporting -mvsx
+
+proc check_effective_target_powerpc_vsx_ok { } {
+ if { ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || [istarget rs6000-*-*] } {
+ # VSX is not supported on AIX before 7.1.
+ if { [istarget powerpc*-*-aix4*]
+ || [istarget powerpc*-*-aix5*]
+ || [istarget powerpc*-*-aix6*] } {
+ return 0
+ }
+ return [check_no_compiler_messages powerpc_vsx_ok object {
+ int main (void) {
+#ifdef __MACH__
+ asm volatile ("xxlor vs0,vs0,vs0");
+#else
+ asm volatile ("xxlor 0,0,0");
+#endif
+ return 0;
+ }
+ } "-mvsx"]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC target supporting -mhtm
+
+proc check_effective_target_powerpc_htm_ok { } {
+ if { ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || [istarget rs6000-*-*] } {
+ # HTM is not supported on AIX yet.
+ if { [istarget powerpc*-*-aix*] } {
+ return 0
+ }
+ return [check_no_compiler_messages powerpc_htm_ok object {
+ int main (void) {
+ asm volatile ("tbegin. 0");
+ return 0;
+ }
+ } "-mhtm"]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC target supporting -mcpu=cell.
+
+proc check_effective_target_powerpc_ppu_ok { } {
+ if [check_effective_target_powerpc_altivec_ok] {
+ return [check_no_compiler_messages cell_asm_available object {
+ int main (void) {
+#ifdef __MACH__
+ asm volatile ("lvlx v0,v0,v0");
+#else
+ asm volatile ("lvlx 0,0,0");
+#endif
+ return 0;
+ }
+ }]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC target that supports SPU.
+
+proc check_effective_target_powerpc_spu { } {
+ if { [istarget powerpc*-*-linux*] } {
+ return [check_effective_target_powerpc_altivec_ok]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC SPE target. The check includes options
+# specified by dg-options for this test, so don't cache the result.
+
+proc check_effective_target_powerpc_spe_nocache { } {
+ if { [istarget powerpc*-*-*] } {
+ return [check_no_compiler_messages_nocache powerpc_spe object {
+ #ifndef __SPE__
+ #error not SPE
+ #else
+ int dummy;
+ #endif
+ } [current_compiler_flags]]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC target with SPE enabled.
+
+proc check_effective_target_powerpc_spe { } {
+ if { [istarget powerpc*-*-*] } {
+ return [check_no_compiler_messages powerpc_spe object {
+ #ifndef __SPE__
+ #error not SPE
+ #else
+ int dummy;
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC target with Altivec enabled.
+
+proc check_effective_target_powerpc_altivec { } {
+ if { [istarget powerpc*-*-*] } {
+ return [check_no_compiler_messages powerpc_altivec object {
+ #ifndef __ALTIVEC__
+ #error not Altivec
+ #else
+ int dummy;
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC 405 target. The check includes options
+# specified by dg-options for this test, so don't cache the result.
+
+proc check_effective_target_powerpc_405_nocache { } {
+ if { [istarget powerpc*-*-*] || [istarget rs6000-*-*] } {
+ return [check_no_compiler_messages_nocache powerpc_405 object {
+ #ifdef __PPC405__
+ int dummy;
+ #else
+ #error not a PPC405
+ #endif
+ } [current_compiler_flags]]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a PowerPC target using the ELFv2 ABI.
+
+proc check_effective_target_powerpc_elfv2 { } {
+ if { [istarget powerpc*-*-*] } {
+ return [check_no_compiler_messages powerpc_elfv2 object {
+ #if _CALL_ELF != 2
+ #error not ELF v2 ABI
+ #else
+ int dummy;
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a SPU target with a toolchain that
+# supports automatic overlay generation.
+
+proc check_effective_target_spu_auto_overlay { } {
+ if { [istarget spu*-*-elf*] } {
+ return [check_no_compiler_messages spu_auto_overlay executable {
+ int main (void) { }
+ } "-Wl,--auto-overlay" ]
+ } else {
+ return 0
+ }
+}
+
+# The VxWorks SPARC simulator accepts only EM_SPARC executables and
+# chokes on EM_SPARC32PLUS or EM_SPARCV9 executables. Return 1 if the
+# test environment appears to run executables on such a simulator.
+
+proc check_effective_target_ultrasparc_hw { } {
+ return [check_runtime ultrasparc_hw {
+ int main() { return 0; }
+ } "-mcpu=ultrasparc"]
+}
+
+# Return 1 if the test environment supports executing UltraSPARC VIS2
+# instructions. We check this by attempting: "bmask %g0, %g0, %g0"
+
+proc check_effective_target_ultrasparc_vis2_hw { } {
+ return [check_runtime ultrasparc_vis2_hw {
+ int main() { __asm__(".word 0x81b00320"); return 0; }
+ } "-mcpu=ultrasparc3"]
+}
+
+# Return 1 if the test environment supports executing UltraSPARC VIS3
+# instructions. We check this by attempting: "addxc %g0, %g0, %g0"
+
+proc check_effective_target_ultrasparc_vis3_hw { } {
+ return [check_runtime ultrasparc_vis3_hw {
+ int main() { __asm__(".word 0x81b00220"); return 0; }
+ } "-mcpu=niagara3"]
+}
+
+# Return 1 if this is a SPARC-V9 target.
+
+proc check_effective_target_sparc_v9 { } {
+ if { [istarget sparc*-*-*] } {
+ return [check_no_compiler_messages sparc_v9 object {
+ int main (void) {
+ asm volatile ("return %i7+8");
+ return 0;
+ }
+ }]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is a SPARC target with VIS enabled.
+
+proc check_effective_target_sparc_vis { } {
+ if { [istarget sparc*-*-*] } {
+ return [check_no_compiler_messages sparc_vis object {
+ #ifndef __VIS__
+ #error not VIS
+ #else
+ int dummy;
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if the target supports hardware vector shift operation.
+
+proc check_effective_target_vect_shift { } {
+ global et_vect_shift_saved
+
+ if [info exists et_vect_shift_saved] {
+ verbose "check_effective_target_vect_shift: using cached result" 2
+ } else {
+ set et_vect_shift_saved 0
+ if { ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || [istarget ia64-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget aarch64*-*-*]
+ || [check_effective_target_arm32]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_loongson]) } {
+ set et_vect_shift_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_shift: returning $et_vect_shift_saved" 2
+ return $et_vect_shift_saved
+}
+
+# Return 1 if the target supports hardware vector shift operation for char.
+
+proc check_effective_target_vect_shift_char { } {
+ global et_vect_shift_char_saved
+
+ if [info exists et_vect_shift_char_saved] {
+ verbose "check_effective_target_vect_shift_char: using cached result" 2
+ } else {
+ set et_vect_shift_char_saved 0
+ if { ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || [check_effective_target_arm32] } {
+ set et_vect_shift_char_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_shift_char: returning $et_vect_shift_char_saved" 2
+ return $et_vect_shift_char_saved
+}
+
+# Return 1 if the target supports hardware vectors of long, 0 otherwise.
+#
+# This can change for different subtargets so do not cache the result.
+
+proc check_effective_target_vect_long { } {
+ if { [istarget i?86-*-*]
+ || (([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ && [check_effective_target_ilp32])
+ || [istarget x86_64-*-*]
+ || [check_effective_target_arm32]
+ || ([istarget sparc*-*-*] && [check_effective_target_ilp32]) } {
+ set answer 1
+ } else {
+ set answer 0
+ }
+
+ verbose "check_effective_target_vect_long: returning $answer" 2
+ return $answer
+}
+
+# Return 1 if the target supports hardware vectors of float, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_float { } {
+ global et_vect_float_saved
+
+ if [info exists et_vect_float_saved] {
+ verbose "check_effective_target_vect_float: using cached result" 2
+ } else {
+ set et_vect_float_saved 0
+ if { [istarget i?86-*-*]
+ || [istarget powerpc*-*-*]
+ || [istarget spu-*-*]
+ || [istarget mips-sde-elf]
+ || [istarget mipsisa64*-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
+ || [check_effective_target_arm32] } {
+ set et_vect_float_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_float: returning $et_vect_float_saved" 2
+ return $et_vect_float_saved
+}
+
+# Return 1 if the target supports hardware vectors of double, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_double { } {
+ global et_vect_double_saved
+
+ if [info exists et_vect_double_saved] {
+ verbose "check_effective_target_vect_double: using cached result" 2
+ } else {
+ set et_vect_double_saved 0
+ if { [istarget i?86-*-*]
+ || [istarget aarch64*-*-*]
+ || [istarget x86_64-*-*] } {
+ if { [check_no_compiler_messages vect_double assembly {
+ #ifdef __tune_atom__
+ # error No double vectorizer support.
+ #endif
+ }] } {
+ set et_vect_double_saved 1
+ } else {
+ set et_vect_double_saved 0
+ }
+ } elseif { [istarget spu-*-*] } {
+ set et_vect_double_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_double: returning $et_vect_double_saved" 2
+ return $et_vect_double_saved
+}
+
+# Return 1 if the target supports hardware vectors of long long, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_long_long { } {
+ global et_vect_long_long_saved
+
+ if [info exists et_vect_long_long_saved] {
+ verbose "check_effective_target_vect_long_long: using cached result" 2
+ } else {
+ set et_vect_long_long_saved 0
+ if { [istarget i?86-*-*]
+ || [istarget x86_64-*-*] } {
+ set et_vect_long_long_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_long_long: returning $et_vect_long_long_saved" 2
+ return $et_vect_long_long_saved
+}
+
+
+# Return 1 if the target plus current options does not support a vector
+# max instruction on "int", 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_no_int_max { } {
+ global et_vect_no_int_max_saved
+
+ if [info exists et_vect_no_int_max_saved] {
+ verbose "check_effective_target_vect_no_int_max: using cached result" 2
+ } else {
+ set et_vect_no_int_max_saved 0
+ if { [istarget sparc*-*-*]
+ || [istarget spu-*-*]
+ || [istarget alpha*-*-*]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_loongson]) } {
+ set et_vect_no_int_max_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_no_int_max: returning $et_vect_no_int_max_saved" 2
+ return $et_vect_no_int_max_saved
+}
+
+# Return 1 if the target plus current options does not support a vector
+# add instruction on "int", 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_no_int_add { } {
+ global et_vect_no_int_add_saved
+
+ if [info exists et_vect_no_int_add_saved] {
+ verbose "check_effective_target_vect_no_int_add: using cached result" 2
+ } else {
+ set et_vect_no_int_add_saved 0
+ # Alpha only supports vector add on V8QI and V4HI.
+ if { [istarget alpha*-*-*] } {
+ set et_vect_no_int_add_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_no_int_add: returning $et_vect_no_int_add_saved" 2
+ return $et_vect_no_int_add_saved
+}
+
+# Return 1 if the target plus current options does not support vector
+# bitwise instructions, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_no_bitwise { } {
+ global et_vect_no_bitwise_saved
+
+ if [info exists et_vect_no_bitwise_saved] {
+ verbose "check_effective_target_vect_no_bitwise: using cached result" 2
+ } else {
+ set et_vect_no_bitwise_saved 0
+ }
+ verbose "check_effective_target_vect_no_bitwise: returning $et_vect_no_bitwise_saved" 2
+ return $et_vect_no_bitwise_saved
+}
+
+# Return 1 if the target plus current options supports vector permutation,
+# 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_perm { } {
+ global et_vect_perm
+
+ if [info exists et_vect_perm_saved] {
+ verbose "check_effective_target_vect_perm: using cached result" 2
+ } else {
+ set et_vect_perm_saved 0
+ if { [is-effective-target arm_neon_ok]
+ || ([istarget aarch64*-*-*]
+ && [is-effective-target aarch64_little_endian])
+ || [istarget powerpc*-*-*]
+ || [istarget spu-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mpaired_single]) } {
+ set et_vect_perm_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_perm: returning $et_vect_perm_saved" 2
+ return $et_vect_perm_saved
+}
+
+# Return 1 if the target plus current options supports vector permutation
+# on byte-sized elements, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_perm_byte { } {
+ global et_vect_perm_byte
+
+ if [info exists et_vect_perm_byte_saved] {
+ verbose "check_effective_target_vect_perm_byte: using cached result" 2
+ } else {
+ set et_vect_perm_byte_saved 0
+ if { ([is-effective-target arm_neon_ok]
+ && [is-effective-target arm_little_endian])
+ || ([istarget aarch64*-*-*]
+ && [is-effective-target aarch64_little_endian])
+ || [istarget powerpc*-*-*]
+ || [istarget spu-*-*] } {
+ set et_vect_perm_byte_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_perm_byte: returning $et_vect_perm_byte_saved" 2
+ return $et_vect_perm_byte_saved
+}
+
+# Return 1 if the target plus current options supports vector permutation
+# on short-sized elements, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_perm_short { } {
+ global et_vect_perm_short
+
+ if [info exists et_vect_perm_short_saved] {
+ verbose "check_effective_target_vect_perm_short: using cached result" 2
+ } else {
+ set et_vect_perm_short_saved 0
+ if { ([is-effective-target arm_neon_ok]
+ && [is-effective-target arm_little_endian])
+ || ([istarget aarch64*-*-*]
+ && [is-effective-target aarch64_little_endian])
+ || [istarget powerpc*-*-*]
+ || [istarget spu-*-*] } {
+ set et_vect_perm_short_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_perm_short: returning $et_vect_perm_short_saved" 2
+ return $et_vect_perm_short_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# widening summation of *short* args into *int* result, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_widen_sum_hi_to_si_pattern { } {
+ global et_vect_widen_sum_hi_to_si_pattern
+
+ if [info exists et_vect_widen_sum_hi_to_si_pattern_saved] {
+ verbose "check_effective_target_vect_widen_sum_hi_to_si_pattern: using cached result" 2
+ } else {
+ set et_vect_widen_sum_hi_to_si_pattern_saved 0
+ if { [istarget powerpc*-*-*]
+ || [istarget ia64-*-*] } {
+ set et_vect_widen_sum_hi_to_si_pattern_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_widen_sum_hi_to_si_pattern: returning $et_vect_widen_sum_hi_to_si_pattern_saved" 2
+ return $et_vect_widen_sum_hi_to_si_pattern_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# widening summation of *short* args into *int* result, 0 otherwise.
+# A target can also support this widening summation if it can support
+# promotion (unpacking) from shorts to ints.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_widen_sum_hi_to_si { } {
+ global et_vect_widen_sum_hi_to_si
+
+ if [info exists et_vect_widen_sum_hi_to_si_saved] {
+ verbose "check_effective_target_vect_widen_sum_hi_to_si: using cached result" 2
+ } else {
+ set et_vect_widen_sum_hi_to_si_saved [check_effective_target_vect_unpack]
+ if { [istarget powerpc*-*-*]
+ || [istarget ia64-*-*] } {
+ set et_vect_widen_sum_hi_to_si_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_widen_sum_hi_to_si: returning $et_vect_widen_sum_hi_to_si_saved" 2
+ return $et_vect_widen_sum_hi_to_si_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# widening summation of *char* args into *short* result, 0 otherwise.
+# A target can also support this widening summation if it can support
+# promotion (unpacking) from chars to shorts.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_widen_sum_qi_to_hi { } {
+ global et_vect_widen_sum_qi_to_hi
+
+ if [info exists et_vect_widen_sum_qi_to_hi_saved] {
+ verbose "check_effective_target_vect_widen_sum_qi_to_hi: using cached result" 2
+ } else {
+ set et_vect_widen_sum_qi_to_hi_saved 0
+ if { [check_effective_target_vect_unpack]
+ || [check_effective_target_arm_neon_ok]
+ || [istarget ia64-*-*] } {
+ set et_vect_widen_sum_qi_to_hi_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_widen_sum_qi_to_hi: returning $et_vect_widen_sum_qi_to_hi_saved" 2
+ return $et_vect_widen_sum_qi_to_hi_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# widening summation of *char* args into *int* result, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_widen_sum_qi_to_si { } {
+ global et_vect_widen_sum_qi_to_si
+
+ if [info exists et_vect_widen_sum_qi_to_si_saved] {
+ verbose "check_effective_target_vect_widen_sum_qi_to_si: using cached result" 2
+ } else {
+ set et_vect_widen_sum_qi_to_si_saved 0
+ if { [istarget powerpc*-*-*] } {
+ set et_vect_widen_sum_qi_to_si_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_widen_sum_qi_to_si: returning $et_vect_widen_sum_qi_to_si_saved" 2
+ return $et_vect_widen_sum_qi_to_si_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# widening multiplication of *char* args into *short* result, 0 otherwise.
+# A target can also support this widening multiplication if it can support
+# promotion (unpacking) from chars to shorts, and vect_short_mult (non-widening
+# multiplication of shorts).
+#
+# This won't change for different subtargets so cache the result.
+
+
+proc check_effective_target_vect_widen_mult_qi_to_hi { } {
+ global et_vect_widen_mult_qi_to_hi
+
+ if [info exists et_vect_widen_mult_qi_to_hi_saved] {
+ verbose "check_effective_target_vect_widen_mult_qi_to_hi: using cached result" 2
+ } else {
+ if { [check_effective_target_vect_unpack]
+ && [check_effective_target_vect_short_mult] } {
+ set et_vect_widen_mult_qi_to_hi_saved 1
+ } else {
+ set et_vect_widen_mult_qi_to_hi_saved 0
+ }
+ if { [istarget powerpc*-*-*]
+ || [istarget aarch64*-*-*]
+ || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]) } {
+ set et_vect_widen_mult_qi_to_hi_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_widen_mult_qi_to_hi: returning $et_vect_widen_mult_qi_to_hi_saved" 2
+ return $et_vect_widen_mult_qi_to_hi_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# widening multiplication of *short* args into *int* result, 0 otherwise.
+# A target can also support this widening multiplication if it can support
+# promotion (unpacking) from shorts to ints, and vect_int_mult (non-widening
+# multiplication of ints).
+#
+# This won't change for different subtargets so cache the result.
+
+
+proc check_effective_target_vect_widen_mult_hi_to_si { } {
+ global et_vect_widen_mult_hi_to_si
+
+ if [info exists et_vect_widen_mult_hi_to_si_saved] {
+ verbose "check_effective_target_vect_widen_mult_hi_to_si: using cached result" 2
+ } else {
+ if { [check_effective_target_vect_unpack]
+ && [check_effective_target_vect_int_mult] } {
+ set et_vect_widen_mult_hi_to_si_saved 1
+ } else {
+ set et_vect_widen_mult_hi_to_si_saved 0
+ }
+ if { [istarget powerpc*-*-*]
+ || [istarget spu-*-*]
+ || [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]) } {
+ set et_vect_widen_mult_hi_to_si_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_widen_mult_hi_to_si: returning $et_vect_widen_mult_hi_to_si_saved" 2
+ return $et_vect_widen_mult_hi_to_si_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# widening multiplication of *char* args into *short* result, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_widen_mult_qi_to_hi_pattern { } {
+ global et_vect_widen_mult_qi_to_hi_pattern
+
+ if [info exists et_vect_widen_mult_qi_to_hi_pattern_saved] {
+ verbose "check_effective_target_vect_widen_mult_qi_to_hi_pattern: using cached result" 2
+ } else {
+ set et_vect_widen_mult_qi_to_hi_pattern_saved 0
+ if { [istarget powerpc*-*-*]
+ || ([istarget arm*-*-*]
+ && [check_effective_target_arm_neon_ok]
+ && [check_effective_target_arm_little_endian]) } {
+ set et_vect_widen_mult_qi_to_hi_pattern_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_widen_mult_qi_to_hi_pattern: returning $et_vect_widen_mult_qi_to_hi_pattern_saved" 2
+ return $et_vect_widen_mult_qi_to_hi_pattern_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# widening multiplication of *short* args into *int* result, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_widen_mult_hi_to_si_pattern { } {
+ global et_vect_widen_mult_hi_to_si_pattern
+
+ if [info exists et_vect_widen_mult_hi_to_si_pattern_saved] {
+ verbose "check_effective_target_vect_widen_mult_hi_to_si_pattern: using cached result" 2
+ } else {
+ set et_vect_widen_mult_hi_to_si_pattern_saved 0
+ if { [istarget powerpc*-*-*]
+ || [istarget spu-*-*]
+ || [istarget ia64-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || ([istarget arm*-*-*]
+ && [check_effective_target_arm_neon_ok]
+ && [check_effective_target_arm_little_endian]) } {
+ set et_vect_widen_mult_hi_to_si_pattern_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_widen_mult_hi_to_si_pattern: returning $et_vect_widen_mult_hi_to_si_pattern_saved" 2
+ return $et_vect_widen_mult_hi_to_si_pattern_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# widening multiplication of *int* args into *long* result, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_widen_mult_si_to_di_pattern { } {
+ global et_vect_widen_mult_si_to_di_pattern
+
+ if [info exists et_vect_widen_mult_si_to_di_pattern_saved] {
+ verbose "check_effective_target_vect_widen_mult_si_to_di_pattern: using cached result" 2
+ } else {
+ set et_vect_widen_mult_si_to_di_pattern_saved 0
+ if {[istarget ia64-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*] } {
+ set et_vect_widen_mult_si_to_di_pattern_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_widen_mult_si_to_di_pattern: returning $et_vect_widen_mult_si_to_di_pattern_saved" 2
+ return $et_vect_widen_mult_si_to_di_pattern_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# widening shift, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_widen_shift { } {
+ global et_vect_widen_shift_saved
+
+ if [info exists et_vect_widen_shift_saved] {
+ verbose "check_effective_target_vect_widen_shift: using cached result" 2
+ } else {
+ set et_vect_widen_shift_saved 0
+ if { ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]) } {
+ set et_vect_widen_shift_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_widen_shift: returning $et_vect_widen_shift_saved" 2
+ return $et_vect_widen_shift_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# dot-product of signed chars, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_sdot_qi { } {
+ global et_vect_sdot_qi
+
+ if [info exists et_vect_sdot_qi_saved] {
+ verbose "check_effective_target_vect_sdot_qi: using cached result" 2
+ } else {
+ set et_vect_sdot_qi_saved 0
+ if { [istarget ia64-*-*] } {
+ set et_vect_sdot_qi_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_sdot_qi: returning $et_vect_sdot_qi_saved" 2
+ return $et_vect_sdot_qi_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# dot-product of unsigned chars, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_udot_qi { } {
+ global et_vect_udot_qi
+
+ if [info exists et_vect_udot_qi_saved] {
+ verbose "check_effective_target_vect_udot_qi: using cached result" 2
+ } else {
+ set et_vect_udot_qi_saved 0
+ if { [istarget powerpc*-*-*]
+ || [istarget ia64-*-*] } {
+ set et_vect_udot_qi_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_udot_qi: returning $et_vect_udot_qi_saved" 2
+ return $et_vect_udot_qi_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# dot-product of signed shorts, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_sdot_hi { } {
+ global et_vect_sdot_hi
+
+ if [info exists et_vect_sdot_hi_saved] {
+ verbose "check_effective_target_vect_sdot_hi: using cached result" 2
+ } else {
+ set et_vect_sdot_hi_saved 0
+ if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
+ || [istarget ia64-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*] } {
+ set et_vect_sdot_hi_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_sdot_hi: returning $et_vect_sdot_hi_saved" 2
+ return $et_vect_sdot_hi_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# dot-product of unsigned shorts, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_udot_hi { } {
+ global et_vect_udot_hi
+
+ if [info exists et_vect_udot_hi_saved] {
+ verbose "check_effective_target_vect_udot_hi: using cached result" 2
+ } else {
+ set et_vect_udot_hi_saved 0
+ if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*]) } {
+ set et_vect_udot_hi_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_udot_hi: returning $et_vect_udot_hi_saved" 2
+ return $et_vect_udot_hi_saved
+}
+
+
+# Return 1 if the target plus current options supports a vector
+# demotion (packing) of shorts (to chars) and ints (to shorts)
+# using modulo arithmetic, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_pack_trunc { } {
+ global et_vect_pack_trunc
+
+ if [info exists et_vect_pack_trunc_saved] {
+ verbose "check_effective_target_vect_pack_trunc: using cached result" 2
+ } else {
+ set et_vect_pack_trunc_saved 0
+ if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget aarch64*-*-*]
+ || [istarget spu-*-*]
+ || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]
+ && [check_effective_target_arm_little_endian]) } {
+ set et_vect_pack_trunc_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_pack_trunc: returning $et_vect_pack_trunc_saved" 2
+ return $et_vect_pack_trunc_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# promotion (unpacking) of chars (to shorts) and shorts (to ints), 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_unpack { } {
+ global et_vect_unpack
+
+ if [info exists et_vect_unpack_saved] {
+ verbose "check_effective_target_vect_unpack: using cached result" 2
+ } else {
+ set et_vect_unpack_saved 0
+ if { ([istarget powerpc*-*-*] && ![istarget powerpc-*paired*])
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget spu-*-*]
+ || [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
+ || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]
+ && [check_effective_target_arm_little_endian]) } {
+ set et_vect_unpack_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_unpack: returning $et_vect_unpack_saved" 2
+ return $et_vect_unpack_saved
+}
+
+# Return 1 if the target plus current options does not guarantee
+# that its STACK_BOUNDARY is >= the required vector alignment.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_unaligned_stack { } {
+ global et_unaligned_stack_saved
+
+ if [info exists et_unaligned_stack_saved] {
+ verbose "check_effective_target_unaligned_stack: using cached result" 2
+ } else {
+ set et_unaligned_stack_saved 0
+ }
+ verbose "check_effective_target_unaligned_stack: returning $et_unaligned_stack_saved" 2
+ return $et_unaligned_stack_saved
+}
+
+# Return 1 if the target plus current options does not support a vector
+# alignment mechanism, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_no_align { } {
+ global et_vect_no_align_saved
+
+ if [info exists et_vect_no_align_saved] {
+ verbose "check_effective_target_vect_no_align: using cached result" 2
+ } else {
+ set et_vect_no_align_saved 0
+ if { [istarget mipsisa64*-*-*]
+ || [istarget mips-sde-elf]
+ || [istarget sparc*-*-*]
+ || [istarget ia64-*-*]
+ || [check_effective_target_arm_vect_no_misalign]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_loongson]) } {
+ set et_vect_no_align_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_no_align: returning $et_vect_no_align_saved" 2
+ return $et_vect_no_align_saved
+}
+
+# Return 1 if the target supports misaligned vector accesses, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_hw_misalign { } {
+ global et_vect_hw_misalign_saved
+
+ if [info exists et_vect_hw_misalign_saved] {
+ verbose "check_effective_target_vect_hw_misalign: using cached result" 2
+ } else {
+ set et_vect_hw_misalign_saved 0
+ if { ([istarget x86_64-*-*]
+ || [istarget aarch64*-*-*]
+ || [istarget i?86-*-*]) } {
+ set et_vect_hw_misalign_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_hw_misalign: returning $et_vect_hw_misalign_saved" 2
+ return $et_vect_hw_misalign_saved
+}
+
+
+# Return 1 if arrays are aligned to the vector alignment
+# boundary, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_aligned_arrays { } {
+ global et_vect_aligned_arrays
+
+ if [info exists et_vect_aligned_arrays_saved] {
+ verbose "check_effective_target_vect_aligned_arrays: using cached result" 2
+ } else {
+ set et_vect_aligned_arrays_saved 0
+ if { ([istarget x86_64-*-*] || [istarget i?86-*-*]) } {
+ if { ([is-effective-target lp64]
+ && ( ![check_avx_available]
+ || [check_prefer_avx128])) } {
+ set et_vect_aligned_arrays_saved 1
+ }
+ }
+ if [istarget spu-*-*] {
+ set et_vect_aligned_arrays_saved 1
+ }
+ }
+ verbose "check_effective_target_vect_aligned_arrays: returning $et_vect_aligned_arrays_saved" 2
+ return $et_vect_aligned_arrays_saved
+}
+
+# Return 1 if types of size 32 bit or less are naturally aligned
+# (aligned to their type-size), 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_natural_alignment_32 { } {
+ global et_natural_alignment_32
+
+ if [info exists et_natural_alignment_32_saved] {
+ verbose "check_effective_target_natural_alignment_32: using cached result" 2
+ } else {
+ # FIXME: 32bit powerpc: guaranteed only if MASK_ALIGN_NATURAL/POWER.
+ set et_natural_alignment_32_saved 1
+ if { ([istarget *-*-darwin*] && [is-effective-target lp64]) } {
+ set et_natural_alignment_32_saved 0
+ }
+ }
+ verbose "check_effective_target_natural_alignment_32: returning $et_natural_alignment_32_saved" 2
+ return $et_natural_alignment_32_saved
+}
+
+# Return 1 if types of size 64 bit or less are naturally aligned (aligned to their
+# type-size), 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_natural_alignment_64 { } {
+ global et_natural_alignment_64
+
+ if [info exists et_natural_alignment_64_saved] {
+ verbose "check_effective_target_natural_alignment_64: using cached result" 2
+ } else {
+ set et_natural_alignment_64_saved 0
+ if { ([is-effective-target lp64] && ![istarget *-*-darwin*])
+ || [istarget spu-*-*] } {
+ set et_natural_alignment_64_saved 1
+ }
+ }
+ verbose "check_effective_target_natural_alignment_64: returning $et_natural_alignment_64_saved" 2
+ return $et_natural_alignment_64_saved
+}
+
+# Return 1 if all vector types are naturally aligned (aligned to their
+# type-size), 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_natural_alignment { } {
+ global et_vect_natural_alignment
+
+ if [info exists et_vect_natural_alignment_saved] {
+ verbose "check_effective_target_vect_natural_alignment: using cached result" 2
+ } else {
+ set et_vect_natural_alignment_saved 1
+ if { [check_effective_target_arm_eabi] } {
+ set et_vect_natural_alignment_saved 0
+ }
+ }
+ verbose "check_effective_target_vect_natural_alignment: returning $et_vect_natural_alignment_saved" 2
+ return $et_vect_natural_alignment_saved
+}
+
+# Return 1 if vector alignment (for types of size 32 bit or less) is reachable, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vector_alignment_reachable { } {
+ global et_vector_alignment_reachable
+
+ if [info exists et_vector_alignment_reachable_saved] {
+ verbose "check_effective_target_vector_alignment_reachable: using cached result" 2
+ } else {
+ if { [check_effective_target_vect_aligned_arrays]
+ || [check_effective_target_natural_alignment_32] } {
+ set et_vector_alignment_reachable_saved 1
+ } else {
+ set et_vector_alignment_reachable_saved 0
+ }
+ }
+ verbose "check_effective_target_vector_alignment_reachable: returning $et_vector_alignment_reachable_saved" 2
+ return $et_vector_alignment_reachable_saved
+}
+
+# Return 1 if vector alignment for 64 bit is reachable, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vector_alignment_reachable_for_64bit { } {
+ global et_vector_alignment_reachable_for_64bit
+
+ if [info exists et_vector_alignment_reachable_for_64bit_saved] {
+ verbose "check_effective_target_vector_alignment_reachable_for_64bit: using cached result" 2
+ } else {
+ if { [check_effective_target_vect_aligned_arrays]
+ || [check_effective_target_natural_alignment_64] } {
+ set et_vector_alignment_reachable_for_64bit_saved 1
+ } else {
+ set et_vector_alignment_reachable_for_64bit_saved 0
+ }
+ }
+ verbose "check_effective_target_vector_alignment_reachable_for_64bit: returning $et_vector_alignment_reachable_for_64bit_saved" 2
+ return $et_vector_alignment_reachable_for_64bit_saved
+}
+
+# Return 1 if the target only requires element alignment for vector accesses
+
+proc check_effective_target_vect_element_align { } {
+ global et_vect_element_align
+
+ if [info exists et_vect_element_align] {
+ verbose "check_effective_target_vect_element_align: using cached result" 2
+ } else {
+ set et_vect_element_align 0
+ if { ([istarget arm*-*-*]
+ && ![check_effective_target_arm_vect_no_misalign])
+ || [check_effective_target_vect_hw_misalign] } {
+ set et_vect_element_align 1
+ }
+ }
+
+ verbose "check_effective_target_vect_element_align: returning $et_vect_element_align" 2
+ return $et_vect_element_align
+}
+
+# Return 1 if the target supports vector conditional operations, 0 otherwise.
+
+proc check_effective_target_vect_condition { } {
+ global et_vect_cond_saved
+
+ if [info exists et_vect_cond_saved] {
+ verbose "check_effective_target_vect_cond: using cached result" 2
+ } else {
+ set et_vect_cond_saved 0
+ if { [istarget aarch64*-*-*]
+ || [istarget powerpc*-*-*]
+ || [istarget ia64-*-*]
+ || [istarget i?86-*-*]
+ || [istarget spu-*-*]
+ || [istarget x86_64-*-*]
+ || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]) } {
+ set et_vect_cond_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_cond: returning $et_vect_cond_saved" 2
+ return $et_vect_cond_saved
+}
+
+# Return 1 if the target supports vector conditional operations where
+# the comparison has different type from the lhs, 0 otherwise.
+
+proc check_effective_target_vect_cond_mixed { } {
+ global et_vect_cond_mixed_saved
+
+ if [info exists et_vect_cond_mixed_saved] {
+ verbose "check_effective_target_vect_cond_mixed: using cached result" 2
+ } else {
+ set et_vect_cond_mixed_saved 0
+ if { [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget powerpc*-*-*] } {
+ set et_vect_cond_mixed_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_cond_mixed: returning $et_vect_cond_mixed_saved" 2
+ return $et_vect_cond_mixed_saved
+}
+
+# Return 1 if the target supports vector char multiplication, 0 otherwise.
+
+proc check_effective_target_vect_char_mult { } {
+ global et_vect_char_mult_saved
+
+ if [info exists et_vect_char_mult_saved] {
+ verbose "check_effective_target_vect_char_mult: using cached result" 2
+ } else {
+ set et_vect_char_mult_saved 0
+ if { [istarget aarch64*-*-*]
+ || [istarget ia64-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [check_effective_target_arm32] } {
+ set et_vect_char_mult_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_char_mult: returning $et_vect_char_mult_saved" 2
+ return $et_vect_char_mult_saved
+}
+
+# Return 1 if the target supports vector short multiplication, 0 otherwise.
+
+proc check_effective_target_vect_short_mult { } {
+ global et_vect_short_mult_saved
+
+ if [info exists et_vect_short_mult_saved] {
+ verbose "check_effective_target_vect_short_mult: using cached result" 2
+ } else {
+ set et_vect_short_mult_saved 0
+ if { [istarget ia64-*-*]
+ || [istarget spu-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget powerpc*-*-*]
+ || [istarget aarch64*-*-*]
+ || [check_effective_target_arm32]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mips_loongson]) } {
+ set et_vect_short_mult_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_short_mult: returning $et_vect_short_mult_saved" 2
+ return $et_vect_short_mult_saved
+}
+
+# Return 1 if the target supports vector int multiplication, 0 otherwise.
+
+proc check_effective_target_vect_int_mult { } {
+ global et_vect_int_mult_saved
+
+ if [info exists et_vect_int_mult_saved] {
+ verbose "check_effective_target_vect_int_mult: using cached result" 2
+ } else {
+ set et_vect_int_mult_saved 0
+ if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
+ || [istarget spu-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
+ || [check_effective_target_arm32] } {
+ set et_vect_int_mult_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_int_mult: returning $et_vect_int_mult_saved" 2
+ return $et_vect_int_mult_saved
+}
+
+# Return 1 if the target supports vector even/odd elements extraction, 0 otherwise.
+
+proc check_effective_target_vect_extract_even_odd { } {
+ global et_vect_extract_even_odd_saved
+
+ if [info exists et_vect_extract_even_odd_saved] {
+ verbose "check_effective_target_vect_extract_even_odd: using cached result" 2
+ } else {
+ set et_vect_extract_even_odd_saved 0
+ if { [istarget aarch64*-*-*]
+ || [istarget powerpc*-*-*]
+ || [is-effective-target arm_neon_ok]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget ia64-*-*]
+ || [istarget spu-*-*]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mpaired_single]) } {
+ set et_vect_extract_even_odd_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_extract_even_odd: returning $et_vect_extract_even_odd_saved" 2
+ return $et_vect_extract_even_odd_saved
+}
+
+# Return 1 if the target supports vector interleaving, 0 otherwise.
+
+proc check_effective_target_vect_interleave { } {
+ global et_vect_interleave_saved
+
+ if [info exists et_vect_interleave_saved] {
+ verbose "check_effective_target_vect_interleave: using cached result" 2
+ } else {
+ set et_vect_interleave_saved 0
+ if { [istarget aarch64*-*-*]
+ || [istarget powerpc*-*-*]
+ || [is-effective-target arm_neon_ok]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget ia64-*-*]
+ || [istarget spu-*-*]
+ || ([istarget mips*-*-*]
+ && [check_effective_target_mpaired_single]) } {
+ set et_vect_interleave_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_interleave: returning $et_vect_interleave_saved" 2
+ return $et_vect_interleave_saved
+}
+
+foreach N {2 3 4 8} {
+ eval [string map [list N $N] {
+ # Return 1 if the target supports N-vector interleaving
+ proc check_effective_target_vect_stridedN { } {
+ global et_vect_stridedN_saved
+
+ if [info exists et_vect_stridedN_saved] {
+ verbose "check_effective_target_vect_stridedN: using cached result" 2
+ } else {
+ set et_vect_stridedN_saved 0
+ if { (N & -N) == N
+ && [check_effective_target_vect_interleave]
+ && [check_effective_target_vect_extract_even_odd] } {
+ set et_vect_stridedN_saved 1
+ }
+ if { ([istarget arm*-*-*]
+ || [istarget aarch64*-*-*]) && N >= 2 && N <= 4 } {
+ set et_vect_stridedN_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_stridedN: returning $et_vect_stridedN_saved" 2
+ return $et_vect_stridedN_saved
+ }
+ }]
+}
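+
+# The string map above stamps out check_effective_target_vect_strided2,
+# _strided3, _strided4 and _strided8.  Roughly, the N=3 instance behaves as
+# if written by hand as follows (condensed sketch, caching elided):
+#
+#   proc check_effective_target_vect_strided3 { } {
+#       global et_vect_strided3_saved
+#       if { ![info exists et_vect_strided3_saved] } {
+#           set et_vect_strided3_saved 0
+#           # (3 & -3) != 3, so only the ARM/AArch64 special case can apply.
+#           if { [istarget arm*-*-*] || [istarget aarch64*-*-*] } {
+#               set et_vect_strided3_saved 1
+#           }
+#       }
+#       return $et_vect_strided3_saved
+#   }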
+
+# Return 1 if the target supports multiple vector sizes
+
+proc check_effective_target_vect_multiple_sizes { } {
+ global et_vect_multiple_sizes_saved
+
+ set et_vect_multiple_sizes_saved 0
+ if { ([istarget aarch64*-*-*]
+ || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok])) } {
+ set et_vect_multiple_sizes_saved 1
+ }
+ if { ([istarget x86_64-*-*] || [istarget i?86-*-*]) } {
+ if { ([check_avx_available] && ![check_prefer_avx128]) } {
+ set et_vect_multiple_sizes_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_multiple_sizes: returning $et_vect_multiple_sizes_saved" 2
+ return $et_vect_multiple_sizes_saved
+}
+
+# Return 1 if the target supports vectors of 64 bits.
+
+proc check_effective_target_vect64 { } {
+ global et_vect64_saved
+
+ if [info exists et_vect64_saved] {
+ verbose "check_effective_target_vect64: using cached result" 2
+ } else {
+ set et_vect64_saved 0
+ if { ([istarget arm*-*-*]
+ && [check_effective_target_arm_neon_ok]
+ && [check_effective_target_arm_little_endian]) } {
+ set et_vect64_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect64: returning $et_vect64_saved" 2
+ return $et_vect64_saved
+}
+
+# Return 1 if the target supports vector copysignf calls.
+
+proc check_effective_target_vect_call_copysignf { } {
+ global et_vect_call_copysignf_saved
+
+ if [info exists et_vect_call_copysignf_saved] {
+ verbose "check_effective_target_vect_call_copysignf: using cached result" 2
+ } else {
+ set et_vect_call_copysignf_saved 0
+ if { [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget powerpc*-*-*] } {
+ set et_vect_call_copysignf_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_copysignf: returning $et_vect_call_copysignf_saved" 2
+ return $et_vect_call_copysignf_saved
+}
+
+# Return 1 if the target supports vector sqrtf calls.
+
+proc check_effective_target_vect_call_sqrtf { } {
+ global et_vect_call_sqrtf_saved
+
+ if [info exists et_vect_call_sqrtf_saved] {
+ verbose "check_effective_target_vect_call_sqrtf: using cached result" 2
+ } else {
+ set et_vect_call_sqrtf_saved 0
+ if { [istarget aarch64*-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || ([istarget powerpc*-*-*] && [check_vsx_hw_available]) } {
+ set et_vect_call_sqrtf_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_sqrtf: returning $et_vect_call_sqrtf_saved" 2
+ return $et_vect_call_sqrtf_saved
+}
+
+# Return 1 if the target supports vector lrint calls.
+
+proc check_effective_target_vect_call_lrint { } {
+ set et_vect_call_lrint 0
+ if { ([istarget i?86-*-*] || [istarget x86_64-*-*]) && [check_effective_target_ilp32] } {
+ set et_vect_call_lrint 1
+ }
+
+ verbose "check_effective_target_vect_call_lrint: returning $et_vect_call_lrint" 2
+ return $et_vect_call_lrint
+}
+
+# Return 1 if the target supports vector btrunc calls.
+
+proc check_effective_target_vect_call_btrunc { } {
+ global et_vect_call_btrunc_saved
+
+ if [info exists et_vect_call_btrunc_saved] {
+ verbose "check_effective_target_vect_call_btrunc: using cached result" 2
+ } else {
+ set et_vect_call_btrunc_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_btrunc_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_btrunc: returning $et_vect_call_btrunc_saved" 2
+ return $et_vect_call_btrunc_saved
+}
+
+# Return 1 if the target supports vector btruncf calls.
+
+proc check_effective_target_vect_call_btruncf { } {
+ global et_vect_call_btruncf_saved
+
+ if [info exists et_vect_call_btruncf_saved] {
+ verbose "check_effective_target_vect_call_btruncf: using cached result" 2
+ } else {
+ set et_vect_call_btruncf_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_btruncf_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_btruncf: returning $et_vect_call_btruncf_saved" 2
+ return $et_vect_call_btruncf_saved
+}
+
+# Return 1 if the target supports vector ceil calls.
+
+proc check_effective_target_vect_call_ceil { } {
+ global et_vect_call_ceil_saved
+
+ if [info exists et_vect_call_ceil_saved] {
+ verbose "check_effective_target_vect_call_ceil: using cached result" 2
+ } else {
+ set et_vect_call_ceil_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_ceil_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_ceil: returning $et_vect_call_ceil_saved" 2
+ return $et_vect_call_ceil_saved
+}
+
+# Return 1 if the target supports vector ceilf calls.
+
+proc check_effective_target_vect_call_ceilf { } {
+ global et_vect_call_ceilf_saved
+
+ if [info exists et_vect_call_ceilf_saved] {
+ verbose "check_effective_target_vect_call_ceilf: using cached result" 2
+ } else {
+ set et_vect_call_ceilf_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_ceilf_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_ceilf: returning $et_vect_call_ceilf_saved" 2
+ return $et_vect_call_ceilf_saved
+}
+
+# Return 1 if the target supports vector floor calls.
+
+proc check_effective_target_vect_call_floor { } {
+ global et_vect_call_floor_saved
+
+ if [info exists et_vect_call_floor_saved] {
+ verbose "check_effective_target_vect_call_floor: using cached result" 2
+ } else {
+ set et_vect_call_floor_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_floor_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_floor: returning $et_vect_call_floor_saved" 2
+ return $et_vect_call_floor_saved
+}
+
+# Return 1 if the target supports vector floorf calls.
+
+proc check_effective_target_vect_call_floorf { } {
+ global et_vect_call_floorf_saved
+
+ if [info exists et_vect_call_floorf_saved] {
+ verbose "check_effective_target_vect_call_floorf: using cached result" 2
+ } else {
+ set et_vect_call_floorf_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_floorf_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_floorf: returning $et_vect_call_floorf_saved" 2
+ return $et_vect_call_floorf_saved
+}
+
+# Return 1 if the target supports vector lceil calls.
+
+proc check_effective_target_vect_call_lceil { } {
+ global et_vect_call_lceil_saved
+
+ if [info exists et_vect_call_lceil_saved] {
+ verbose "check_effective_target_vect_call_lceil: using cached result" 2
+ } else {
+ set et_vect_call_lceil_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_lceil_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_lceil: returning $et_vect_call_lceil_saved" 2
+ return $et_vect_call_lceil_saved
+}
+
+# Return 1 if the target supports vector lfloor calls.
+
+proc check_effective_target_vect_call_lfloor { } {
+ global et_vect_call_lfloor_saved
+
+ if [info exists et_vect_call_lfloor_saved] {
+ verbose "check_effective_target_vect_call_lfloor: using cached result" 2
+ } else {
+ set et_vect_call_lfloor_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_lfloor_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_lfloor: returning $et_vect_call_lfloor_saved" 2
+ return $et_vect_call_lfloor_saved
+}
+
+# Return 1 if the target supports vector nearbyint calls.
+
+proc check_effective_target_vect_call_nearbyint { } {
+ global et_vect_call_nearbyint_saved
+
+ if [info exists et_vect_call_nearbyint_saved] {
+ verbose "check_effective_target_vect_call_nearbyint: using cached result" 2
+ } else {
+ set et_vect_call_nearbyint_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_nearbyint_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_nearbyint: returning $et_vect_call_nearbyint_saved" 2
+ return $et_vect_call_nearbyint_saved
+}
+
+# Return 1 if the target supports vector nearbyintf calls.
+
+proc check_effective_target_vect_call_nearbyintf { } {
+ global et_vect_call_nearbyintf_saved
+
+ if [info exists et_vect_call_nearbyintf_saved] {
+ verbose "check_effective_target_vect_call_nearbyintf: using cached result" 2
+ } else {
+ set et_vect_call_nearbyintf_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_nearbyintf_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_nearbyintf: returning $et_vect_call_nearbyintf_saved" 2
+ return $et_vect_call_nearbyintf_saved
+}
+
+# Return 1 if the target supports vector round calls.
+
+proc check_effective_target_vect_call_round { } {
+ global et_vect_call_round_saved
+
+ if [info exists et_vect_call_round_saved] {
+ verbose "check_effective_target_vect_call_round: using cached result" 2
+ } else {
+ set et_vect_call_round_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_round_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_round: returning $et_vect_call_round_saved" 2
+ return $et_vect_call_round_saved
+}
+
+# Return 1 if the target supports vector roundf calls.
+
+proc check_effective_target_vect_call_roundf { } {
+ global et_vect_call_roundf_saved
+
+ if [info exists et_vect_call_roundf_saved] {
+ verbose "check_effective_target_vect_call_roundf: using cached result" 2
+ } else {
+ set et_vect_call_roundf_saved 0
+ if { [istarget aarch64*-*-*] } {
+ set et_vect_call_roundf_saved 1
+ }
+ }
+
+ verbose "check_effective_target_vect_call_roundf: returning $et_vect_call_roundf_saved" 2
+ return $et_vect_call_roundf_saved
+}
+
+# Return 1 if the target supports section-anchors
+
+proc check_effective_target_section_anchors { } {
+ global et_section_anchors_saved
+
+ if [info exists et_section_anchors_saved] {
+ verbose "check_effective_target_section_anchors: using cached result" 2
+ } else {
+ set et_section_anchors_saved 0
+ if { [istarget powerpc*-*-*]
+ || [istarget arm*-*-*] } {
+ set et_section_anchors_saved 1
+ }
+ }
+
+ verbose "check_effective_target_section_anchors: returning $et_section_anchors_saved" 2
+ return $et_section_anchors_saved
+}
+
+# Return 1 if the target supports atomic operations on "int_128" values.
+
+proc check_effective_target_sync_int_128 { } {
+ if { ([istarget x86_64-*-*] || [istarget i?86-*-*])
+ && ![is-effective-target ia32] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if the target supports atomic operations on "int_128" values
+# and can execute them.
+
+proc check_effective_target_sync_int_128_runtime { } {
+ if { ([istarget x86_64-*-*] || [istarget i?86-*-*])
+ && ![is-effective-target ia32] } {
+ return [check_cached_effective_target sync_int_128_available {
+ check_runtime_nocache sync_int_128_available {
+ #include "cpuid.h"
+ int main ()
+ {
+ unsigned int eax, ebx, ecx, edx;
+ if (__get_cpuid (1, &eax, &ebx, &ecx, &edx))
+ return !(ecx & bit_CMPXCHG16B);
+ return 1;
+ }
+ } ""
+ }]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if the target supports atomic operations on "long long".
+#
+# Note: 32bit x86 targets require -march=pentium in dg-options.
+
+proc check_effective_target_sync_long_long { } {
+ if { [istarget x86_64-*-*]
+ || [istarget i?86-*-*]
+ || [istarget aarch64*-*-*]
+ || [istarget arm*-*-*]
+ || [istarget alpha*-*-*]
+ || ([istarget sparc*-*-*] && [check_effective_target_lp64]) } {
+ return 1
+ } else {
+ return 0
+ }
+}
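+
+# On 32-bit x86 the required cmpxchg8b instruction is only generated for
+# -march=pentium or later, so a test using this keyword might combine it
+# with a directive along these lines (sketch; the exact selector depends
+# on the test, and ia32 is the usual keyword for 32-bit x86):
+#
+#   /* { dg-require-effective-target sync_long_long } */
+#   /* { dg-options "-march=pentium" { target ia32 } } */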
+
+# Return 1 if the target supports atomic operations on "long long"
+# and can execute them.
+#
+# Note: 32bit x86 targets require -march=pentium in dg-options.
+
+proc check_effective_target_sync_long_long_runtime { } {
+ if { [istarget x86_64-*-*]
+ || [istarget i?86-*-*] } {
+ return [check_cached_effective_target sync_long_long_available {
+ check_runtime_nocache sync_long_long_available {
+ #include "cpuid.h"
+ int main ()
+ {
+ unsigned int eax, ebx, ecx, edx;
+ if (__get_cpuid (1, &eax, &ebx, &ecx, &edx))
+ return !(edx & bit_CMPXCHG8B);
+ return 1;
+ }
+ } ""
+ }]
+ } elseif { [istarget aarch64*-*-*] } {
+ return 1
+ } elseif { [istarget arm*-*-linux-*] } {
+ return [check_runtime sync_longlong_runtime {
+ #include <stdlib.h>
+ int main ()
+ {
+ long long l1;
+
+ if (sizeof (long long) != 8)
+ exit (1);
+
+ /* Just check for native; checking for kernel fallback is tricky. */
+ asm volatile ("ldrexd r0,r1, [%0]" : : "r" (&l1) : "r0", "r1");
+
+ exit (0);
+ }
+ } "" ]
+ } elseif { [istarget alpha*-*-*] } {
+ return 1
+ } elseif { ([istarget sparc*-*-*]
+ && [check_effective_target_lp64]
+ && [check_effective_target_ultrasparc_hw]) } {
+ return 1
+ } elseif { [istarget powerpc*-*-*] && [check_effective_target_lp64] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if the target supports atomic operations on "int" and "long".
+
+proc check_effective_target_sync_int_long { } {
+ global et_sync_int_long_saved
+
+ if [info exists et_sync_int_long_saved] {
+ verbose "check_effective_target_sync_int_long: using cached result" 2
+ } else {
+ set et_sync_int_long_saved 0
+# This is intentionally powerpc but not rs6000; rs6000 doesn't have the
+# load-reserved/store-conditional instructions.
+ if { [istarget ia64-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget aarch64*-*-*]
+ || [istarget alpha*-*-*]
+ || [istarget arm*-*-linux-*]
+ || [istarget bfin*-*linux*]
+ || [istarget hppa*-*linux*]
+ || [istarget s390*-*-*]
+ || [istarget powerpc*-*-*]
+ || [istarget crisv32-*-*] || [istarget cris-*-*]
+ || ([istarget sparc*-*-*] && [check_effective_target_sparc_v9])
+ || [check_effective_target_mips_llsc] } {
+ set et_sync_int_long_saved 1
+ }
+ }
+
+ verbose "check_effective_target_sync_int_long: returning $et_sync_int_long_saved" 2
+ return $et_sync_int_long_saved
+}
+
+# Return 1 if the target supports atomic operations on "char" and "short".
+
+proc check_effective_target_sync_char_short { } {
+ global et_sync_char_short_saved
+
+ if [info exists et_sync_char_short_saved] {
+ verbose "check_effective_target_sync_char_short: using cached result" 2
+ } else {
+ set et_sync_char_short_saved 0
+# This is intentionally powerpc but not rs6000; rs6000 doesn't have the
+# load-reserved/store-conditional instructions.
+ if { [istarget aarch64*-*-*]
+ || [istarget ia64-*-*]
+ || [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || [istarget alpha*-*-*]
+ || [istarget arm*-*-linux-*]
+ || [istarget hppa*-*linux*]
+ || [istarget s390*-*-*]
+ || [istarget powerpc*-*-*]
+ || [istarget crisv32-*-*] || [istarget cris-*-*]
+ || ([istarget sparc*-*-*] && [check_effective_target_sparc_v9])
+ || [check_effective_target_mips_llsc] } {
+ set et_sync_char_short_saved 1
+ }
+ }
+
+ verbose "check_effective_target_sync_char_short: returning $et_sync_char_short_saved" 2
+ return $et_sync_char_short_saved
+}
+
+# Return 1 if the target uses a ColdFire FPU.
+
+proc check_effective_target_coldfire_fpu { } {
+ return [check_no_compiler_messages coldfire_fpu assembly {
+ #ifndef __mcffpu__
+ #error FOO
+ #endif
+ }]
+}
+
+# Return true if this is a uClibc target.
+
+proc check_effective_target_uclibc {} {
+ return [check_no_compiler_messages uclibc object {
+ #include <features.h>
+ #if !defined (__UCLIBC__)
+ #error FOO
+ #endif
+ }]
+}
+
+# Return true if this is a uClibc target and if the uClibc feature
+# described by __$feature__ is not present.
+
+proc check_missing_uclibc_feature {feature} {
+ return [check_no_compiler_messages $feature object "
+ #include <features.h>
+ #if !defined (__UCLIBC__) || defined (__${feature}__)
+ #error FOO
+ #endif
+ "]
+}
+
+# Return true if this is a Newlib target.
+
+proc check_effective_target_newlib {} {
+ return [check_no_compiler_messages newlib object {
+ #include <newlib.h>
+ }]
+}
+
+# Return true if this is NOT a Bionic target.
+
+proc check_effective_target_non_bionic {} {
+ return [check_no_compiler_messages non_bionic object {
+ #include <ctype.h>
+ #if defined (__BIONIC__)
+ #error FOO
+ #endif
+ }]
+}
+
+# Return 1 if
+# (a) an error of a few ULP is expected in string to floating-point
+# conversion functions; and
+# (b) overflow is not always detected correctly by those functions.
+
+proc check_effective_target_lax_strtofp {} {
+ # By default, assume that all uClibc targets suffer from this.
+ return [check_effective_target_uclibc]
+}
+
+# Return 1 if this is a target for which wcsftime is a dummy
+# function that always returns 0.
+
+proc check_effective_target_dummy_wcsftime {} {
+ # By default, assume that all uClibc targets suffer from this.
+ return [check_effective_target_uclibc]
+}
+
+# Return 1 if constructors with initialization priority arguments are
+# supported on this target.
+
+proc check_effective_target_init_priority {} {
+ return [check_no_compiler_messages init_priority assembly "
+ void f() __attribute__((constructor (1000)));
+ void f() \{\}
+ "]
+}
+
+# Return 1 if the target matches the effective target 'arg', 0 otherwise.
+# This can be used with any check_* proc that takes no argument and
+# returns only 1 or 0. It could be used with check_* procs that take
+# arguments with keywords that pass particular arguments.
+
+proc is-effective-target { arg } {
+ set selected 0
+ if { [info procs check_effective_target_${arg}] != [list] } {
+ set selected [check_effective_target_${arg}]
+ } else {
+ switch $arg {
+ "vmx_hw" { set selected [check_vmx_hw_available] }
+ "vsx_hw" { set selected [check_vsx_hw_available] }
+ "p8vector_hw" { set selected [check_p8vector_hw_available] }
+ "ppc_recip_hw" { set selected [check_ppc_recip_hw_available] }
+ "dfp_hw" { set selected [check_dfp_hw_available] }
+ "named_sections" { set selected [check_named_sections_available] }
+ "gc_sections" { set selected [check_gc_sections_available] }
+ "cxa_atexit" { set selected [check_cxa_atexit_available] }
+ default { error "unknown effective target keyword `$arg'" }
+ }
+ }
+ verbose "is-effective-target: $arg $selected" 2
+ return $selected
+}
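+
+# For reference, this helper is what lets selector code elsewhere in this
+# file (and in test drivers) query a keyword generically; the vectoriser
+# setup below uses constructs along the lines of:
+#
+#   if [is-effective-target ilp32] {
+#       lappend DEFAULT_VECTCFLAGS "-mcpu=970"
+#   }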
+
+# Return 1 if the argument is an effective-target keyword, 0 otherwise.
+
+proc is-effective-target-keyword { arg } {
+ if { [info procs check_effective_target_${arg}] != [list] } {
+ return 1
+ } else {
+ # These have different names for their check_* procs.
+ switch $arg {
+ "vmx_hw" { return 1 }
+ "vsx_hw" { return 1 }
+ "p8vector_hw" { return 1 }
+ "ppc_recip_hw" { return 1 }
+ "dfp_hw" { return 1 }
+ "named_sections" { return 1 }
+ "gc_sections" { return 1 }
+ "cxa_atexit" { return 1 }
+ default { return 0 }
+ }
+ }
+}
+
+# Return 1 if the target defaults to short enums.
+
+proc check_effective_target_short_enums { } {
+ return [check_no_compiler_messages short_enums assembly {
+ enum foo { bar };
+ int s[sizeof (enum foo) == 1 ? 1 : -1];
+ }]
+}
+
+# Return 1 if target supports merging string constants at link time.
+
+proc check_effective_target_string_merging { } {
+ return [check_no_messages_and_pattern string_merging \
+ "rodata\\.str" assembly {
+ const char *var = "String";
+ } {-O2}]
+}
+
+# Return 1 if target has the basic signed and unsigned types in
+# <stdint.h>, 0 otherwise. This will be obsolete when GCC ensures a
+# working <stdint.h> for all targets.
+
+proc check_effective_target_stdint_types { } {
+ return [check_no_compiler_messages stdint_types assembly {
+ #include <stdint.h>
+ int8_t a; int16_t b; int32_t c; int64_t d;
+ uint8_t e; uint16_t f; uint32_t g; uint64_t h;
+ }]
+}
+
+# Return 1 if target has the basic signed and unsigned types in
+# <inttypes.h>, 0 otherwise. This is for testing that GCC's notions of
+# these types agree with those in the header, as some systems have
+# only <inttypes.h>.
+
+proc check_effective_target_inttypes_types { } {
+ return [check_no_compiler_messages inttypes_types assembly {
+ #include <inttypes.h>
+ int8_t a; int16_t b; int32_t c; int64_t d;
+ uint8_t e; uint16_t f; uint32_t g; uint64_t h;
+ }]
+}
+
+# Return 1 if programs are intended to be run on a simulator
+# (i.e. slowly) rather than hardware (i.e. fast).
+
+proc check_effective_target_simulator { } {
+
+ # All "src/sim" simulators set this one.
+ if [board_info target exists is_simulator] {
+ return [board_info target is_simulator]
+ }
+
+ # The "sid" simulators don't set that one, but at least they set
+ # this one.
+ if [board_info target exists slow_simulator] {
+ return [board_info target slow_simulator]
+ }
+
+ return 0
+}
+
+# Return 1 if programs are intended to be run on hardware rather than
+# on a simulator.
+
+proc check_effective_target_hw { } {
+
+ # All "src/sim" simulators set this one.
+ if [board_info target exists is_simulator] {
+ if [board_info target is_simulator] {
+ return 0
+ } else {
+ return 1
+ }
+ }
+
+ # The "sid" simulators don't set that one, but at least they set
+ # this one.
+ if [board_info target exists slow_simulator] {
+ if [board_info target slow_simulator] {
+ return 0
+ } else {
+ return 1
+ }
+ }
+
+ return 1
+}
+
+# Return 1 if the target is a VxWorks kernel.
+
+proc check_effective_target_vxworks_kernel { } {
+ return [check_no_compiler_messages vxworks_kernel assembly {
+ #if !defined __vxworks || defined __RTP__
+ #error NO
+ #endif
+ }]
+}
+
+# Return 1 if the target is a VxWorks RTP.
+
+proc check_effective_target_vxworks_rtp { } {
+ return [check_no_compiler_messages vxworks_rtp assembly {
+ #if !defined __vxworks || !defined __RTP__
+ #error NO
+ #endif
+ }]
+}
+
+# Return 1 if the target is expected to provide wide character support.
+
+proc check_effective_target_wchar { } {
+ if {[check_missing_uclibc_feature UCLIBC_HAS_WCHAR]} {
+ return 0
+ }
+ return [check_no_compiler_messages wchar assembly {
+ #include <wchar.h>
+ }]
+}
+
+# Return 1 if the target has <pthread.h>.
+
+proc check_effective_target_pthread_h { } {
+ return [check_no_compiler_messages pthread_h assembly {
+ #include <pthread.h>
+ }]
+}
+
+# Return 1 if the target can truncate a file from a file-descriptor,
+# as used by libgfortran/io/unix.c:fd_truncate; i.e. ftruncate or
+# chsize. We test for a trivially functional truncation; no stubs.
+# As libgfortran uses _FILE_OFFSET_BITS 64, we do too; it'll cause a
+# different function to be used.
+
+proc check_effective_target_fd_truncate { } {
+ set prog {
+ #define _FILE_OFFSET_BITS 64
+ #include <unistd.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+ int main ()
+ {
+ FILE *f = fopen ("tst.tmp", "wb");
+ int fd;
+ const char t[] = "test writing more than ten characters";
+ char s[11];
+ int status = 0;
+ fd = fileno (f);
+ write (fd, t, sizeof (t) - 1);
+ lseek (fd, 0, 0);
+ if (ftruncate (fd, 10) != 0)
+ status = 1;
+ close (fd);
+ fclose (f);
+ if (status)
+ {
+ unlink ("tst.tmp");
+ exit (status);
+ }
+ f = fopen ("tst.tmp", "rb");
+ if (fread (s, 1, sizeof (s), f) != 10 || strncmp (s, t, 10) != 0)
+ status = 1;
+ fclose (f);
+ unlink ("tst.tmp");
+ exit (status);
+ }
+ }
+
+ if { [check_runtime ftruncate $prog] } {
+ return 1;
+ }
+
+ regsub "ftruncate" $prog "chsize" prog
+ return [check_runtime chsize $prog]
+}
+
+# Add to FLAGS all the target-specific flags needed to access the c99 runtime.
+
+proc add_options_for_c99_runtime { flags } {
+ if { [istarget *-*-solaris2*] } {
+ return "$flags -std=c99"
+ }
+ if { [istarget powerpc-*-darwin*] } {
+ return "$flags -mmacosx-version-min=10.3"
+ }
+ return $flags
+}
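+
+# In the wider testsuite the add_options_for_* procs are normally reached
+# through the dg-add-options directive rather than called directly; a test
+# relying on the C99 runtime would usually be written roughly as:
+#
+#   /* { dg-require-effective-target c99_runtime } */
+#   /* { dg-add-options c99_runtime } */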
+
+# Add to FLAGS all the target-specific flags needed to enable
+# full IEEE compliance mode.
+
+proc add_options_for_ieee { flags } {
+ if { [istarget alpha*-*-*]
+ || [istarget sh*-*-*] } {
+ return "$flags -mieee"
+ }
+ if { [istarget rx-*-*] } {
+ return "$flags -mnofpu"
+ }
+ return $flags
+}
+
+# Add to FLAGS the flags needed to enable functions to bind locally
+# when using pic/PIC passes in the testsuite.
+
+proc add_options_for_bind_pic_locally { flags } {
+ if {[check_no_compiler_messages using_pic2 assembly {
+ #if __PIC__ != 2
+ #error FOO
+ #endif
+ }]} {
+ return "$flags -fPIE"
+ }
+ if {[check_no_compiler_messages using_pic1 assembly {
+ #if __PIC__ != 1
+ #error FOO
+ #endif
+ }]} {
+ return "$flags -fpie"
+ }
+
+ return $flags
+}
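+
+# As with the other add_options_for_* procs, a test that needs functions to
+# bind locally under PIC would typically request this via a directive such
+# as:
+#
+#   /* { dg-add-options bind_pic_locally } */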
+
+# Add to FLAGS the flags needed to enable 64-bit vectors.
+
+proc add_options_for_double_vectors { flags } {
+ if [is-effective-target arm_neon_ok] {
+ return "$flags -mvectorize-with-neon-double"
+ }
+
+ return $flags
+}
+
+# Return 1 if the target provides a full C99 runtime.
+
+proc check_effective_target_c99_runtime { } {
+ return [check_cached_effective_target c99_runtime {
+ global srcdir
+
+ set file [open "$srcdir/gcc.dg/builtins-config.h"]
+ set contents [read $file]
+ close $file
+ append contents {
+ #ifndef HAVE_C99_RUNTIME
+ #error FOO
+ #endif
+ }
+ check_no_compiler_messages_nocache c99_runtime assembly \
+ $contents [add_options_for_c99_runtime ""]
+ }]
+}
+
+# Return 1 if target wchar_t is at least 4 bytes.
+
+proc check_effective_target_4byte_wchar_t { } {
+ return [check_no_compiler_messages 4byte_wchar_t object {
+ int dummy[sizeof (__WCHAR_TYPE__) >= 4 ? 1 : -1];
+ }]
+}
+
+# Return 1 if the target supports automatic stack alignment.
+
+proc check_effective_target_automatic_stack_alignment { } {
+ # Ordinarily x86 supports automatic stack alignment ...
+ if { [istarget i?86*-*-*] || [istarget x86_64-*-*] } then {
+ if { [istarget *-*-mingw*] || [istarget *-*-cygwin*] } {
+ # ... except Win64 SEH doesn't. Succeed for Win32 though.
+ return [check_effective_target_ilp32];
+ }
+ return 1;
+ }
+ return 0;
+}
+
+# Return true if we are compiling for AVX target.
+
+proc check_avx_available { } {
+ if { [check_no_compiler_messages avx_available assembly {
+ #ifndef __AVX__
+ #error unsupported
+ #endif
+ } ""] } {
+ return 1;
+ }
+ return 0;
+}
+
+# Return true if 32- and 16-byte vectors are available.
+
+proc check_effective_target_vect_sizes_32B_16B { } {
+ return [check_avx_available];
+}
+
+# Return true if 128-bit vectors are preferred even if 256-bit vectors
+# are available.
+
+proc check_prefer_avx128 { } {
+ if ![check_avx_available] {
+ return 0;
+ }
+ return [check_no_messages_and_pattern avx_explicit "xmm" assembly {
+ float a[1024],b[1024],c[1024];
+ void foo (void) { int i; for (i = 0; i < 1024; i++) a[i]=b[i]+c[i];}
+ } "-O2 -ftree-vectorize"]
+}
+
+
+# Return 1 if avx512f instructions can be compiled.
+
+proc check_effective_target_avx512f { } {
+ return [check_no_compiler_messages avx512f object {
+ typedef double __m512d __attribute__ ((__vector_size__ (64)));
+
+ __m512d _mm512_add (__m512d a)
+ {
+ return __builtin_ia32_addpd512_mask (a, a, a, 1, 4);
+ }
+ } "-O2 -mavx512f" ]
+}
+
+# Return 1 if avx instructions can be compiled.
+
+proc check_effective_target_avx { } {
+ return [check_no_compiler_messages avx object {
+ void _mm256_zeroall (void)
+ {
+ __builtin_ia32_vzeroall ();
+ }
+ } "-O2 -mavx" ]
+}
+
+# Return 1 if avx2 instructions can be compiled.
+proc check_effective_target_avx2 { } {
+ return [check_no_compiler_messages avx2 object {
+ typedef long long __v4di __attribute__ ((__vector_size__ (32)));
+ __v4di
+ mm256_is32_andnotsi256 (__v4di __X, __v4di __Y)
+ {
+ return __builtin_ia32_andnotsi256 (__X, __Y);
+ }
+ } "-O0 -mavx2" ]
+}
+
+# Return 1 if sse instructions can be compiled.
+proc check_effective_target_sse { } {
+ return [check_no_compiler_messages sse object {
+ int main ()
+ {
+ __builtin_ia32_stmxcsr ();
+ return 0;
+ }
+ } "-O2 -msse" ]
+}
+
+# Return 1 if sse2 instructions can be compiled.
+proc check_effective_target_sse2 { } {
+ return [check_no_compiler_messages sse2 object {
+ typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+
+ __m128i _mm_srli_si128 (__m128i __A, int __N)
+ {
+ return (__m128i)__builtin_ia32_psrldqi128 (__A, 8);
+ }
+ } "-O2 -msse2" ]
+}
+
+# Return 1 if F16C instructions can be compiled.
+
+proc check_effective_target_f16c { } {
+ return [check_no_compiler_messages f16c object {
+ #include "immintrin.h"
+ float
+ foo (unsigned short val)
+ {
+ return _cvtsh_ss (val);
+ }
+ } "-O2 -mf16c" ]
+}
+
+# Return 1 if C wchar_t type is compatible with char16_t.
+
+proc check_effective_target_wchar_t_char16_t_compatible { } {
+ return [check_no_compiler_messages wchar_t_char16_t object {
+ __WCHAR_TYPE__ wc;
+ __CHAR16_TYPE__ *p16 = &wc;
+ char t[(((__CHAR16_TYPE__) -1) < 0 == ((__WCHAR_TYPE__) -1) < 0) ? 1 : -1];
+ }]
+}
+
+# Return 1 if C wchar_t type is compatible with char32_t.
+
+proc check_effective_target_wchar_t_char32_t_compatible { } {
+ return [check_no_compiler_messages wchar_t_char32_t object {
+ __WCHAR_TYPE__ wc;
+ __CHAR32_TYPE__ *p32 = &wc;
+ char t[(((__CHAR32_TYPE__) -1) < 0 == ((__WCHAR_TYPE__) -1) < 0) ? 1 : -1];
+ }]
+}
+
+# Return 1 if pow10 function exists.
+
+proc check_effective_target_pow10 { } {
+ return [check_runtime pow10 {
+ #include <math.h>
+ int main () {
+ double x;
+ x = pow10 (1);
+ return 0;
+ }
+ } "-lm" ]
+}
+
+# Return 1 if current options generate DFP instructions, 0 otherwise.
+
+proc check_effective_target_hard_dfp {} {
+ return [check_no_messages_and_pattern hard_dfp "!adddd3" assembly {
+ typedef float d64 __attribute__((mode(DD)));
+ d64 x, y, z;
+ void foo (void) { z = x + y; }
+ }]
+}
+
+# Return 1 if the string.h and wchar.h headers provide the C++ required
+# overloads for strchr etc. functions.
+
+proc check_effective_target_correct_iso_cpp_string_wchar_protos { } {
+ return [check_no_compiler_messages correct_iso_cpp_string_wchar_protos assembly {
+ #include <string.h>
+ #include <wchar.h>
+ #if !defined(__cplusplus) \
+ || !defined(__CORRECT_ISO_CPP_STRING_H_PROTO) \
+ || !defined(__CORRECT_ISO_CPP_WCHAR_H_PROTO)
+ ISO C++ correct string.h and wchar.h protos not supported.
+ #else
+ int i;
+ #endif
+ }]
+}
+
+# Return 1 if GNU as is used.
+
+proc check_effective_target_gas { } {
+ global use_gas_saved
+ global tool
+
+ if {![info exists use_gas_saved]} {
+ # Check if the as used by gcc is GNU as.
+ set gcc_as [lindex [${tool}_target_compile "-print-prog-name=as" "" "none" ""] 0]
+ # Provide /dev/null as input, otherwise gas times out reading from
+ # stdin.
+ set status [remote_exec host "$gcc_as" "-v /dev/null"]
+ set as_output [lindex $status 1]
+ if { [ string first "GNU" $as_output ] >= 0 } {
+ set use_gas_saved 1
+ } else {
+ set use_gas_saved 0
+ }
+ }
+ return $use_gas_saved
+}
+
+# Return 1 if GNU ld is used.
+
+proc check_effective_target_gld { } {
+ global use_gld_saved
+ global tool
+
+ if {![info exists use_gld_saved]} {
+ # Check if the ld used by gcc is GNU ld.
+ set gcc_ld [lindex [${tool}_target_compile "-print-prog-name=ld" "" "none" ""] 0]
+ set status [remote_exec host "$gcc_ld" "--version"]
+ set ld_output [lindex $status 1]
+ if { [ string first "GNU" $ld_output ] >= 0 } {
+ set use_gld_saved 1
+ } else {
+ set use_gld_saved 0
+ }
+ }
+ return $use_gld_saved
+}
+
+# Return 1 if the compiler has been configured with link-time optimization
+# (LTO) support.
+
+proc check_effective_target_lto { } {
+ global ENABLE_LTO
+ return [info exists ENABLE_LTO]
+}
+
+# Return 1 if -mx32 -maddress-mode=short can compile, 0 otherwise.
+
+proc check_effective_target_maybe_x32 { } {
+ return [check_no_compiler_messages maybe_x32 object {
+ void foo (void) {}
+ } "-mx32 -maddress-mode=short"]
+}
+
+# Return 1 if this target supports the -fsplit-stack option, 0
+# otherwise.
+
+proc check_effective_target_split_stack {} {
+ return [check_no_compiler_messages split_stack object {
+ void foo (void) { }
+ } "-fsplit-stack"]
+}
+
+# Return 1 if this target supports the -masm=intel option, 0
+# otherwise.
+
+proc check_effective_target_masm_intel {} {
+ return [check_no_compiler_messages masm_intel object {
+ extern void abort (void);
+ } "-masm=intel"]
+}
+
+# Return 1 if the language for the compiler under test is C.
+
+proc check_effective_target_c { } {
+ global tool
+ if [string match $tool "gcc"] {
+ return 1
+ }
+ return 0
+}
+
+# Return 1 if the language for the compiler under test is C++.
+
+proc check_effective_target_c++ { } {
+ global tool
+ if [string match $tool "g++"] {
+ return 1
+ }
+ return 0
+}
+
+# Check whether the currently active language standard supports the features
+# of C++11/C++1y by checking for the presence of one of the -std
+# flags. This assumes that the default for the compiler is C++98, and that
+# there will never be multiple -std= arguments on the command line.
+proc check_effective_target_c++11_only { } {
+ if ![check_effective_target_c++] {
+ return 0
+ }
+ return [check-flags { { } { } { -std=c++0x -std=gnu++0x -std=c++11 -std=gnu++11 } }]
+}
+proc check_effective_target_c++11 { } {
+ if [check_effective_target_c++11_only] {
+ return 1
+ }
+ return [check_effective_target_c++1y]
+}
+proc check_effective_target_c++11_down { } {
+ if ![check_effective_target_c++] {
+ return 0
+ }
+ return ![check_effective_target_c++1y]
+}
+
+proc check_effective_target_c++1y_only { } {
+ if ![check_effective_target_c++] {
+ return 0
+ }
+ return [check-flags { { } { } { -std=c++1y -std=gnu++1y -std=c++14 -std=gnu++14 } }]
+}
+proc check_effective_target_c++1y { } {
+ return [check_effective_target_c++1y_only]
+}
+
+proc check_effective_target_c++98_only { } {
+ if ![check_effective_target_c++] {
+ return 0
+ }
+ return ![check_effective_target_c++11]
+}
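+
+# Tests normally consume these language-level effective targets through dg
+# selectors rather than calling the procs directly, for example:
+#
+#   // { dg-do compile { target c++11 } }
+#   // { dg-require-effective-target c++11_only }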
+
+# Return 1 if expensive testcases should be run.
+
+proc check_effective_target_run_expensive_tests { } {
+ if { [getenv GCC_TEST_RUN_EXPENSIVE] != "" } {
+ return 1
+ }
+ return 0
+}
+
+# Returns 1 if "mempcpy" is available on the target system.
+
+proc check_effective_target_mempcpy {} {
+ return [check_function_available "mempcpy"]
+}
+
+# Check whether the vectorizer tests are supported by the target and
+# append additional target-dependent compile flags to DEFAULT_VECTCFLAGS.
+# Set dg-do-what-default to either compile or run, depending on target
+# capabilities. Return 1 if vectorizer tests are supported by the
+# target, 0 otherwise.
+
+proc check_vect_support_and_set_flags { } {
+ global DEFAULT_VECTCFLAGS
+ global dg-do-what-default
+
+ if [istarget powerpc-*paired*] {
+ lappend DEFAULT_VECTCFLAGS "-mpaired"
+ if [check_750cl_hw_available] {
+ set dg-do-what-default run
+ } else {
+ set dg-do-what-default compile
+ }
+ } elseif [istarget powerpc*-*-*] {
+ # Skip targets not supporting -maltivec.
+ if ![is-effective-target powerpc_altivec_ok] {
+ return 0
+ }
+
+ lappend DEFAULT_VECTCFLAGS "-maltivec"
+ if [check_p8vector_hw_available] {
+ lappend DEFAULT_VECTCFLAGS "-mpower8-vector" "-mno-allow-movmisalign"
+ } elseif [check_vsx_hw_available] {
+ lappend DEFAULT_VECTCFLAGS "-mvsx" "-mno-allow-movmisalign"
+ }
+
+ if [check_vmx_hw_available] {
+ set dg-do-what-default run
+ } else {
+ if [is-effective-target ilp32] {
+ # Specify a cpu that supports VMX for compile-only tests.
+ lappend DEFAULT_VECTCFLAGS "-mcpu=970"
+ }
+ set dg-do-what-default compile
+ }
+ } elseif { [istarget spu-*-*] } {
+ set dg-do-what-default run
+ } elseif { [istarget i?86-*-*] || [istarget x86_64-*-*] } {
+ lappend DEFAULT_VECTCFLAGS "-msse2"
+ if { [check_effective_target_sse2_runtime] } {
+ set dg-do-what-default run
+ } else {
+ set dg-do-what-default compile
+ }
+ } elseif { [istarget mips*-*-*]
+ && ([check_effective_target_mpaired_single]
+ || [check_effective_target_mips_loongson])
+ && [check_effective_target_nomips16] } {
+ if { [check_effective_target_mpaired_single] } {
+ lappend DEFAULT_VECTCFLAGS "-mpaired-single"
+ }
+ set dg-do-what-default run
+ } elseif [istarget sparc*-*-*] {
+ lappend DEFAULT_VECTCFLAGS "-mcpu=ultrasparc" "-mvis"
+ if [check_effective_target_ultrasparc_hw] {
+ set dg-do-what-default run
+ } else {
+ set dg-do-what-default compile
+ }
+ } elseif [istarget alpha*-*-*] {
+ # Alpha's vectorization capabilities are extremely limited.
+ # It's more effort than it's worth to disable all of the tests
+ # that it cannot pass. But if you actually want to see what
+ # does work, comment out the return.
+ return 0
+
+ lappend DEFAULT_VECTCFLAGS "-mmax"
+ if [check_alpha_max_hw_available] {
+ set dg-do-what-default run
+ } else {
+ set dg-do-what-default compile
+ }
+ } elseif [istarget ia64-*-*] {
+ set dg-do-what-default run
+ } elseif [is-effective-target arm_neon_ok] {
+ eval lappend DEFAULT_VECTCFLAGS [add_options_for_arm_neon ""]
+ # NEON does not support denormals, so is not used for vectorization by
+ # default to avoid loss of precision. We must pass -ffast-math to test
+ # vectorization of float operations.
+ lappend DEFAULT_VECTCFLAGS "-ffast-math"
+ if [is-effective-target arm_neon_hw] {
+ set dg-do-what-default run
+ } else {
+ set dg-do-what-default compile
+ }
+ } elseif [istarget "aarch64*-*-*"] {
+ set dg-do-what-default run
+ } else {
+ return 0
+ }
+
+ return 1
+}
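+
+# A vectoriser test driver (vect.exp and friends) is expected to call the
+# proc above before running anything; a minimal sketch of that call site,
+# not the exact driver code, is:
+#
+#   set DEFAULT_VECTCFLAGS ""
+#   if ![check_vect_support_and_set_flags] {
+#       return
+#   }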
+
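+# Return 1 if the target does not enforce strict alignment, i.e. casting
+# char * to a maximally aligned pointer type does not provoke -Wcast-align.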
+proc check_effective_target_non_strict_align {} {
+ return [check_no_compiler_messages non_strict_align assembly {
+ char *y;
+ typedef char __attribute__ ((__aligned__(__BIGGEST_ALIGNMENT__))) c;
+ c *z;
+ void foo(void) { z = (c *) y; }
+ } "-Wcast-align"]
+}
+
+# Return 1 if the target has <ucontext.h>.
+
+proc check_effective_target_ucontext_h { } {
+ return [check_no_compiler_messages ucontext_h assembly {
+ #include <ucontext.h>
+ }]
+}
+
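+# Return 1 if this is an AArch64 target compiling for the tiny code model.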
+proc check_effective_target_aarch64_tiny { } {
+ if { [istarget aarch64*-*-*] } {
+ return [check_no_compiler_messages aarch64_tiny object {
+ #ifdef __AARCH64_CMODEL_TINY__
+ int dummy;
+ #else
+ #error target not AArch64 tiny code model
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
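+# Return 1 if this is an AArch64 target compiling for the small code model.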
+proc check_effective_target_aarch64_small { } {
+ if { [istarget aarch64*-*-*] } {
+ return [check_no_compiler_messages aarch64_small object {
+ #ifdef __AARCH64_CMODEL_SMALL__
+ int dummy;
+ #else
+ #error target not AArch64 small code model
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
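+# Return 1 if this is an AArch64 target compiling for the large code model.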
+proc check_effective_target_aarch64_large { } {
+ if { [istarget aarch64*-*-*] } {
+ return [check_no_compiler_messages aarch64_large object {
+ #ifdef __AARCH64_CMODEL_LARGE__
+ int dummy;
+ #else
+ #error target not AArch64 large code model
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if <fenv.h> is available with all the standard IEEE
+# exceptions and floating-point exceptions are raised by arithmetic
+# operations. (If the target requires special options for "inexact"
+# exceptions, those need to be specified in the testcases.)
+
+proc check_effective_target_fenv_exceptions {} {
+ return [check_runtime fenv_exceptions {
+ #include <fenv.h>
+ #include <stdlib.h>
+ #ifndef FE_DIVBYZERO
+ # error Missing FE_DIVBYZERO
+ #endif
+ #ifndef FE_INEXACT
+ # error Missing FE_INEXACT
+ #endif
+ #ifndef FE_INVALID
+ # error Missing FE_INVALID
+ #endif
+ #ifndef FE_OVERFLOW
+ # error Missing FE_OVERFLOW
+ #endif
+ #ifndef FE_UNDERFLOW
+ # error Missing FE_UNDERFLOW
+ #endif
+ volatile float a = 0.0f, r;
+ int
+ main (void)
+ {
+ r = a / a;
+ if (fetestexcept (FE_INVALID))
+ exit (0);
+ else
+ abort ();
+ }
+ } "-std=gnu99"]
+}
+
+# Return 1 if LOGICAL_OP_NON_SHORT_CIRCUIT is set to 0 for the current target.
+
+proc check_effective_target_logical_op_short_circuit {} {
+ if { [istarget mips*-*-*]
+ || [istarget arc*-*-*]
+ || [istarget avr*-*-*]
+ || [istarget crisv32-*-*] || [istarget cris-*-*]
+ || [istarget s390*-*-*]
+ || [check_effective_target_arm_cortex_m] } {
+ return 1
+ }
+ return 0
+}
+
+# Record that dg-final test TEST requires conventional compilation.
+
+proc force_conventional_output_for { test } {
+ if { [info proc $test] == "" } {
+ perror "$test does not exist"
+ exit 1
+ }
+ proc ${test}_required_options {} {
+ global gcc_force_conventional_output
+ return $gcc_force_conventional_output
+ }
+}
+
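+# For instance (illustrative only, not a call made by this file), a driver
+# that forces conventional output could register a dg-final scan proc like
+# so, which defines scan-assembler_required_options to return
+# $gcc_force_conventional_output:
+#
+#   force_conventional_output_for scan-assembler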