From 1bc5aee63eb72b341f506ad058502cd0361f0d10 Mon Sep 17 00:00:00 2001 From: Ben Cheng Date: Tue, 25 Mar 2014 22:37:19 -0700 Subject: Initial checkin of GCC 4.9.0 from trunk (r208799). Change-Id: I48a3c08bb98542aa215912a75f03c0890e497dba --- gcc-4.9/gcc/testsuite/lib/target-supports.exp | 5733 +++++++++++++++++++++++++ 1 file changed, 5733 insertions(+) create mode 100644 gcc-4.9/gcc/testsuite/lib/target-supports.exp (limited to 'gcc-4.9/gcc/testsuite/lib/target-supports.exp') diff --git a/gcc-4.9/gcc/testsuite/lib/target-supports.exp b/gcc-4.9/gcc/testsuite/lib/target-supports.exp new file mode 100644 index 000000000..bee847133 --- /dev/null +++ b/gcc-4.9/gcc/testsuite/lib/target-supports.exp @@ -0,0 +1,5733 @@ +# Copyright (C) 1999-2014 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . + +# Please email any bugs, comments, and/or additions to this file to: +# gcc-patches@gcc.gnu.org + +# This file defines procs for determining features supported by the target. + +# Try to compile the code given by CONTENTS into an output file of +# type TYPE, where TYPE is as for target_compile. Return a list +# whose first element contains the compiler messages and whose +# second element is the name of the output file. +# +# BASENAME is a prefix to use for source and output files. +# If ARGS is not empty, its first element is a string that +# should be added to the command line. +# +# Assume by default that CONTENTS is C code. +# Otherwise, code should contain: +# "// C++" for c++, +# "! Fortran" for Fortran code, +# "/* ObjC", for ObjC +# "// ObjC++" for ObjC++ +# and "// Go" for Go +# If the tool is ObjC/ObjC++ then we overide the extension to .m/.mm to +# allow for ObjC/ObjC++ specific flags. +proc check_compile {basename type contents args} { + global tool + verbose "check_compile tool: $tool for $basename" + + if { [llength $args] > 0 } { + set options [list "additional_flags=[lindex $args 0]"] + } else { + set options "" + } + switch -glob -- $contents { + "*! 
Fortran*" { set src ${basename}[pid].f90 } + "*// C++*" { set src ${basename}[pid].cc } + "*// ObjC++*" { set src ${basename}[pid].mm } + "*/* ObjC*" { set src ${basename}[pid].m } + "*// Go*" { set src ${basename}[pid].go } + default { + switch -- $tool { + "objc" { set src ${basename}[pid].m } + "obj-c++" { set src ${basename}[pid].mm } + default { set src ${basename}[pid].c } + } + } + } + + set compile_type $type + switch -glob $type { + assembly { set output ${basename}[pid].s } + object { set output ${basename}[pid].o } + executable { set output ${basename}[pid].exe } + "rtl-*" { + set output ${basename}[pid].s + lappend options "additional_flags=-fdump-$type" + set compile_type assembly + } + } + set f [open $src "w"] + puts $f $contents + close $f + set lines [${tool}_target_compile $src $output $compile_type "$options"] + file delete $src + + set scan_output $output + # Don't try folding this into the switch above; calling "glob" before the + # file is created won't work. + if [regexp "rtl-(.*)" $type dummy rtl_type] { + set scan_output "[glob $src.\[0-9\]\[0-9\]\[0-9\]r.$rtl_type]" + file delete $output + } + + return [list $lines $scan_output] +} + +proc current_target_name { } { + global target_info + if [info exists target_info(target,name)] { + set answer $target_info(target,name) + } else { + set answer "" + } + return $answer +} + +# Implement an effective-target check for property PROP by invoking +# the Tcl command ARGS and seeing if it returns true. + +proc check_cached_effective_target { prop args } { + global et_cache + + set target [current_target_name] + if {![info exists et_cache($prop,target)] + || $et_cache($prop,target) != $target} { + verbose "check_cached_effective_target $prop: checking $target" 2 + set et_cache($prop,target) $target + set et_cache($prop,value) [uplevel eval $args] + } + set value $et_cache($prop,value) + verbose "check_cached_effective_target $prop: returning $value for $target" 2 + return $value +} + +# Like check_compile, but delete the output file and return true if the +# compiler printed no messages. +proc check_no_compiler_messages_nocache {args} { + set result [eval check_compile $args] + set lines [lindex $result 0] + set output [lindex $result 1] + remote_file build delete $output + return [string match "" $lines] +} + +# Like check_no_compiler_messages_nocache, but cache the result. +# PROP is the property we're checking, and doubles as a prefix for +# temporary filenames. +proc check_no_compiler_messages {prop args} { + return [check_cached_effective_target $prop { + eval [list check_no_compiler_messages_nocache $prop] $args + }] +} + +# Like check_compile, but return true if the compiler printed no +# messages and if the contents of the output file satisfy PATTERN. +# If PATTERN has the form "!REGEXP", the contents satisfy it if they +# don't match regular expression REGEXP, otherwise they satisfy it +# if they do match regular expression PATTERN. (PATTERN can start +# with something like "[!]" if the regular expression needs to match +# "!" as the first character.) +# +# Delete the output file before returning. The other arguments are +# as for check_compile. 
+proc check_no_messages_and_pattern_nocache {basename pattern args} { + global tool + + set result [eval [list check_compile $basename] $args] + set lines [lindex $result 0] + set output [lindex $result 1] + + set ok 0 + if { [string match "" $lines] } { + set chan [open "$output"] + set invert [regexp {^!(.*)} $pattern dummy pattern] + set ok [expr { [regexp $pattern [read $chan]] != $invert }] + close $chan + } + + remote_file build delete $output + return $ok +} + +# Like check_no_messages_and_pattern_nocache, but cache the result. +# PROP is the property we're checking, and doubles as a prefix for +# temporary filenames. +proc check_no_messages_and_pattern {prop pattern args} { + return [check_cached_effective_target $prop { + eval [list check_no_messages_and_pattern_nocache $prop $pattern] $args + }] +} + +# Try to compile and run an executable from code CONTENTS. Return true +# if the compiler reports no messages and if execution "passes" in the +# usual DejaGNU sense. The arguments are as for check_compile, with +# TYPE implicitly being "executable". +proc check_runtime_nocache {basename contents args} { + global tool + + set result [eval [list check_compile $basename executable $contents] $args] + set lines [lindex $result 0] + set output [lindex $result 1] + + set ok 0 + if { [string match "" $lines] } { + # No error messages, everything is OK. + set result [remote_load target "./$output" "" ""] + set status [lindex $result 0] + verbose "check_runtime_nocache $basename: status is <$status>" 2 + if { $status == "pass" } { + set ok 1 + } + } + remote_file build delete $output + return $ok +} + +# Like check_runtime_nocache, but cache the result. PROP is the +# property we're checking, and doubles as a prefix for temporary +# filenames. +proc check_runtime {prop args} { + global tool + + return [check_cached_effective_target $prop { + eval [list check_runtime_nocache $prop] $args + }] +} + +############################### +# proc check_weak_available { } +############################### + +# weak symbols are only supported in some configs/object formats +# this proc returns 1 if they're supported, 0 if they're not, or -1 if unsure + +proc check_weak_available { } { + global target_cpu + + # All mips targets should support it + + if { [ string first "mips" $target_cpu ] >= 0 } { + return 1 + } + + # All AIX targets should support it + + if { [istarget *-*-aix*] } { + return 1 + } + + # All solaris2 targets should support it + + if { [istarget *-*-solaris2*] } { + return 1 + } + + # Windows targets Cygwin and MingW32 support it + + if { [istarget *-*-cygwin*] || [istarget *-*-mingw*] } { + return 1 + } + + # HP-UX 10.X doesn't support it + + if { [istarget hppa*-*-hpux10*] } { + return 0 + } + + # ELF and ECOFF support it. a.out does with gas/gld but may also with + # other linkers, so we should try it + + set objformat [gcc_target_object_format] + + switch $objformat { + elf { return 1 } + ecoff { return 1 } + a.out { return 1 } + mach-o { return 1 } + som { return 1 } + unknown { return -1 } + default { return 0 } + } +} + +############################### +# proc check_weak_override_available { } +############################### + +# Like check_weak_available, but return 0 if weak symbol definitions +# cannot be overridden. 
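+# (Tests normally reach these weak-symbol checks through dg-require
+# directives rather than by calling the procs directly; a sketch,
+# assuming the usual wiring in target-supports-dg.exp:
+#   /* { dg-require-weak "" } */
+# with dg-require-weak-override for the override variant below.)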
+ +proc check_weak_override_available { } { + if { [istarget *-*-mingw*] } { + return 0 + } + return [check_weak_available] +} + +############################### +# proc check_visibility_available { what_kind } +############################### + +# The visibility attribute is only support in some object formats +# This proc returns 1 if it is supported, 0 if not. +# The argument is the kind of visibility, default/protected/hidden/internal. + +proc check_visibility_available { what_kind } { + if [string match "" $what_kind] { set what_kind "hidden" } + + return [check_no_compiler_messages visibility_available_$what_kind object " + void f() __attribute__((visibility(\"$what_kind\"))); + void f() {} + "] +} + +############################### +# proc check_alias_available { } +############################### + +# Determine if the target toolchain supports the alias attribute. + +# Returns 2 if the target supports aliases. Returns 1 if the target +# only supports weak aliased. Returns 0 if the target does not +# support aliases at all. Returns -1 if support for aliases could not +# be determined. + +proc check_alias_available { } { + global alias_available_saved + global tool + + if [info exists alias_available_saved] { + verbose "check_alias_available returning saved $alias_available_saved" 2 + } else { + set src alias[pid].c + set obj alias[pid].o + verbose "check_alias_available compiling testfile $src" 2 + set f [open $src "w"] + # Compile a small test program. The definition of "g" is + # necessary to keep the Solaris assembler from complaining + # about the program. + puts $f "#ifdef __cplusplus\nextern \"C\"\n#endif\n" + puts $f "void g() {} void f() __attribute__((alias(\"g\")));" + close $f + set lines [${tool}_target_compile $src $obj object ""] + file delete $src + remote_file build delete $obj + + if [string match "" $lines] then { + # No error messages, everything is OK. + set alias_available_saved 2 + } else { + if [regexp "alias definitions not supported" $lines] { + verbose "check_alias_available target does not support aliases" 2 + + set objformat [gcc_target_object_format] + + if { $objformat == "elf" } { + verbose "check_alias_available but target uses ELF format, so it ought to" 2 + set alias_available_saved -1 + } else { + set alias_available_saved 0 + } + } else { + if [regexp "only weak aliases are supported" $lines] { + verbose "check_alias_available target supports only weak aliases" 2 + set alias_available_saved 1 + } else { + set alias_available_saved -1 + } + } + } + + verbose "check_alias_available returning $alias_available_saved" 2 + } + + return $alias_available_saved +} + +# Returns 1 if the target toolchain supports strong aliases, 0 otherwise. + +proc check_effective_target_alias { } { + if { [check_alias_available] < 2 } { + return 0 + } else { + return 1 + } +} + +# Returns 1 if the target toolchain supports ifunc, 0 otherwise. + +proc check_ifunc_available { } { + return [check_no_compiler_messages ifunc_available object { + #ifdef __cplusplus + extern "C" + #endif + void g() {} + void f() __attribute__((ifunc("g"))); + }] +} + +# Returns true if --gc-sections is supported on the target. + +proc check_gc_sections_available { } { + global gc_sections_available_saved + global tool + + if {![info exists gc_sections_available_saved]} { + # Some targets don't support gc-sections despite whatever's + # advertised by ld's options. 
+ if { [istarget alpha*-*-*] + || [istarget ia64-*-*] } { + set gc_sections_available_saved 0 + return 0 + } + + # elf2flt uses -q (--emit-relocs), which is incompatible with + # --gc-sections. + if { [board_info target exists ldflags] + && [regexp " -elf2flt\[ =\]" " [board_info target ldflags] "] } { + set gc_sections_available_saved 0 + return 0 + } + + # VxWorks kernel modules are relocatable objects linked with -r, + # while RTP executables are linked with -q (--emit-relocs). + # Both of these options are incompatible with --gc-sections. + if { [istarget *-*-vxworks*] } { + set gc_sections_available_saved 0 + return 0 + } + + # Check if the ld used by gcc supports --gc-sections. + set gcc_spec [${tool}_target_compile "-dumpspecs" "" "none" ""] + regsub ".*\n\\*linker:\[ \t\]*\n(\[^ \t\n\]*).*" "$gcc_spec" {\1} linker + set gcc_ld [lindex [${tool}_target_compile "-print-prog-name=$linker" "" "none" ""] 0] + set ld_output [remote_exec host "$gcc_ld" "--help"] + if { [ string first "--gc-sections" $ld_output ] >= 0 } { + set gc_sections_available_saved 1 + } else { + set gc_sections_available_saved 0 + } + } + return $gc_sections_available_saved +} + +# Return 1 if according to target_info struct and explicit target list +# target is supposed to support trampolines. + +proc check_effective_target_trampolines { } { + if [target_info exists no_trampolines] { + return 0 + } + if { [istarget avr-*-*] + || [istarget msp430-*-*] + || [istarget hppa2.0w-hp-hpux11.23] + || [istarget hppa64-hp-hpux11.23] } { + return 0; + } + return 1 +} + +# Return 1 if according to target_info struct and explicit target list +# target is supposed to keep null pointer checks. This could be due to +# use of option fno-delete-null-pointer-checks or hardwired in target. + +proc check_effective_target_keeps_null_pointer_checks { } { + if [target_info exists keeps_null_pointer_checks] { + return 1 + } + if { [istarget avr-*-*] } { + return 1; + } + return 0 +} + +# Return true if profiling is supported on the target. + +proc check_profiling_available { test_what } { + global profiling_available_saved + + verbose "Profiling argument is <$test_what>" 1 + + # These conditions depend on the argument so examine them before + # looking at the cache variable. + + # Tree profiling requires TLS runtime support. + if { $test_what == "-fprofile-generate" } { + if { ![check_effective_target_tls_runtime] } { + return 0 + } + } + + # Support for -p on solaris2 relies on mcrt1.o which comes with the + # vendor compiler. We cannot reliably predict the directory where the + # vendor compiler (and thus mcrt1.o) is installed so we can't + # necessarily find mcrt1.o even if we have it. + if { [istarget *-*-solaris2*] && $test_what == "-p" } { + return 0 + } + + # We don't yet support profiling for MIPS16. + if { [istarget mips*-*-*] + && ![check_effective_target_nomips16] + && ($test_what == "-p" || $test_what == "-pg") } { + return 0 + } + + # MinGW does not support -p. + if { [istarget *-*-mingw*] && $test_what == "-p" } { + return 0 + } + + # cygwin does not support -p. + if { [istarget *-*-cygwin*] && $test_what == "-p" } { + return 0 + } + + # uClibc does not have gcrt1.o. + if { [check_effective_target_uclibc] + && ($test_what == "-p" || $test_what == "-pg") } { + return 0 + } + + # Now examine the cache variable. + if {![info exists profiling_available_saved]} { + # Some targets don't have any implementation of __bb_init_func or are + # missing other needed machinery. 
+ if { [istarget aarch64*-*-elf] + || [istarget am3*-*-linux*] + || [istarget arm*-*-eabi*] + || [istarget arm*-*-elf] + || [istarget arm*-*-symbianelf*] + || [istarget avr-*-*] + || [istarget bfin-*-*] + || [istarget cris-*-*] + || [istarget crisv32-*-*] + || [istarget fido-*-elf] + || [istarget h8300-*-*] + || [istarget lm32-*-*] + || [istarget m32c-*-elf] + || [istarget m68k-*-elf] + || [istarget m68k-*-uclinux*] + || [istarget mep-*-elf] + || [istarget mips*-*-elf*] + || [istarget mmix-*-*] + || [istarget mn10300-*-elf*] + || [istarget moxie-*-elf*] + || [istarget msp430-*-*] + || [istarget nds32*-*-elf] + || [istarget nios2-*-elf] + || [istarget picochip-*-*] + || [istarget powerpc-*-eabi*] + || [istarget powerpc-*-elf] + || [istarget rx-*-*] + || [istarget tic6x-*-elf] + || [istarget xstormy16-*] + || [istarget xtensa*-*-elf] + || [istarget *-*-rtems*] + || [istarget *-*-vxworks*] } { + set profiling_available_saved 0 + } else { + set profiling_available_saved 1 + } + } + + return $profiling_available_saved +} + +# Check to see if a target is "freestanding". This is as per the definition +# in Section 4 of C99 standard. Effectively, it is a target which supports no +# extra headers or libraries other than what is considered essential. +proc check_effective_target_freestanding { } { + if { [istarget picochip-*-*] } then { + return 1 + } else { + return 0 + } +} + +# Return 1 if target has packed layout of structure members by +# default, 0 otherwise. Note that this is slightly different than +# whether the target has "natural alignment": both attributes may be +# false. + +proc check_effective_target_default_packed { } { + return [check_no_compiler_messages default_packed assembly { + struct x { char a; long b; } c; + int s[sizeof (c) == sizeof (char) + sizeof (long) ? 1 : -1]; + }] +} + +# Return 1 if target has PCC_BITFIELD_TYPE_MATTERS defined. See +# documentation, where the test also comes from. + +proc check_effective_target_pcc_bitfield_type_matters { } { + # PCC_BITFIELD_TYPE_MATTERS isn't just about unnamed or empty + # bitfields, but let's stick to the example code from the docs. + return [check_no_compiler_messages pcc_bitfield_type_matters assembly { + struct foo1 { char x; char :0; char y; }; + struct foo2 { char x; int :0; char y; }; + int s[sizeof (struct foo1) != sizeof (struct foo2) ? 1 : -1]; + }] +} + +# Add to FLAGS all the target-specific flags needed to use thread-local storage. + +proc add_options_for_tls { flags } { + # On Solaris 9, __tls_get_addr/___tls_get_addr only lives in + # libthread, so always pass -pthread for native TLS. Same for AIX. + # Need to duplicate native TLS check from + # check_effective_target_tls_native to avoid recursion. + if { ([istarget *-*-solaris2.9*] || [istarget powerpc-ibm-aix*]) && + [check_no_messages_and_pattern tls_native "!emutls" assembly { + __thread int i; + int f (void) { return i; } + void g (int j) { i = j; } + }] } { + return "$flags -pthread" + } + return $flags +} + +# Return 1 if thread local storage (TLS) is supported, 0 otherwise. + +proc check_effective_target_tls {} { + return [check_no_compiler_messages tls assembly { + __thread int i; + int f (void) { return i; } + void g (int j) { i = j; } + }] +} + +# Return 1 if *native* thread local storage (TLS) is supported, 0 otherwise. + +proc check_effective_target_tls_native {} { + # VxWorks uses emulated TLS machinery, but with non-standard helper + # functions, so we fail to automatically detect it. 
+ if { [istarget *-*-vxworks*] } { + return 0 + } + + return [check_no_messages_and_pattern tls_native "!emutls" assembly { + __thread int i; + int f (void) { return i; } + void g (int j) { i = j; } + }] +} + +# Return 1 if *emulated* thread local storage (TLS) is supported, 0 otherwise. + +proc check_effective_target_tls_emulated {} { + # VxWorks uses emulated TLS machinery, but with non-standard helper + # functions, so we fail to automatically detect it. + if { [istarget *-*-vxworks*] } { + return 1 + } + + return [check_no_messages_and_pattern tls_emulated "emutls" assembly { + __thread int i; + int f (void) { return i; } + void g (int j) { i = j; } + }] +} + +# Return 1 if TLS executables can run correctly, 0 otherwise. + +proc check_effective_target_tls_runtime {} { + # MSP430 runtime does not have TLS support, but just + # running the test below is insufficient to show this. + if { [istarget msp430-*-*] } { + return 0 + } + return [check_runtime tls_runtime { + __thread int thr = 0; + int main (void) { return thr; } + } [add_options_for_tls ""]] +} + +# Return 1 if atomic compare-and-swap is supported on 'int' + +proc check_effective_target_cas_char {} { + return [check_no_compiler_messages cas_char assembly { + #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1 + #error unsupported + #endif + } ""] +} + +proc check_effective_target_cas_int {} { + return [check_no_compiler_messages cas_int assembly { + #if __INT_MAX__ == 0x7fff && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2 + /* ok */ + #elif __INT_MAX__ == 0x7fffffff && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 + /* ok */ + #else + #error unsupported + #endif + } ""] +} + +# Return 1 if -ffunction-sections is supported, 0 otherwise. + +proc check_effective_target_function_sections {} { + # Darwin has its own scheme and silently accepts -ffunction-sections. + if { [istarget *-*-darwin*] } { + return 0 + } + + return [check_no_compiler_messages functionsections assembly { + void foo (void) { } + } "-ffunction-sections"] +} + +# Return 1 if instruction scheduling is available, 0 otherwise. + +proc check_effective_target_scheduling {} { + return [check_no_compiler_messages scheduling object { + void foo (void) { } + } "-fschedule-insns"] +} + +# Return 1 if trapping arithmetic is available, 0 otherwise. + +proc check_effective_target_trapping {} { + return [check_no_compiler_messages scheduling object { + add (int a, int b) { return a + b; } + } "-ftrapv"] +} + +# Return 1 if compilation with -fgraphite is error-free for trivial +# code, 0 otherwise. + +proc check_effective_target_fgraphite {} { + return [check_no_compiler_messages fgraphite object { + void foo (void) { } + } "-O1 -fgraphite"] +} + +# Return 1 if compilation with -fopenmp is error-free for trivial +# code, 0 otherwise. + +proc check_effective_target_fopenmp {} { + return [check_no_compiler_messages fopenmp object { + void foo (void) { } + } "-fopenmp"] +} + +# Return 1 if compilation with -fgnu-tm is error-free for trivial +# code, 0 otherwise. + +proc check_effective_target_fgnu_tm {} { + return [check_no_compiler_messages fgnu_tm object { + void foo (void) { } + } "-fgnu-tm"] +} + +# Return 1 if the target supports mmap, 0 otherwise. + +proc check_effective_target_mmap {} { + return [check_function_available "mmap"] +} + +# Return 1 if the target supports dlopen, 0 otherwise. +proc check_effective_target_dlopen {} { + return [check_function_available "dlopen"] +} + +# Return 1 if the target supports clone, 0 otherwise. 
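+# Like the other check_effective_target_* procs in this file, the
+# keyword derived from the proc name ("clone" here) is what a test
+# would name in its directives; a sketch:
+#   /* { dg-require-effective-target clone } */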
+proc check_effective_target_clone {} { + return [check_function_available "clone"] +} + +# Return 1 if the target supports setrlimit, 0 otherwise. +proc check_effective_target_setrlimit {} { + # Darwin has non-posix compliant RLIMIT_AS + if { [istarget *-*-darwin*] } { + return 0 + } + return [check_function_available "setrlimit"] +} + +# Return 1 if the target supports swapcontext, 0 otherwise. +proc check_effective_target_swapcontext {} { + return [check_no_compiler_messages swapcontext executable { + #include + int main (void) + { + ucontext_t orig_context,child_context; + if (swapcontext(&child_context, &orig_context) < 0) { } + } + }] +} + +# Return 1 if compilation with -pthread is error-free for trivial +# code, 0 otherwise. + +proc check_effective_target_pthread {} { + return [check_no_compiler_messages pthread object { + void foo (void) { } + } "-pthread"] +} + +# Return 1 if compilation with -mpe-aligned-commons is error-free +# for trivial code, 0 otherwise. + +proc check_effective_target_pe_aligned_commons {} { + if { [istarget *-*-cygwin*] || [istarget *-*-mingw*] } { + return [check_no_compiler_messages pe_aligned_commons object { + int foo; + } "-mpe-aligned-commons"] + } + return 0 +} + +# Return 1 if the target supports -static +proc check_effective_target_static {} { + return [check_no_compiler_messages static executable { + int main (void) { return 0; } + } "-static"] +} + +# Return 1 if the target supports -fstack-protector +proc check_effective_target_fstack_protector {} { + return [check_runtime fstack_protector { + int main (void) { return 0; } + } "-fstack-protector"] +} + +# Return 1 if compilation with -freorder-blocks-and-partition is error-free +# for trivial code, 0 otherwise. + +proc check_effective_target_freorder {} { + return [check_no_compiler_messages freorder object { + void foo (void) { } + } "-freorder-blocks-and-partition"] +} + +# Return 1 if -fpic and -fPIC are supported, as in no warnings or errors +# emitted, 0 otherwise. Whether a shared library can actually be built is +# out of scope for this test. + +proc check_effective_target_fpic { } { + # Note that M68K has a multilib that supports -fpic but not + # -fPIC, so we need to check both. We test with a program that + # requires GOT references. + foreach arg {fpic fPIC} { + if [check_no_compiler_messages $arg object { + extern int foo (void); extern int bar; + int baz (void) { return foo () + bar; } + } "-$arg"] { + return 1 + } + } + return 0 +} + +# Return 1 if -pie, -fpie and -fPIE are supported, 0 otherwise. + +proc check_effective_target_pie { } { + if { [istarget *-*-darwin\[912\]*] + || [istarget *-*-linux*] + || [istarget *-*-gnu*] } { + return 1; + } + return 0 +} + +# Return true if the target supports -mpaired-single (as used on MIPS). + +proc check_effective_target_mpaired_single { } { + return [check_no_compiler_messages mpaired_single object { + void foo (void) { } + } "-mpaired-single"] +} + +# Return true if the target has access to FPU instructions. + +proc check_effective_target_hard_float { } { + if { [istarget mips*-*-*] } { + return [check_no_compiler_messages hard_float assembly { + #if (defined __mips_soft_float || defined __mips16) + #error FOO + #endif + }] + } + + # This proc is actually checking the availabilty of FPU + # support for doubles, so on the RX we must fail if the + # 64-bit double multilib has been selected. 
+ if { [istarget rx-*-*] } { + return 0 + # return [check_no_compiler_messages hard_float assembly { + #if defined __RX_64_BIT_DOUBLES__ + #error FOO + #endif + # }] + } + + # The generic test equates hard_float with "no call for adding doubles". + return [check_no_messages_and_pattern hard_float "!\\(call" rtl-expand { + double a (double b, double c) { return b + c; } + }] +} + +# Return true if the target is a 64-bit MIPS target. + +proc check_effective_target_mips64 { } { + return [check_no_compiler_messages mips64 assembly { + #ifndef __mips64 + #error FOO + #endif + }] +} + +# Return true if the target is a MIPS target that does not produce +# MIPS16 code. + +proc check_effective_target_nomips16 { } { + return [check_no_compiler_messages nomips16 object { + #ifndef __mips + #error FOO + #else + /* A cheap way of testing for -mflip-mips16. */ + void foo (void) { asm ("addiu $20,$20,1"); } + void bar (void) { asm ("addiu $20,$20,1"); } + #endif + }] +} + +# Add the options needed for MIPS16 function attributes. At the moment, +# we don't support MIPS16 PIC. + +proc add_options_for_mips16_attribute { flags } { + return "$flags -mno-abicalls -fno-pic -DMIPS16=__attribute__((mips16))" +} + +# Return true if we can force a mode that allows MIPS16 code generation. +# We don't support MIPS16 PIC, and only support MIPS16 -mhard-float +# for o32 and o64. + +proc check_effective_target_mips16_attribute { } { + return [check_no_compiler_messages mips16_attribute assembly { + #ifdef PIC + #error FOO + #endif + #if defined __mips_hard_float \ + && (!defined _ABIO32 || _MIPS_SIM != _ABIO32) \ + && (!defined _ABIO64 || _MIPS_SIM != _ABIO64) + #error FOO + #endif + } [add_options_for_mips16_attribute ""]] +} + +# Return 1 if the target supports long double larger than double when +# using the new ABI, 0 otherwise. + +proc check_effective_target_mips_newabi_large_long_double { } { + return [check_no_compiler_messages mips_newabi_large_long_double object { + int dummy[sizeof(long double) > sizeof(double) ? 1 : -1]; + } "-mabi=64"] +} + +# Return true if the target is a MIPS target that has access +# to the LL and SC instructions. + +proc check_effective_target_mips_llsc { } { + if { ![istarget mips*-*-*] } { + return 0 + } + # Assume that these instructions are always implemented for + # non-elf* targets, via emulation if necessary. + if { ![istarget *-*-elf*] } { + return 1 + } + # Otherwise assume LL/SC support for everything but MIPS I. + return [check_no_compiler_messages mips_llsc assembly { + #if __mips == 1 + #error FOO + #endif + }] +} + +# Return true if the target is a MIPS target that uses in-place relocations. + +proc check_effective_target_mips_rel { } { + if { ![istarget mips*-*-*] } { + return 0 + } + return [check_no_compiler_messages mips_rel object { + #if (defined _ABIN32 && _MIPS_SIM == _ABIN32) \ + || (defined _ABI64 && _MIPS_SIM == _ABI64) + #error FOO + #endif + }] +} + +# Return true if the target is a MIPS target that uses the EABI. + +proc check_effective_target_mips_eabi { } { + if { ![istarget mips*-*-*] } { + return 0 + } + return [check_no_compiler_messages mips_eabi object { + #ifndef __mips_eabi + #error FOO + #endif + }] +} + +# Return 1 if the current multilib does not generate PIC by default. + +proc check_effective_target_nonpic { } { + return [check_no_compiler_messages nonpic assembly { + #if __PIC__ + #error FOO + #endif + }] +} + +# Return 1 if the target does not use a status wrapper. 
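+# Effective-target keywords such as "unwrapped" can also be used in
+# target selectors; a sketch, not taken from a specific test:
+#   /* { dg-do run { target unwrapped } } */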
+ +proc check_effective_target_unwrapped { } { + if { [target_info needs_status_wrapper] != "" \ + && [target_info needs_status_wrapper] != "0" } { + return 0 + } + return 1 +} + +# Return true if iconv is supported on the target. In particular IBM1047. + +proc check_iconv_available { test_what } { + global libiconv + + # If the tool configuration file has not set libiconv, try "-liconv" + if { ![info exists libiconv] } { + set libiconv "-liconv" + } + set test_what [lindex $test_what 1] + return [check_runtime_nocache $test_what [subst { + #include + int main (void) + { + iconv_t cd; + + cd = iconv_open ("$test_what", "UTF-8"); + if (cd == (iconv_t) -1) + return 1; + return 0; + } + }] $libiconv] +} + +# Return true if Cilk Library is supported on the target. +proc check_libcilkrts_available { } { + return [ check_no_compiler_messages_nocache libcilkrts_available executable { + #ifdef __cplusplus + extern "C" + #endif + int __cilkrts_set_param (const char *, const char *); + int main (void) { + int x = __cilkrts_set_param ("nworkers", "0"); + return x; + } + } "-fcilkplus -lcilkrts" ] +} + +# Return 1 if an ASCII locale is supported on this host, 0 otherwise. + +proc check_ascii_locale_available { } { + return 1 +} + +# Return true if named sections are supported on this target. + +proc check_named_sections_available { } { + return [check_no_compiler_messages named_sections assembly { + int __attribute__ ((section("whatever"))) foo; + }] +} + +# Return true if the "naked" function attribute is supported on this target. + +proc check_effective_target_naked_functions { } { + return [check_no_compiler_messages naked_functions assembly { + void f() __attribute__((naked)); + }] +} + +# Return 1 if the target supports Fortran real kinds larger than real(8), +# 0 otherwise. +# +# When the target name changes, replace the cached result. + +proc check_effective_target_fortran_large_real { } { + return [check_no_compiler_messages fortran_large_real executable { + ! Fortran + integer,parameter :: k = selected_real_kind (precision (0.0_8) + 1) + real(kind=k) :: x + x = cos (x) + end + }] +} + +# Return 1 if the target supports Fortran real kind real(16), +# 0 otherwise. Contrary to check_effective_target_fortran_large_real +# this checks for Real(16) only; the other returned real(10) if +# both real(10) and real(16) are available. +# +# When the target name changes, replace the cached result. + +proc check_effective_target_fortran_real_16 { } { + return [check_no_compiler_messages fortran_real_16 executable { + ! Fortran + real(kind=16) :: x + x = cos (x) + end + }] +} + + +# Return 1 if the target supports SQRT for the largest floating-point +# type. (Some targets lack the libm support for this FP type.) +# On most targets, this check effectively checks either whether sqrtl is +# available or on __float128 systems whether libquadmath is installed, +# which provides sqrtq. +# +# When the target name changes, replace the cached result. + +proc check_effective_target_fortran_largest_fp_has_sqrt { } { + return [check_no_compiler_messages fortran_largest_fp_has_sqrt executable { + ! Fortran + use iso_fortran_env, only: real_kinds + integer,parameter:: maxFP = real_kinds(ubound(real_kinds,dim=1)) + real(kind=maxFP), volatile :: x + x = 2.0_maxFP + x = sqrt (x) + end + }] +} + + +# Return 1 if the target supports Fortran integer kinds larger than +# integer(8), 0 otherwise. +# +# When the target name changes, replace the cached result. 
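+#
+# A gfortran test would typically request this as (a sketch):
+#   ! { dg-require-effective-target fortran_large_int }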
+ +proc check_effective_target_fortran_large_int { } { + return [check_no_compiler_messages fortran_large_int executable { + ! Fortran + integer,parameter :: k = selected_int_kind (range (0_8) + 1) + integer(kind=k) :: i + end + }] +} + +# Return 1 if the target supports Fortran integer(16), 0 otherwise. +# +# When the target name changes, replace the cached result. + +proc check_effective_target_fortran_integer_16 { } { + return [check_no_compiler_messages fortran_integer_16 executable { + ! Fortran + integer(16) :: i + end + }] +} + +# Return 1 if we can statically link libgfortran, 0 otherwise. +# +# When the target name changes, replace the cached result. + +proc check_effective_target_static_libgfortran { } { + return [check_no_compiler_messages static_libgfortran executable { + ! Fortran + print *, 'test' + end + } "-static"] +} + +# Return 1 if cilk-plus is supported by the target, 0 otherwise. + +proc check_effective_target_cilkplus { } { + # Skip cilk-plus tests on int16 and size16 targets for now. + # The cilk-plus tests are not generic enough to cover these + # cases and would throw hundreds of FAILs. + if { [check_effective_target_int16] + || ![check_effective_target_size32plus] } { + return 0; + } + + # Skip AVR, its RAM is too small and too many tests would fail. + if { [istarget avr-*-*] } { + return 0; + } + return 1 +} + +proc check_linker_plugin_available { } { + return [check_no_compiler_messages_nocache linker_plugin executable { + int main() { return 0; } + } "-flto -fuse-linker-plugin"] +} + +# Return 1 if the target supports executing 750CL paired-single instructions, 0 +# otherwise. Cache the result. + +proc check_750cl_hw_available { } { + return [check_cached_effective_target 750cl_hw_available { + # If this is not the right target then we can skip the test. + if { ![istarget powerpc-*paired*] } { + expr 0 + } else { + check_runtime_nocache 750cl_hw_available { + int main() + { + #ifdef __MACH__ + asm volatile ("ps_mul v0,v0,v0"); + #else + asm volatile ("ps_mul 0,0,0"); + #endif + return 0; + } + } "-mpaired" + } + }] +} + +# Return 1 if the target OS supports running SSE executables, 0 +# otherwise. Cache the result. + +proc check_sse_os_support_available { } { + return [check_cached_effective_target sse_os_support_available { + # If this is not the right target then we can skip the test. + if { !([istarget x86_64-*-*] || [istarget i?86-*-*]) } { + expr 0 + } elseif { [istarget i?86-*-solaris2*] } { + # The Solaris 2 kernel doesn't save and restore SSE registers + # before Solaris 9 4/04. Before that, executables die with SIGILL. + check_runtime_nocache sse_os_support_available { + int main () + { + asm volatile ("movaps %xmm0,%xmm0"); + return 0; + } + } "-msse" + } else { + expr 1 + } + }] +} + +# Return 1 if the target OS supports running AVX executables, 0 +# otherwise. Cache the result. + +proc check_avx_os_support_available { } { + return [check_cached_effective_target avx_os_support_available { + # If this is not the right target then we can skip the test. + if { !([istarget x86_64-*-*] || [istarget i?86-*-*]) } { + expr 0 + } else { + # Check that OS has AVX and SSE saving enabled. + check_runtime_nocache avx_os_support_available { + int main () + { + unsigned int eax, edx; + + asm ("xgetbv" : "=a" (eax), "=d" (edx) : "c" (0)); + return (eax & 6) != 6; + } + } "" + } + }] +} + +# Return 1 if the target supports executing SSE instructions, 0 +# otherwise. Cache the result. 
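+#
+# Hardware probes like the ones below are normally not named by tests
+# directly; together with the OS-support checks above they feed the
+# *_runtime effective targets defined further down, which a test would
+# request as, for example (a sketch, not from a specific test):
+#   /* { dg-require-effective-target sse2_runtime } */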
+ +proc check_sse_hw_available { } { + return [check_cached_effective_target sse_hw_available { + # If this is not the right target then we can skip the test. + if { !([istarget x86_64-*-*] || [istarget i?86-*-*]) } { + expr 0 + } else { + check_runtime_nocache sse_hw_available { + #include "cpuid.h" + int main () + { + unsigned int eax, ebx, ecx, edx; + if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)) + return !(edx & bit_SSE); + return 1; + } + } "" + } + }] +} + +# Return 1 if the target supports executing SSE2 instructions, 0 +# otherwise. Cache the result. + +proc check_sse2_hw_available { } { + return [check_cached_effective_target sse2_hw_available { + # If this is not the right target then we can skip the test. + if { !([istarget x86_64-*-*] || [istarget i?86-*-*]) } { + expr 0 + } else { + check_runtime_nocache sse2_hw_available { + #include "cpuid.h" + int main () + { + unsigned int eax, ebx, ecx, edx; + if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)) + return !(edx & bit_SSE2); + return 1; + } + } "" + } + }] +} + +# Return 1 if the target supports executing AVX instructions, 0 +# otherwise. Cache the result. + +proc check_avx_hw_available { } { + return [check_cached_effective_target avx_hw_available { + # If this is not the right target then we can skip the test. + if { !([istarget x86_64-*-*] || [istarget i?86-*-*]) } { + expr 0 + } else { + check_runtime_nocache avx_hw_available { + #include "cpuid.h" + int main () + { + unsigned int eax, ebx, ecx, edx; + if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)) + return ((ecx & (bit_AVX | bit_OSXSAVE)) + != (bit_AVX | bit_OSXSAVE)); + return 1; + } + } "" + } + }] +} + +# Return 1 if the target supports running SSE executables, 0 otherwise. + +proc check_effective_target_sse_runtime { } { + if { [check_effective_target_sse] + && [check_sse_hw_available] + && [check_sse_os_support_available] } { + return 1 + } + return 0 +} + +# Return 1 if the target supports running SSE2 executables, 0 otherwise. + +proc check_effective_target_sse2_runtime { } { + if { [check_effective_target_sse2] + && [check_sse2_hw_available] + && [check_sse_os_support_available] } { + return 1 + } + return 0 +} + +# Return 1 if the target supports running AVX executables, 0 otherwise. + +proc check_effective_target_avx_runtime { } { + if { [check_effective_target_avx] + && [check_avx_hw_available] + && [check_avx_os_support_available] } { + return 1 + } + return 0 +} + +# Return 1 if the target supports executing power8 vector instructions, 0 +# otherwise. Cache the result. + +proc check_p8vector_hw_available { } { + return [check_cached_effective_target p8vector_hw_available { + # Some simulators are known to not support VSX/power8 instructions. + # For now, disable on Darwin + if { [istarget powerpc-*-eabi] || [istarget powerpc*-*-eabispe] || [istarget *-*-darwin*]} { + expr 0 + } else { + set options "-mpower8-vector" + check_runtime_nocache p8vector_hw_available { + int main() + { + #ifdef __MACH__ + asm volatile ("xxlorc vs0,vs0,vs0"); + #else + asm volatile ("xxlorc 0,0,0"); + #endif + return 0; + } + } $options + } + }] +} + +# Return 1 if the target supports executing VSX instructions, 0 +# otherwise. Cache the result. + +proc check_vsx_hw_available { } { + return [check_cached_effective_target vsx_hw_available { + # Some simulators are known to not support VSX instructions. 
+ # For now, disable on Darwin + if { [istarget powerpc-*-eabi] || [istarget powerpc*-*-eabispe] || [istarget *-*-darwin*]} { + expr 0 + } else { + set options "-mvsx" + check_runtime_nocache vsx_hw_available { + int main() + { + #ifdef __MACH__ + asm volatile ("xxlor vs0,vs0,vs0"); + #else + asm volatile ("xxlor 0,0,0"); + #endif + return 0; + } + } $options + } + }] +} + +# Return 1 if the target supports executing AltiVec instructions, 0 +# otherwise. Cache the result. + +proc check_vmx_hw_available { } { + return [check_cached_effective_target vmx_hw_available { + # Some simulators are known to not support VMX instructions. + if { [istarget powerpc-*-eabi] || [istarget powerpc*-*-eabispe] } { + expr 0 + } else { + # Most targets don't require special flags for this test case, but + # Darwin does. Just to be sure, make sure VSX is not enabled for + # the altivec tests. + if { [istarget *-*-darwin*] + || [istarget *-*-aix*] } { + set options "-maltivec -mno-vsx" + } else { + set options "-mno-vsx" + } + check_runtime_nocache vmx_hw_available { + int main() + { + #ifdef __MACH__ + asm volatile ("vor v0,v0,v0"); + #else + asm volatile ("vor 0,0,0"); + #endif + return 0; + } + } $options + } + }] +} + +proc check_ppc_recip_hw_available { } { + return [check_cached_effective_target ppc_recip_hw_available { + # Some simulators may not support FRE/FRES/FRSQRTE/FRSQRTES + # For now, disable on Darwin + if { [istarget powerpc-*-eabi] || [istarget powerpc*-*-eabispe] || [istarget *-*-darwin*]} { + expr 0 + } else { + set options "-mpowerpc-gfxopt -mpowerpc-gpopt -mpopcntb" + check_runtime_nocache ppc_recip_hw_available { + volatile double d_recip, d_rsqrt, d_four = 4.0; + volatile float f_recip, f_rsqrt, f_four = 4.0f; + int main() + { + asm volatile ("fres %0,%1" : "=f" (f_recip) : "f" (f_four)); + asm volatile ("fre %0,%1" : "=d" (d_recip) : "d" (d_four)); + asm volatile ("frsqrtes %0,%1" : "=f" (f_rsqrt) : "f" (f_four)); + asm volatile ("frsqrte %0,%1" : "=f" (d_rsqrt) : "d" (d_four)); + return 0; + } + } $options + } + }] +} + +# Return 1 if the target supports executing AltiVec and Cell PPU +# instructions, 0 otherwise. Cache the result. + +proc check_effective_target_cell_hw { } { + return [check_cached_effective_target cell_hw_available { + # Some simulators are known to not support VMX and PPU instructions. + if { [istarget powerpc-*-eabi*] } { + expr 0 + } else { + # Most targets don't require special flags for this test + # case, but Darwin and AIX do. + if { [istarget *-*-darwin*] + || [istarget *-*-aix*] } { + set options "-maltivec -mcpu=cell" + } else { + set options "-mcpu=cell" + } + check_runtime_nocache cell_hw_available { + int main() + { + #ifdef __MACH__ + asm volatile ("vor v0,v0,v0"); + asm volatile ("lvlx v0,r0,r0"); + #else + asm volatile ("vor 0,0,0"); + asm volatile ("lvlx 0,0,0"); + #endif + return 0; + } + } $options + } + }] +} + +# Return 1 if the target supports executing 64-bit instructions, 0 +# otherwise. Cache the result. + +proc check_effective_target_powerpc64 { } { + global powerpc64_available_saved + global tool + + if [info exists powerpc64_available_saved] { + verbose "check_effective_target_powerpc64 returning saved $powerpc64_available_saved" 2 + } else { + set powerpc64_available_saved 0 + + # Some simulators are known to not support powerpc64 instructions. 
+ if { [istarget powerpc-*-eabi*] || [istarget powerpc-ibm-aix*] } { + verbose "check_effective_target_powerpc64 returning 0" 2 + return $powerpc64_available_saved + } + + # Set up, compile, and execute a test program containing a 64-bit + # instruction. Include the current process ID in the file + # names to prevent conflicts with invocations for multiple + # testsuites. + set src ppc[pid].c + set exe ppc[pid].x + + set f [open $src "w"] + puts $f "int main() {" + puts $f "#ifdef __MACH__" + puts $f " asm volatile (\"extsw r0,r0\");" + puts $f "#else" + puts $f " asm volatile (\"extsw 0,0\");" + puts $f "#endif" + puts $f " return 0; }" + close $f + + set opts "additional_flags=-mcpu=G5" + + verbose "check_effective_target_powerpc64 compiling testfile $src" 2 + set lines [${tool}_target_compile $src $exe executable "$opts"] + file delete $src + + if [string match "" $lines] then { + # No error message, compilation succeeded. + set result [${tool}_load "./$exe" "" ""] + set status [lindex $result 0] + remote_file build delete $exe + verbose "check_effective_target_powerpc64 testfile status is <$status>" 2 + + if { $status == "pass" } then { + set powerpc64_available_saved 1 + } + } else { + verbose "check_effective_target_powerpc64 testfile compilation failed" 2 + } + } + + return $powerpc64_available_saved +} + +# GCC 3.4.0 for powerpc64-*-linux* included an ABI fix for passing +# complex float arguments. This affects gfortran tests that call cabsf +# in libm built by an earlier compiler. Return 1 if libm uses the same +# argument passing as the compiler under test, 0 otherwise. +# +# When the target name changes, replace the cached result. + +proc check_effective_target_broken_cplxf_arg { } { + return [check_cached_effective_target broken_cplxf_arg { + # Skip the work for targets known not to be affected. + if { ![istarget powerpc64-*-linux*] } { + expr 0 + } elseif { ![is-effective-target lp64] } { + expr 0 + } else { + check_runtime_nocache broken_cplxf_arg { + #include + extern void abort (void); + float fabsf (float); + float cabsf (_Complex float); + int main () + { + _Complex float cf; + float f; + cf = 3 + 4.0fi; + f = cabsf (cf); + if (fabsf (f - 5.0) > 0.0001) + abort (); + return 0; + } + } "-lm" + } + }] +} + +# Return 1 is this is a TI C6X target supporting C67X instructions +proc check_effective_target_ti_c67x { } { + return [check_no_compiler_messages ti_c67x assembly { + #if !defined(_TMS320C6700) + #error FOO + #endif + }] +} + +# Return 1 is this is a TI C6X target supporting C64X+ instructions +proc check_effective_target_ti_c64xp { } { + return [check_no_compiler_messages ti_c64xp assembly { + #if !defined(_TMS320C6400_PLUS) + #error FOO + #endif + }] +} + + +proc check_alpha_max_hw_available { } { + return [check_runtime alpha_max_hw_available { + int main() { return __builtin_alpha_amask(1<<8) != 0; } + }] +} + +# Returns true iff the FUNCTION is available on the target system. +# (This is essentially a Tcl implementation of Autoconf's +# AC_CHECK_FUNC.) + +proc check_function_available { function } { + return [check_no_compiler_messages ${function}_available \ + executable [subst { + #ifdef __cplusplus + extern "C" + #endif + char $function (); + int main () { $function (); } + }] "-fno-builtin" ] +} + +# Returns true iff "fork" is available on the target system. + +proc check_fork_available {} { + return [check_function_available "fork"] +} + +# Returns true iff "mkfifo" is available on the target system. 
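+# (check_mkfifo_available below follows the same pattern as
+# check_fork_available above; a purely hypothetical probe for, say,
+# "pipe" would read:
+#   proc check_pipe_available {} {
+#       return [check_function_available "pipe"]
+#   }
+# mkfifo additionally has to be disabled on Cygwin, as noted below.)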
+ +proc check_mkfifo_available {} { + if { [istarget *-*-cygwin*] } { + # Cygwin has mkfifo, but support is incomplete. + return 0 + } + + return [check_function_available "mkfifo"] +} + +# Returns true iff "__cxa_atexit" is used on the target system. + +proc check_cxa_atexit_available { } { + return [check_cached_effective_target cxa_atexit_available { + if { [istarget hppa*-*-hpux10*] } { + # HP-UX 10 doesn't have __cxa_atexit but subsequent test passes. + expr 0 + } elseif { [istarget *-*-vxworks] } { + # vxworks doesn't have __cxa_atexit but subsequent test passes. + expr 0 + } else { + check_runtime_nocache cxa_atexit_available { + // C++ + #include + static unsigned int count; + struct X + { + X() { count = 1; } + ~X() + { + if (count != 3) + exit(1); + count = 4; + } + }; + void f() + { + static X x; + } + struct Y + { + Y() { f(); count = 2; } + ~Y() + { + if (count != 2) + exit(1); + count = 3; + } + }; + Y y; + int main() { return 0; } + } + } + }] +} + +proc check_effective_target_objc2 { } { + return [check_no_compiler_messages objc2 object { + #ifdef __OBJC2__ + int dummy[1]; + #else + #error + #endif + }] +} + +proc check_effective_target_next_runtime { } { + return [check_no_compiler_messages objc2 object { + #ifdef __NEXT_RUNTIME__ + int dummy[1]; + #else + #error + #endif + }] +} + +# Return 1 if we're generating 32-bit code using default options, 0 +# otherwise. + +proc check_effective_target_ilp32 { } { + return [check_no_compiler_messages ilp32 object { + int dummy[sizeof (int) == 4 + && sizeof (void *) == 4 + && sizeof (long) == 4 ? 1 : -1]; + }] +} + +# Return 1 if we're generating ia32 code using default options, 0 +# otherwise. + +proc check_effective_target_ia32 { } { + return [check_no_compiler_messages ia32 object { + int dummy[sizeof (int) == 4 + && sizeof (void *) == 4 + && sizeof (long) == 4 ? 1 : -1] = { __i386__ }; + }] +} + +# Return 1 if we're generating x32 code using default options, 0 +# otherwise. + +proc check_effective_target_x32 { } { + return [check_no_compiler_messages x32 object { + int dummy[sizeof (int) == 4 + && sizeof (void *) == 4 + && sizeof (long) == 4 ? 1 : -1] = { __x86_64__ }; + }] +} + +# Return 1 if we're generating 32-bit integers using default +# options, 0 otherwise. + +proc check_effective_target_int32 { } { + return [check_no_compiler_messages int32 object { + int dummy[sizeof (int) == 4 ? 1 : -1]; + }] +} + +# Return 1 if we're generating 32-bit or larger integers using default +# options, 0 otherwise. + +proc check_effective_target_int32plus { } { + return [check_no_compiler_messages int32plus object { + int dummy[sizeof (int) >= 4 ? 1 : -1]; + }] +} + +# Return 1 if we're generating 32-bit or larger pointers using default +# options, 0 otherwise. + +proc check_effective_target_ptr32plus { } { + # The msp430 has 16-bit or 20-bit pointers. The 20-bit pointer is stored + # in a 32-bit slot when in memory, so sizeof(void *) returns 4, but it + # cannot really hold a 32-bit address, so we always return false here. + if { [istarget msp430-*-*] } { + return 0 + } + + return [check_no_compiler_messages ptr32plus object { + int dummy[sizeof (void *) >= 4 ? 1 : -1]; + }] +} + +# Return 1 if we support 32-bit or larger array and structure sizes +# using default options, 0 otherwise. + +proc check_effective_target_size32plus { } { + return [check_no_compiler_messages size32plus object { + char dummy[65537]; + }] +} + +# Returns 1 if we're generating 16-bit or smaller integers with the +# default options, 0 otherwise. 
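+#
+# A test that assumes 32-bit int could be skipped on such targets with
+# a selector along these lines (a sketch):
+#   /* { dg-skip-if "assumes 32-bit int" { int16 } } */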
+ +proc check_effective_target_int16 { } { + return [check_no_compiler_messages int16 object { + int dummy[sizeof (int) < 4 ? 1 : -1]; + }] +} + +# Return 1 if we're generating 64-bit code using default options, 0 +# otherwise. + +proc check_effective_target_lp64 { } { + return [check_no_compiler_messages lp64 object { + int dummy[sizeof (int) == 4 + && sizeof (void *) == 8 + && sizeof (long) == 8 ? 1 : -1]; + }] +} + +# Return 1 if we're generating 64-bit code using default llp64 options, +# 0 otherwise. + +proc check_effective_target_llp64 { } { + return [check_no_compiler_messages llp64 object { + int dummy[sizeof (int) == 4 + && sizeof (void *) == 8 + && sizeof (long long) == 8 + && sizeof (long) == 4 ? 1 : -1]; + }] +} + +# Return 1 if long and int have different sizes, +# 0 otherwise. + +proc check_effective_target_long_neq_int { } { + return [check_no_compiler_messages long_ne_int object { + int dummy[sizeof (int) != sizeof (long) ? 1 : -1]; + }] +} + +# Return 1 if the target supports long double larger than double, +# 0 otherwise. + +proc check_effective_target_large_long_double { } { + return [check_no_compiler_messages large_long_double object { + int dummy[sizeof(long double) > sizeof(double) ? 1 : -1]; + }] +} + +# Return 1 if the target supports double larger than float, +# 0 otherwise. + +proc check_effective_target_large_double { } { + return [check_no_compiler_messages large_double object { + int dummy[sizeof(double) > sizeof(float) ? 1 : -1]; + }] +} + +# Return 1 if the target supports double of 64 bits, +# 0 otherwise. + +proc check_effective_target_double64 { } { + return [check_no_compiler_messages double64 object { + int dummy[sizeof(double) == 8 ? 1 : -1]; + }] +} + +# Return 1 if the target supports double of at least 64 bits, +# 0 otherwise. + +proc check_effective_target_double64plus { } { + return [check_no_compiler_messages double64plus object { + int dummy[sizeof(double) >= 8 ? 1 : -1]; + }] +} + +# Return 1 if the target supports 'w' suffix on floating constant +# 0 otherwise. + +proc check_effective_target_has_w_floating_suffix { } { + set opts "" + if [check_effective_target_c++] { + append opts "-std=gnu++03" + } + return [check_no_compiler_messages w_fp_suffix object { + float dummy = 1.0w; + } "$opts"] +} + +# Return 1 if the target supports 'q' suffix on floating constant +# 0 otherwise. + +proc check_effective_target_has_q_floating_suffix { } { + set opts "" + if [check_effective_target_c++] { + append opts "-std=gnu++03" + } + return [check_no_compiler_messages q_fp_suffix object { + float dummy = 1.0q; + } "$opts"] +} +# Return 1 if the target supports compiling fixed-point, +# 0 otherwise. + +proc check_effective_target_fixed_point { } { + return [check_no_compiler_messages fixed_point object { + _Sat _Fract x; _Sat _Accum y; + }] +} + +# Return 1 if the target supports compiling decimal floating point, +# 0 otherwise. + +proc check_effective_target_dfp_nocache { } { + verbose "check_effective_target_dfp_nocache: compiling source" 2 + set ret [check_no_compiler_messages_nocache dfp object { + float x __attribute__((mode(DD))); + }] + verbose "check_effective_target_dfp_nocache: returning $ret" 2 + return $ret +} + +proc check_effective_target_dfprt_nocache { } { + return [check_runtime_nocache dfprt { + typedef float d64 __attribute__((mode(DD))); + d64 x = 1.2df, y = 2.3dd, z; + int main () { z = x + y; return 0; } + }] +} + +# Return 1 if the target supports compiling Decimal Floating Point, +# 0 otherwise. 
+# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_dfp { } { + return [check_cached_effective_target dfp { + check_effective_target_dfp_nocache + }] +} + +# Return 1 if the target supports linking and executing Decimal Floating +# Point, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_dfprt { } { + return [check_cached_effective_target dfprt { + check_effective_target_dfprt_nocache + }] +} + +# Return 1 if the target supports compiling and assembling UCN, 0 otherwise. + +proc check_effective_target_ucn_nocache { } { + # -std=c99 is only valid for C + if [check_effective_target_c] { + set ucnopts "-std=c99" + } + append ucnopts " -fextended-identifiers" + verbose "check_effective_target_ucn_nocache: compiling source" 2 + set ret [check_no_compiler_messages_nocache ucn object { + int \u00C0; + } $ucnopts] + verbose "check_effective_target_ucn_nocache: returning $ret" 2 + return $ret +} + +# Return 1 if the target supports compiling and assembling UCN, 0 otherwise. +# +# This won't change for different subtargets, so cache the result. + +proc check_effective_target_ucn { } { + return [check_cached_effective_target ucn { + check_effective_target_ucn_nocache + }] +} + +# Return 1 if the target needs a command line argument to enable a SIMD +# instruction set. + +proc check_effective_target_vect_cmdline_needed { } { + global et_vect_cmdline_needed_saved + global et_vect_cmdline_needed_target_name + + if { ![info exists et_vect_cmdline_needed_target_name] } { + set et_vect_cmdline_needed_target_name "" + } + + # If the target has changed since we set the cached value, clear it. + set current_target [current_target_name] + if { $current_target != $et_vect_cmdline_needed_target_name } { + verbose "check_effective_target_vect_cmdline_needed: `$et_vect_cmdline_needed_target_name' `$current_target'" 2 + set et_vect_cmdline_needed_target_name $current_target + if { [info exists et_vect_cmdline_needed_saved] } { + verbose "check_effective_target_vect_cmdline_needed: removing cached result" 2 + unset et_vect_cmdline_needed_saved + } + } + + if [info exists et_vect_cmdline_needed_saved] { + verbose "check_effective_target_vect_cmdline_needed: using cached result" 2 + } else { + set et_vect_cmdline_needed_saved 1 + if { [istarget alpha*-*-*] + || [istarget ia64-*-*] + || (([istarget x86_64-*-*] || [istarget i?86-*-*]) + && ([check_effective_target_x32] + || [check_effective_target_lp64])) + || ([istarget powerpc*-*-*] + && ([check_effective_target_powerpc_spe] + || [check_effective_target_powerpc_altivec])) + || ([istarget sparc*-*-*] && [check_effective_target_sparc_vis]) + || [istarget spu-*-*] + || ([istarget arm*-*-*] && [check_effective_target_arm_neon]) + || [istarget aarch64*-*-*] } { + set et_vect_cmdline_needed_saved 0 + } + } + + verbose "check_effective_target_vect_cmdline_needed: returning $et_vect_cmdline_needed_saved" 2 + return $et_vect_cmdline_needed_saved +} + +# Return 1 if the target supports hardware vectors of int, 0 otherwise. +# +# This won't change for different subtargets so cache the result. 
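+#
+# Vectorizer tests typically consult this keyword from their scan
+# directives, along the lines of (a sketch, not from a specific test):
+#   /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_int } } } */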
+ +proc check_effective_target_vect_int { } { + global et_vect_int_saved + + if [info exists et_vect_int_saved] { + verbose "check_effective_target_vect_int: using cached result" 2 + } else { + set et_vect_int_saved 0 + if { [istarget i?86-*-*] + || ([istarget powerpc*-*-*] + && ![istarget powerpc-*-linux*paired*]) + || [istarget spu-*-*] + || [istarget x86_64-*-*] + || [istarget sparc*-*-*] + || [istarget alpha*-*-*] + || [istarget ia64-*-*] + || [istarget aarch64*-*-*] + || [check_effective_target_arm32] + || ([istarget mips*-*-*] + && [check_effective_target_mips_loongson]) } { + set et_vect_int_saved 1 + } + } + + verbose "check_effective_target_vect_int: returning $et_vect_int_saved" 2 + return $et_vect_int_saved +} + +# Return 1 if the target supports signed int->float conversion +# + +proc check_effective_target_vect_intfloat_cvt { } { + global et_vect_intfloat_cvt_saved + + if [info exists et_vect_intfloat_cvt_saved] { + verbose "check_effective_target_vect_intfloat_cvt: using cached result" 2 + } else { + set et_vect_intfloat_cvt_saved 0 + if { [istarget i?86-*-*] + || ([istarget powerpc*-*-*] + && ![istarget powerpc-*-linux*paired*]) + || [istarget x86_64-*-*] + || ([istarget arm*-*-*] + && [check_effective_target_arm_neon_ok])} { + set et_vect_intfloat_cvt_saved 1 + } + } + + verbose "check_effective_target_vect_intfloat_cvt: returning $et_vect_intfloat_cvt_saved" 2 + return $et_vect_intfloat_cvt_saved +} + +#Return 1 if we're supporting __int128 for target, 0 otherwise. + +proc check_effective_target_int128 { } { + return [check_no_compiler_messages int128 object { + int dummy[ + #ifndef __SIZEOF_INT128__ + -1 + #else + 1 + #endif + ]; + }] +} + +# Return 1 if the target supports unsigned int->float conversion +# + +proc check_effective_target_vect_uintfloat_cvt { } { + global et_vect_uintfloat_cvt_saved + + if [info exists et_vect_uintfloat_cvt_saved] { + verbose "check_effective_target_vect_uintfloat_cvt: using cached result" 2 + } else { + set et_vect_uintfloat_cvt_saved 0 + if { [istarget i?86-*-*] + || ([istarget powerpc*-*-*] + && ![istarget powerpc-*-linux*paired*]) + || [istarget x86_64-*-*] + || [istarget aarch64*-*-*] + || ([istarget arm*-*-*] + && [check_effective_target_arm_neon_ok])} { + set et_vect_uintfloat_cvt_saved 1 + } + } + + verbose "check_effective_target_vect_uintfloat_cvt: returning $et_vect_uintfloat_cvt_saved" 2 + return $et_vect_uintfloat_cvt_saved +} + + +# Return 1 if the target supports signed float->int conversion +# + +proc check_effective_target_vect_floatint_cvt { } { + global et_vect_floatint_cvt_saved + + if [info exists et_vect_floatint_cvt_saved] { + verbose "check_effective_target_vect_floatint_cvt: using cached result" 2 + } else { + set et_vect_floatint_cvt_saved 0 + if { [istarget i?86-*-*] + || ([istarget powerpc*-*-*] + && ![istarget powerpc-*-linux*paired*]) + || [istarget x86_64-*-*] + || ([istarget arm*-*-*] + && [check_effective_target_arm_neon_ok])} { + set et_vect_floatint_cvt_saved 1 + } + } + + verbose "check_effective_target_vect_floatint_cvt: returning $et_vect_floatint_cvt_saved" 2 + return $et_vect_floatint_cvt_saved +} + +# Return 1 if the target supports unsigned float->int conversion +# + +proc check_effective_target_vect_floatuint_cvt { } { + global et_vect_floatuint_cvt_saved + + if [info exists et_vect_floatuint_cvt_saved] { + verbose "check_effective_target_vect_floatuint_cvt: using cached result" 2 + } else { + set et_vect_floatuint_cvt_saved 0 + if { ([istarget powerpc*-*-*] + && ![istarget 
powerpc-*-linux*paired*]) + || ([istarget arm*-*-*] + && [check_effective_target_arm_neon_ok])} { + set et_vect_floatuint_cvt_saved 1 + } + } + + verbose "check_effective_target_vect_floatuint_cvt: returning $et_vect_floatuint_cvt_saved" 2 + return $et_vect_floatuint_cvt_saved +} + +# Return 1 if the target supports #pragma omp declare simd, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_simd_clones { } { + global et_vect_simd_clones_saved + + if [info exists et_vect_simd_clones_saved] { + verbose "check_effective_target_vect_simd_clones: using cached result" 2 + } else { + set et_vect_simd_clones_saved 0 + if { [istarget i?86-*-*] || [istarget x86_64-*-*] } { + # On i?86/x86_64 #pragma omp declare simd builds a sse2, avx and + # avx2 clone. Only the right clone for the specified arch will be + # chosen, but still we need to at least be able to assemble + # avx2. + if { [check_effective_target_avx2] } { + set et_vect_simd_clones_saved 1 + } + } + } + + verbose "check_effective_target_vect_simd_clones: returning $et_vect_simd_clones_saved" 2 + return $et_vect_simd_clones_saved +} + +# Return 1 if this is a AArch64 target supporting big endian +proc check_effective_target_aarch64_big_endian { } { + return [check_no_compiler_messages aarch64_big_endian assembly { + #if !defined(__aarch64__) || !defined(__AARCH64EB__) + #error FOO + #endif + }] +} + +# Return 1 if this is a AArch64 target supporting little endian +proc check_effective_target_aarch64_little_endian { } { + return [check_no_compiler_messages aarch64_little_endian assembly { + #if !defined(__aarch64__) || defined(__AARCH64EB__) + #error FOO + #endif + }] +} + +# Return 1 is this is an arm target using 32-bit instructions +proc check_effective_target_arm32 { } { + return [check_no_compiler_messages arm32 assembly { + #if !defined(__arm__) || (defined(__thumb__) && !defined(__thumb2__)) + #error FOO + #endif + }] +} + +# Return 1 is this is an arm target not using Thumb +proc check_effective_target_arm_nothumb { } { + return [check_no_compiler_messages arm_nothumb assembly { + #if (defined(__thumb__) || defined(__thumb2__)) + #error FOO + #endif + }] +} + +# Return 1 if this is a little-endian ARM target +proc check_effective_target_arm_little_endian { } { + return [check_no_compiler_messages arm_little_endian assembly { + #if !defined(__arm__) || !defined(__ARMEL__) + #error FOO + #endif + }] +} + +# Return 1 if this is an ARM target that only supports aligned vector accesses +proc check_effective_target_arm_vect_no_misalign { } { + return [check_no_compiler_messages arm_vect_no_misalign assembly { + #if !defined(__arm__) \ + || (defined(__ARMEL__) \ + && (!defined(__thumb__) || defined(__thumb2__))) + #error FOO + #endif + }] +} + + +# Return 1 if this is an ARM target supporting -mfpu=vfp +# -mfloat-abi=softfp. Some multilibs may be incompatible with these +# options. + +proc check_effective_target_arm_vfp_ok { } { + if { [check_effective_target_arm32] } { + return [check_no_compiler_messages arm_vfp_ok object { + int dummy; + } "-mfpu=vfp -mfloat-abi=softfp"] + } else { + return 0 + } +} + +# Return 1 if this is an ARM target supporting -mfpu=vfp3 +# -mfloat-abi=softfp. 
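+#
+# Illustrative use (example only): a test would normally pair the check with
+# the matching dg-add-options keyword so the right flags are appended:
+#   /* { dg-require-effective-target arm_vfp3_ok } */
+#   /* { dg-add-options arm_vfp3 } */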
+ +proc check_effective_target_arm_vfp3_ok { } { + if { [check_effective_target_arm32] } { + return [check_no_compiler_messages arm_vfp3_ok object { + int dummy; + } "-mfpu=vfp3 -mfloat-abi=softfp"] + } else { + return 0 + } +} + +# Return 1 if this is an ARM target supporting -mfpu=fp-armv8 +# -mfloat-abi=softfp. +proc check_effective_target_arm_v8_vfp_ok {} { + if { [check_effective_target_arm32] } { + return [check_no_compiler_messages arm_v8_vfp_ok object { + int foo (void) + { + __asm__ volatile ("vrinta.f32.f32 s0, s0"); + return 0; + } + } "-mfpu=fp-armv8 -mfloat-abi=softfp"] + } else { + return 0 + } +} + +# Return 1 if this is an ARM target supporting -mfpu=vfp +# -mfloat-abi=hard. Some multilibs may be incompatible with these +# options. + +proc check_effective_target_arm_hard_vfp_ok { } { + if { [check_effective_target_arm32] + && ! [check-flags [list "" { *-*-* } { "-mfloat-abi=*" } { "-mfloat-abi=hard" }]] } { + return [check_no_compiler_messages arm_hard_vfp_ok executable { + int main() { return 0;} + } "-mfpu=vfp -mfloat-abi=hard"] + } else { + return 0 + } +} + +# Return 1 if this is an ARM target that supports DSP multiply with +# current multilib flags. + +proc check_effective_target_arm_dsp { } { + return [check_no_compiler_messages arm_dsp assembly { + #ifndef __ARM_FEATURE_DSP + #error not DSP + #endif + int i; + }] +} + +# Return 1 if this is an ARM target that supports unaligned word/halfword +# load/store instructions. + +proc check_effective_target_arm_unaligned { } { + return [check_no_compiler_messages arm_unaligned assembly { + #ifndef __ARM_FEATURE_UNALIGNED + #error no unaligned support + #endif + int i; + }] +} + +# Return 1 if this is an ARM target supporting -mfpu=crypto-neon-fp-armv8 +# -mfloat-abi=softfp or equivalent options. Some multilibs may be +# incompatible with these options. Also set et_arm_crypto_flags to the +# best options to add. + +proc check_effective_target_arm_crypto_ok_nocache { } { + global et_arm_crypto_flags + set et_arm_crypto_flags "" + if { [check_effective_target_arm32] } { + foreach flags {"" "-mfloat-abi=softfp" "-mfpu=crypto-neon-fp-armv8" "-mfpu=crypto-neon-fp-armv8 -mfloat-abi=softfp"} { + if { [check_no_compiler_messages_nocache arm_crypto_ok object { + #include "arm_neon.h" + uint8x16_t + foo (uint8x16_t a, uint8x16_t b) + { + return vaeseq_u8 (a, b); + } + } "$flags"] } { + set et_arm_crypto_flags $flags + return 1 + } + } + } + + return 0 +} + +# Return 1 if this is an ARM target supporting -mfpu=crypto-neon-fp-armv8 + +proc check_effective_target_arm_crypto_ok { } { + return [check_cached_effective_target arm_crypto_ok \ + check_effective_target_arm_crypto_ok_nocache] +} + +# Add options for crypto extensions. +proc add_options_for_arm_crypto { flags } { + if { ! [check_effective_target_arm_crypto_ok] } { + return "$flags" + } + global et_arm_crypto_flags + return "$flags $et_arm_crypto_flags" +} + +# Add the options needed for NEON. We need either -mfloat-abi=softfp +# or -mfloat-abi=hard, but if one is already specified by the +# multilib, use it. Similarly, if a -mfpu option already enables +# NEON, do not add -mfpu=neon. + +proc add_options_for_arm_neon { flags } { + if { ! [check_effective_target_arm_neon_ok] } { + return "$flags" + } + global et_arm_neon_flags + return "$flags $et_arm_neon_flags" +} + +proc add_options_for_arm_v8_vfp { flags } { + if { ! 
[check_effective_target_arm_v8_vfp_ok] } { + return "$flags" + } + return "$flags -mfpu=fp-armv8 -mfloat-abi=softfp" +} + +proc add_options_for_arm_v8_neon { flags } { + if { ! [check_effective_target_arm_v8_neon_ok] } { + return "$flags" + } + global et_arm_v8_neon_flags + return "$flags $et_arm_v8_neon_flags -march=armv8-a" +} + +proc add_options_for_arm_crc { flags } { + if { ! [check_effective_target_arm_crc_ok] } { + return "$flags" + } + global et_arm_crc_flags + return "$flags $et_arm_crc_flags" +} + +# Add the options needed for NEON. We need either -mfloat-abi=softfp +# or -mfloat-abi=hard, but if one is already specified by the +# multilib, use it. Similarly, if a -mfpu option already enables +# NEON, do not add -mfpu=neon. + +proc add_options_for_arm_neonv2 { flags } { + if { ! [check_effective_target_arm_neonv2_ok] } { + return "$flags" + } + global et_arm_neonv2_flags + return "$flags $et_arm_neonv2_flags" +} + +# Add the options needed for vfp3. +proc add_options_for_arm_vfp3 { flags } { + if { ! [check_effective_target_arm_vfp3_ok] } { + return "$flags" + } + return "$flags -mfpu=vfp3 -mfloat-abi=softfp" +} + +# Return 1 if this is an ARM target supporting -mfpu=neon +# -mfloat-abi=softfp or equivalent options. Some multilibs may be +# incompatible with these options. Also set et_arm_neon_flags to the +# best options to add. + +proc check_effective_target_arm_neon_ok_nocache { } { + global et_arm_neon_flags + set et_arm_neon_flags "" + if { [check_effective_target_arm32] } { + foreach flags {"" "-mfloat-abi=softfp" "-mfpu=neon" "-mfpu=neon -mfloat-abi=softfp"} { + if { [check_no_compiler_messages_nocache arm_neon_ok object { + #include "arm_neon.h" + int dummy; + } "$flags"] } { + set et_arm_neon_flags $flags + return 1 + } + } + } + + return 0 +} + +proc check_effective_target_arm_neon_ok { } { + return [check_cached_effective_target arm_neon_ok \ + check_effective_target_arm_neon_ok_nocache] +} + +proc check_effective_target_arm_crc_ok_nocache { } { + global et_arm_crc_flags + set et_arm_crc_flags "-march=armv8-a+crc" + return [check_no_compiler_messages_nocache arm_crc_ok object { + #if !defined (__ARM_FEATURE_CRC32) + #error FOO + #endif + } "$et_arm_crc_flags"] +} + +proc check_effective_target_arm_crc_ok { } { + return [check_cached_effective_target arm_crc_ok \ + check_effective_target_arm_crc_ok_nocache] +} + +# Return 1 if this is an ARM target supporting -mfpu=neon-fp16 +# -mfloat-abi=softfp or equivalent options. Some multilibs may be +# incompatible with these options. Also set et_arm_neon_flags to the +# best options to add. + +proc check_effective_target_arm_neon_fp16_ok_nocache { } { + global et_arm_neon_fp16_flags + set et_arm_neon_fp16_flags "" + if { [check_effective_target_arm32] } { + foreach flags {"" "-mfloat-abi=softfp" "-mfpu=neon-fp16" + "-mfpu=neon-fp16 -mfloat-abi=softfp"} { + if { [check_no_compiler_messages_nocache arm_neon_fp_16_ok object { + #include "arm_neon.h" + float16x4_t + foo (float32x4_t arg) + { + return vcvt_f16_f32 (arg); + } + } "$flags"] } { + set et_arm_neon_fp16_flags $flags + return 1 + } + } + } + + return 0 +} + +proc check_effective_target_arm_neon_fp16_ok { } { + return [check_cached_effective_target arm_neon_fp16_ok \ + check_effective_target_arm_neon_fp16_ok_nocache] +} + +proc add_options_for_arm_neon_fp16 { flags } { + if { ! 
[check_effective_target_arm_neon_fp16_ok] } { + return "$flags" + } + global et_arm_neon_fp16_flags + return "$flags $et_arm_neon_fp16_flags" +} + +# Return 1 if this is an ARM target supporting -mfpu=neon-fp-armv8 +# -mfloat-abi=softfp or equivalent options. Some multilibs may be +# incompatible with these options. Also set et_arm_v8_neon_flags to the +# best options to add. + +proc check_effective_target_arm_v8_neon_ok_nocache { } { + global et_arm_v8_neon_flags + set et_arm_v8_neon_flags "" + if { [check_effective_target_arm32] } { + foreach flags {"" "-mfloat-abi=softfp" "-mfpu=neon-fp-armv8" "-mfpu=neon-fp-armv8 -mfloat-abi=softfp"} { + if { [check_no_compiler_messages_nocache arm_v8_neon_ok object { + #include "arm_neon.h" + void + foo () + { + __asm__ volatile ("vrintn.f32 q0, q0"); + } + } "$flags"] } { + set et_arm_v8_neon_flags $flags + return 1 + } + } + } + + return 0 +} + +proc check_effective_target_arm_v8_neon_ok { } { + return [check_cached_effective_target arm_v8_neon_ok \ + check_effective_target_arm_v8_neon_ok_nocache] +} + +# Return 1 if this is an ARM target supporting -mfpu=neon-vfpv4 +# -mfloat-abi=softfp or equivalent options. Some multilibs may be +# incompatible with these options. Also set et_arm_neonv2_flags to the +# best options to add. + +proc check_effective_target_arm_neonv2_ok_nocache { } { + global et_arm_neonv2_flags + set et_arm_neonv2_flags "" + if { [check_effective_target_arm32] } { + foreach flags {"" "-mfloat-abi=softfp" "-mfpu=neon-vfpv4" "-mfpu=neon-vfpv4 -mfloat-abi=softfp"} { + if { [check_no_compiler_messages_nocache arm_neonv2_ok object { + #include "arm_neon.h" + float32x2_t + foo (float32x2_t a, float32x2_t b, float32x2_t c) + { + return vfma_f32 (a, b, c); + } + } "$flags"] } { + set et_arm_neonv2_flags $flags + return 1 + } + } + } + + return 0 +} + +proc check_effective_target_arm_neonv2_ok { } { + return [check_cached_effective_target arm_neonv2_ok \ + check_effective_target_arm_neonv2_ok_nocache] +} + +# Add the options needed for NEON. We need either -mfloat-abi=softfp +# or -mfloat-abi=hard, but if one is already specified by the +# multilib, use it. + +proc add_options_for_arm_fp16 { flags } { + if { ! [check_effective_target_arm_fp16_ok] } { + return "$flags" + } + global et_arm_fp16_flags + return "$flags $et_arm_fp16_flags" +} + +# Return 1 if this is an ARM target that can support a VFP fp16 variant. +# Skip multilibs that are incompatible with these options and set +# et_arm_fp16_flags to the best options to add. + +proc check_effective_target_arm_fp16_ok_nocache { } { + global et_arm_fp16_flags + set et_arm_fp16_flags "" + if { ! [check_effective_target_arm32] } { + return 0; + } + if [check-flags [list "" { *-*-* } { "-mfpu=*" } { "-mfpu=*fp16*" "-mfpu=*fpv[4-9]*" "-mfpu=*fpv[1-9][0-9]*" } ]] { + # Multilib flags would override -mfpu. + return 0 + } + if [check-flags [list "" { *-*-* } { "-mfloat-abi=soft" } { "" } ]] { + # Must generate floating-point instructions. + return 0 + } + if [check_effective_target_arm_hf_eabi] { + # Use existing float-abi and force an fpu which supports fp16 + set et_arm_fp16_flags "-mfpu=vfpv4" + return 1; + } + if [check-flags [list "" { *-*-* } { "-mfpu=*" } { "" } ]] { + # The existing -mfpu value is OK; use it, but add softfp. + set et_arm_fp16_flags "-mfloat-abi=softfp" + return 1; + } + # Add -mfpu for a VFP fp16 variant since there is no preprocessor + # macro to check for this support. 
+ set flags "-mfpu=vfpv4 -mfloat-abi=softfp" + if { [check_no_compiler_messages_nocache arm_fp16_ok assembly { + int dummy; + } "$flags"] } { + set et_arm_fp16_flags "$flags" + return 1 + } + + return 0 +} + +proc check_effective_target_arm_fp16_ok { } { + return [check_cached_effective_target arm_fp16_ok \ + check_effective_target_arm_fp16_ok_nocache] +} + +# Creates a series of routines that return 1 if the given architecture +# can be selected and a routine to give the flags to select that architecture +# Note: Extra flags may be added to disable options from newer compilers +# (Thumb in particular - but others may be added in the future) +# Usage: /* { dg-require-effective-target arm_arch_v5_ok } */ +# /* { dg-add-options arm_arch_v5 } */ +# /* { dg-require-effective-target arm_arch_v5_multilib } */ +foreach { armfunc armflag armdef } { v4 "-march=armv4 -marm" __ARM_ARCH_4__ + v4t "-march=armv4t" __ARM_ARCH_4T__ + v5 "-march=armv5 -marm" __ARM_ARCH_5__ + v5t "-march=armv5t" __ARM_ARCH_5T__ + v5te "-march=armv5te" __ARM_ARCH_5TE__ + v6 "-march=armv6" __ARM_ARCH_6__ + v6k "-march=armv6k" __ARM_ARCH_6K__ + v6t2 "-march=armv6t2" __ARM_ARCH_6T2__ + v6z "-march=armv6z" __ARM_ARCH_6Z__ + v6m "-march=armv6-m -mthumb" __ARM_ARCH_6M__ + v7a "-march=armv7-a" __ARM_ARCH_7A__ + v7ve "-march=armv7ve" __ARM_ARCH_7A__ + v7r "-march=armv7-r" __ARM_ARCH_7R__ + v7m "-march=armv7-m -mthumb" __ARM_ARCH_7M__ + v7em "-march=armv7e-m -mthumb" __ARM_ARCH_7EM__ + v8a "-march=armv8-a" __ARM_ARCH_8A__ } { + eval [string map [list FUNC $armfunc FLAG $armflag DEF $armdef ] { + proc check_effective_target_arm_arch_FUNC_ok { } { + if { [ string match "*-marm*" "FLAG" ] && + ![check_effective_target_arm_arm_ok] } { + return 0 + } + return [check_no_compiler_messages arm_arch_FUNC_ok assembly { + #if !defined (DEF) + #error FOO + #endif + } "FLAG" ] + } + + proc add_options_for_arm_arch_FUNC { flags } { + return "$flags FLAG" + } + + proc check_effective_target_arm_arch_FUNC_multilib { } { + return [check_runtime arm_arch_FUNC_multilib { + int + main (void) + { + return 0; + } + } [add_options_for_arm_arch_FUNC ""]] + } + }] +} + +# Return 1 if this is an ARM target where -marm causes ARM to be +# used (not Thumb) + +proc check_effective_target_arm_arm_ok { } { + return [check_no_compiler_messages arm_arm_ok assembly { + #if !defined (__arm__) || defined (__thumb__) || defined (__thumb2__) + #error FOO + #endif + } "-marm"] +} + + +# Return 1 is this is an ARM target where -mthumb causes Thumb-1 to be +# used. + +proc check_effective_target_arm_thumb1_ok { } { + return [check_no_compiler_messages arm_thumb1_ok assembly { + #if !defined(__arm__) || !defined(__thumb__) || defined(__thumb2__) + #error FOO + #endif + } "-mthumb"] +} + +# Return 1 is this is an ARM target where -mthumb causes Thumb-2 to be +# used. + +proc check_effective_target_arm_thumb2_ok { } { + return [check_no_compiler_messages arm_thumb2_ok assembly { + #if !defined(__thumb2__) + #error FOO + #endif + } "-mthumb"] +} + +# Return 1 if this is an ARM target where Thumb-1 is used without options +# added by the test. + +proc check_effective_target_arm_thumb1 { } { + return [check_no_compiler_messages arm_thumb1 assembly { + #if !defined(__arm__) || !defined(__thumb__) || defined(__thumb2__) + #error not thumb1 + #endif + int i; + } ""] +} + +# Return 1 if this is an ARM target where Thumb-2 is used without options +# added by the test. 
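+#
+# Because it reflects the flags already in force, this keyword is typically
+# used as a selector (illustrative example only):
+#   /* { dg-do compile { target arm_thumb2 } } */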
+ +proc check_effective_target_arm_thumb2 { } { + return [check_no_compiler_messages arm_thumb2 assembly { + #if !defined(__thumb2__) + #error FOO + #endif + int i; + } ""] +} + +# Return 1 if this is an ARM target where conditional execution is available. + +proc check_effective_target_arm_cond_exec { } { + return [check_no_compiler_messages arm_cond_exec assembly { + #if defined(__arm__) && defined(__thumb__) && !defined(__thumb2__) + #error FOO + #endif + int i; + } ""] +} + +# Return 1 if this is an ARM cortex-M profile cpu + +proc check_effective_target_arm_cortex_m { } { + return [check_no_compiler_messages arm_cortex_m assembly { + #if !defined(__ARM_ARCH_7M__) \ + && !defined (__ARM_ARCH_7EM__) \ + && !defined (__ARM_ARCH_6M__) + #error FOO + #endif + int i; + } "-mthumb"] +} + +# Return 1 if the target supports executing NEON instructions, 0 +# otherwise. Cache the result. + +proc check_effective_target_arm_neon_hw { } { + return [check_runtime arm_neon_hw_available { + int + main (void) + { + long long a = 0, b = 1; + asm ("vorr %P0, %P1, %P2" + : "=w" (a) + : "0" (a), "w" (b)); + return (a != 1); + } + } [add_options_for_arm_neon ""]] +} + +proc check_effective_target_arm_neonv2_hw { } { + return [check_runtime arm_neon_hwv2_available { + #include "arm_neon.h" + int + main (void) + { + float32x2_t a, b, c; + asm ("vfma.f32 %P0, %P1, %P2" + : "=w" (a) + : "w" (b), "w" (c)); + return 0; + } + } [add_options_for_arm_neonv2 ""]] +} + +# Return 1 if the target supports executing ARMv8 NEON instructions, 0 +# otherwise. + +proc check_effective_target_arm_v8_neon_hw { } { + return [check_runtime arm_v8_neon_hw_available { + #include "arm_neon.h" + int + main (void) + { + float32x2_t a; + asm ("vrinta.f32 %P0, %P1" + : "=w" (a) + : "0" (a)); + return 0; + } + } [add_options_for_arm_v8_neon ""]] +} + +# Return 1 if this is a ARM target with NEON enabled. + +proc check_effective_target_arm_neon { } { + if { [check_effective_target_arm32] } { + return [check_no_compiler_messages arm_neon object { + #ifndef __ARM_NEON__ + #error not NEON + #else + int dummy; + #endif + }] + } else { + return 0 + } +} + +proc check_effective_target_arm_neonv2 { } { + if { [check_effective_target_arm32] } { + return [check_no_compiler_messages arm_neon object { + #ifndef __ARM_NEON__ + #error not NEON + #else + #ifndef __ARM_FEATURE_FMA + #error not NEONv2 + #else + int dummy; + #endif + #endif + }] + } else { + return 0 + } +} + +# Return 1 if this a Loongson-2E or -2F target using an ABI that supports +# the Loongson vector modes. + +proc check_effective_target_mips_loongson { } { + return [check_no_compiler_messages loongson assembly { + #if !defined(__mips_loongson_vector_rev) + #error FOO + #endif + }] +} + +# Return 1 if this is an ARM target that adheres to the ABI for the ARM +# Architecture. + +proc check_effective_target_arm_eabi { } { + return [check_no_compiler_messages arm_eabi object { + #ifndef __ARM_EABI__ + #error not EABI + #else + int dummy; + #endif + }] +} + +# Return 1 if this is an ARM target that adheres to the hard-float variant of +# the ABI for the ARM Architecture (e.g. -mfloat-abi=hard). + +proc check_effective_target_arm_hf_eabi { } { + return [check_no_compiler_messages arm_hf_eabi object { + #if !defined(__ARM_EABI__) || !defined(__ARM_PCS_VFP) + #error not hard-float EABI + #else + int dummy; + #endif + }] +} + +# Return 1 if this is an ARM target supporting -mcpu=iwmmxt. +# Some multilibs may be incompatible with this option. 
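+#
+# Illustrative use (example only):
+#   /* { dg-require-effective-target arm_iwmmxt_ok } */
+#   /* { dg-options "-mcpu=iwmmxt" } */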
+ +proc check_effective_target_arm_iwmmxt_ok { } { + if { [check_effective_target_arm32] } { + return [check_no_compiler_messages arm_iwmmxt_ok object { + int dummy; + } "-mcpu=iwmmxt"] + } else { + return 0 + } +} + +# Return true if LDRD/STRD instructions are prefered over LDM/STM instructions +# for an ARM target. +proc check_effective_target_arm_prefer_ldrd_strd { } { + if { ![check_effective_target_arm32] } { + return 0; + } + + return [check_no_messages_and_pattern arm_prefer_ldrd_strd "strd\tr" assembly { + void foo (int *p) { p[0] = 1; p[1] = 0;} + } "-O2 -mthumb" ] +} + +# Return 1 if this is a PowerPC target supporting -meabi. + +proc check_effective_target_powerpc_eabi_ok { } { + if { [istarget powerpc*-*-*] } { + return [check_no_compiler_messages powerpc_eabi_ok object { + int dummy; + } "-meabi"] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC target with floating-point registers. + +proc check_effective_target_powerpc_fprs { } { + if { [istarget powerpc*-*-*] + || [istarget rs6000-*-*] } { + return [check_no_compiler_messages powerpc_fprs object { + #ifdef __NO_FPRS__ + #error no FPRs + #else + int dummy; + #endif + }] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC target with hardware double-precision +# floating point. + +proc check_effective_target_powerpc_hard_double { } { + if { [istarget powerpc*-*-*] + || [istarget rs6000-*-*] } { + return [check_no_compiler_messages powerpc_hard_double object { + #ifdef _SOFT_DOUBLE + #error soft double + #else + int dummy; + #endif + }] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC target supporting -maltivec. + +proc check_effective_target_powerpc_altivec_ok { } { + if { ([istarget powerpc*-*-*] + && ![istarget powerpc-*-linux*paired*]) + || [istarget rs6000-*-*] } { + # AltiVec is not supported on AIX before 5.3. + if { [istarget powerpc*-*-aix4*] + || [istarget powerpc*-*-aix5.1*] + || [istarget powerpc*-*-aix5.2*] } { + return 0 + } + return [check_no_compiler_messages powerpc_altivec_ok object { + int dummy; + } "-maltivec"] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC target supporting -mpower8-vector + +proc check_effective_target_powerpc_p8vector_ok { } { + if { ([istarget powerpc*-*-*] + && ![istarget powerpc-*-linux*paired*]) + || [istarget rs6000-*-*] } { + # AltiVec is not supported on AIX before 5.3. + if { [istarget powerpc*-*-aix4*] + || [istarget powerpc*-*-aix5.1*] + || [istarget powerpc*-*-aix5.2*] } { + return 0 + } + return [check_no_compiler_messages powerpc_p8vector_ok object { + int main (void) { +#ifdef __MACH__ + asm volatile ("xxlorc vs0,vs0,vs0"); +#else + asm volatile ("xxlorc 0,0,0"); +#endif + return 0; + } + } "-mpower8-vector"] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC target supporting -mvsx + +proc check_effective_target_powerpc_vsx_ok { } { + if { ([istarget powerpc*-*-*] + && ![istarget powerpc-*-linux*paired*]) + || [istarget rs6000-*-*] } { + # VSX is not supported on AIX before 7.1. 
+ if { [istarget powerpc*-*-aix4*] + || [istarget powerpc*-*-aix5*] + || [istarget powerpc*-*-aix6*] } { + return 0 + } + return [check_no_compiler_messages powerpc_vsx_ok object { + int main (void) { +#ifdef __MACH__ + asm volatile ("xxlor vs0,vs0,vs0"); +#else + asm volatile ("xxlor 0,0,0"); +#endif + return 0; + } + } "-mvsx"] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC target supporting -mhtm + +proc check_effective_target_powerpc_htm_ok { } { + if { ([istarget powerpc*-*-*] + && ![istarget powerpc-*-linux*paired*]) + || [istarget rs6000-*-*] } { + # HTM is not supported on AIX yet. + if { [istarget powerpc*-*-aix*] } { + return 0 + } + return [check_no_compiler_messages powerpc_htm_ok object { + int main (void) { + asm volatile ("tbegin. 0"); + return 0; + } + } "-mhtm"] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC target supporting -mcpu=cell. + +proc check_effective_target_powerpc_ppu_ok { } { + if [check_effective_target_powerpc_altivec_ok] { + return [check_no_compiler_messages cell_asm_available object { + int main (void) { +#ifdef __MACH__ + asm volatile ("lvlx v0,v0,v0"); +#else + asm volatile ("lvlx 0,0,0"); +#endif + return 0; + } + }] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC target that supports SPU. + +proc check_effective_target_powerpc_spu { } { + if { [istarget powerpc*-*-linux*] } { + return [check_effective_target_powerpc_altivec_ok] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC SPE target. The check includes options +# specified by dg-options for this test, so don't cache the result. + +proc check_effective_target_powerpc_spe_nocache { } { + if { [istarget powerpc*-*-*] } { + return [check_no_compiler_messages_nocache powerpc_spe object { + #ifndef __SPE__ + #error not SPE + #else + int dummy; + #endif + } [current_compiler_flags]] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC target with SPE enabled. + +proc check_effective_target_powerpc_spe { } { + if { [istarget powerpc*-*-*] } { + return [check_no_compiler_messages powerpc_spe object { + #ifndef __SPE__ + #error not SPE + #else + int dummy; + #endif + }] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC target with Altivec enabled. + +proc check_effective_target_powerpc_altivec { } { + if { [istarget powerpc*-*-*] } { + return [check_no_compiler_messages powerpc_altivec object { + #ifndef __ALTIVEC__ + #error not Altivec + #else + int dummy; + #endif + }] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC 405 target. The check includes options +# specified by dg-options for this test, so don't cache the result. + +proc check_effective_target_powerpc_405_nocache { } { + if { [istarget powerpc*-*-*] || [istarget rs6000-*-*] } { + return [check_no_compiler_messages_nocache powerpc_405 object { + #ifdef __PPC405__ + int dummy; + #else + #error not a PPC405 + #endif + } [current_compiler_flags]] + } else { + return 0 + } +} + +# Return 1 if this is a PowerPC target using the ELFv2 ABI. + +proc check_effective_target_powerpc_elfv2 { } { + if { [istarget powerpc*-*-*] } { + return [check_no_compiler_messages powerpc_elfv2 object { + #if _CALL_ELF != 2 + #error not ELF v2 ABI + #else + int dummy; + #endif + }] + } else { + return 0 + } +} + +# Return 1 if this is a SPU target with a toolchain that +# supports automatic overlay generation. 
+ +proc check_effective_target_spu_auto_overlay { } { + if { [istarget spu*-*-elf*] } { + return [check_no_compiler_messages spu_auto_overlay executable { + int main (void) { } + } "-Wl,--auto-overlay" ] + } else { + return 0 + } +} + +# The VxWorks SPARC simulator accepts only EM_SPARC executables and +# chokes on EM_SPARC32PLUS or EM_SPARCV9 executables. Return 1 if the +# test environment appears to run executables on such a simulator. + +proc check_effective_target_ultrasparc_hw { } { + return [check_runtime ultrasparc_hw { + int main() { return 0; } + } "-mcpu=ultrasparc"] +} + +# Return 1 if the test environment supports executing UltraSPARC VIS2 +# instructions. We check this by attempting: "bmask %g0, %g0, %g0" + +proc check_effective_target_ultrasparc_vis2_hw { } { + return [check_runtime ultrasparc_vis2_hw { + int main() { __asm__(".word 0x81b00320"); return 0; } + } "-mcpu=ultrasparc3"] +} + +# Return 1 if the test environment supports executing UltraSPARC VIS3 +# instructions. We check this by attempting: "addxc %g0, %g0, %g0" + +proc check_effective_target_ultrasparc_vis3_hw { } { + return [check_runtime ultrasparc_vis3_hw { + int main() { __asm__(".word 0x81b00220"); return 0; } + } "-mcpu=niagara3"] +} + +# Return 1 if this is a SPARC-V9 target. + +proc check_effective_target_sparc_v9 { } { + if { [istarget sparc*-*-*] } { + return [check_no_compiler_messages sparc_v9 object { + int main (void) { + asm volatile ("return %i7+8"); + return 0; + } + }] + } else { + return 0 + } +} + +# Return 1 if this is a SPARC target with VIS enabled. + +proc check_effective_target_sparc_vis { } { + if { [istarget sparc*-*-*] } { + return [check_no_compiler_messages sparc_vis object { + #ifndef __VIS__ + #error not VIS + #else + int dummy; + #endif + }] + } else { + return 0 + } +} + +# Return 1 if the target supports hardware vector shift operation. + +proc check_effective_target_vect_shift { } { + global et_vect_shift_saved + + if [info exists et_vect_shift_saved] { + verbose "check_effective_target_vect_shift: using cached result" 2 + } else { + set et_vect_shift_saved 0 + if { ([istarget powerpc*-*-*] + && ![istarget powerpc-*-linux*paired*]) + || [istarget ia64-*-*] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [istarget aarch64*-*-*] + || [check_effective_target_arm32] + || ([istarget mips*-*-*] + && [check_effective_target_mips_loongson]) } { + set et_vect_shift_saved 1 + } + } + + verbose "check_effective_target_vect_shift: returning $et_vect_shift_saved" 2 + return $et_vect_shift_saved +} + +# Return 1 if the target supports hardware vector shift operation for char. + +proc check_effective_target_vect_shift_char { } { + global et_vect_shift_char_saved + + if [info exists et_vect_shift_char_saved] { + verbose "check_effective_target_vect_shift_char: using cached result" 2 + } else { + set et_vect_shift_char_saved 0 + if { ([istarget powerpc*-*-*] + && ![istarget powerpc-*-linux*paired*]) + || [check_effective_target_arm32] } { + set et_vect_shift_char_saved 1 + } + } + + verbose "check_effective_target_vect_shift_char: returning $et_vect_shift_char_saved" 2 + return $et_vect_shift_char_saved +} + +# Return 1 if the target supports hardware vectors of long, 0 otherwise. +# +# This can change for different subtargets so do not cache the result. 
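+#
+# Since the answer depends on the multilib in use, tests query it each time,
+# e.g. (illustrative example only):
+#   /* { dg-require-effective-target vect_long } */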
+ +proc check_effective_target_vect_long { } { + if { [istarget i?86-*-*] + || (([istarget powerpc*-*-*] + && ![istarget powerpc-*-linux*paired*]) + && [check_effective_target_ilp32]) + || [istarget x86_64-*-*] + || [check_effective_target_arm32] + || ([istarget sparc*-*-*] && [check_effective_target_ilp32]) } { + set answer 1 + } else { + set answer 0 + } + + verbose "check_effective_target_vect_long: returning $answer" 2 + return $answer +} + +# Return 1 if the target supports hardware vectors of float, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_float { } { + global et_vect_float_saved + + if [info exists et_vect_float_saved] { + verbose "check_effective_target_vect_float: using cached result" 2 + } else { + set et_vect_float_saved 0 + if { [istarget i?86-*-*] + || [istarget powerpc*-*-*] + || [istarget spu-*-*] + || [istarget mips-sde-elf] + || [istarget mipsisa64*-*-*] + || [istarget x86_64-*-*] + || [istarget ia64-*-*] + || [istarget aarch64*-*-*] + || [check_effective_target_arm32] } { + set et_vect_float_saved 1 + } + } + + verbose "check_effective_target_vect_float: returning $et_vect_float_saved" 2 + return $et_vect_float_saved +} + +# Return 1 if the target supports hardware vectors of double, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_double { } { + global et_vect_double_saved + + if [info exists et_vect_double_saved] { + verbose "check_effective_target_vect_double: using cached result" 2 + } else { + set et_vect_double_saved 0 + if { [istarget i?86-*-*] + || [istarget aarch64*-*-*] + || [istarget x86_64-*-*] } { + if { [check_no_compiler_messages vect_double assembly { + #ifdef __tune_atom__ + # error No double vectorizer support. + #endif + }] } { + set et_vect_double_saved 1 + } else { + set et_vect_double_saved 0 + } + } elseif { [istarget spu-*-*] } { + set et_vect_double_saved 1 + } + } + + verbose "check_effective_target_vect_double: returning $et_vect_double_saved" 2 + return $et_vect_double_saved +} + +# Return 1 if the target supports hardware vectors of long long, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_long_long { } { + global et_vect_long_long_saved + + if [info exists et_vect_long_long_saved] { + verbose "check_effective_target_vect_long_long: using cached result" 2 + } else { + set et_vect_long_long_saved 0 + if { [istarget i?86-*-*] + || [istarget x86_64-*-*] } { + set et_vect_long_long_saved 1 + } + } + + verbose "check_effective_target_vect_long_long: returning $et_vect_long_long_saved" 2 + return $et_vect_long_long_saved +} + + +# Return 1 if the target plus current options does not support a vector +# max instruction on "int", 0 otherwise. +# +# This won't change for different subtargets so cache the result. 
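+#
+# Keywords of the vect_no_* form are usually consumed as xfail selectors,
+# e.g. (illustrative example only; the dump string is hypothetical):
+#   /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail vect_no_int_max } } } */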
+ +proc check_effective_target_vect_no_int_max { } { + global et_vect_no_int_max_saved + + if [info exists et_vect_no_int_max_saved] { + verbose "check_effective_target_vect_no_int_max: using cached result" 2 + } else { + set et_vect_no_int_max_saved 0 + if { [istarget sparc*-*-*] + || [istarget spu-*-*] + || [istarget alpha*-*-*] + || ([istarget mips*-*-*] + && [check_effective_target_mips_loongson]) } { + set et_vect_no_int_max_saved 1 + } + } + verbose "check_effective_target_vect_no_int_max: returning $et_vect_no_int_max_saved" 2 + return $et_vect_no_int_max_saved +} + +# Return 1 if the target plus current options does not support a vector +# add instruction on "int", 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_no_int_add { } { + global et_vect_no_int_add_saved + + if [info exists et_vect_no_int_add_saved] { + verbose "check_effective_target_vect_no_int_add: using cached result" 2 + } else { + set et_vect_no_int_add_saved 0 + # Alpha only supports vector add on V8QI and V4HI. + if { [istarget alpha*-*-*] } { + set et_vect_no_int_add_saved 1 + } + } + verbose "check_effective_target_vect_no_int_add: returning $et_vect_no_int_add_saved" 2 + return $et_vect_no_int_add_saved +} + +# Return 1 if the target plus current options does not support vector +# bitwise instructions, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_no_bitwise { } { + global et_vect_no_bitwise_saved + + if [info exists et_vect_no_bitwise_saved] { + verbose "check_effective_target_vect_no_bitwise: using cached result" 2 + } else { + set et_vect_no_bitwise_saved 0 + } + verbose "check_effective_target_vect_no_bitwise: returning $et_vect_no_bitwise_saved" 2 + return $et_vect_no_bitwise_saved +} + +# Return 1 if the target plus current options supports vector permutation, +# 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_perm { } { + global et_vect_perm + + if [info exists et_vect_perm_saved] { + verbose "check_effective_target_vect_perm: using cached result" 2 + } else { + set et_vect_perm_saved 0 + if { [is-effective-target arm_neon_ok] + || ([istarget aarch64*-*-*] + && [is-effective-target aarch64_little_endian]) + || [istarget powerpc*-*-*] + || [istarget spu-*-*] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || ([istarget mips*-*-*] + && [check_effective_target_mpaired_single]) } { + set et_vect_perm_saved 1 + } + } + verbose "check_effective_target_vect_perm: returning $et_vect_perm_saved" 2 + return $et_vect_perm_saved +} + +# Return 1 if the target plus current options supports vector permutation +# on byte-sized elements, 0 otherwise. +# +# This won't change for different subtargets so cache the result. 
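+#
+# Illustrative use (example only), combining keywords in one selector:
+#   /* { dg-final { scan-tree-dump "vectorized" "vect" { target { vect_perm_byte && vect_int } } } } */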
+ +proc check_effective_target_vect_perm_byte { } { + global et_vect_perm_byte + + if [info exists et_vect_perm_byte_saved] { + verbose "check_effective_target_vect_perm_byte: using cached result" 2 + } else { + set et_vect_perm_byte_saved 0 + if { ([is-effective-target arm_neon_ok] + && [is-effective-target arm_little_endian]) + || ([istarget aarch64*-*-*] + && [is-effective-target aarch64_little_endian]) + || [istarget powerpc*-*-*] + || [istarget spu-*-*] } { + set et_vect_perm_byte_saved 1 + } + } + verbose "check_effective_target_vect_perm_byte: returning $et_vect_perm_byte_saved" 2 + return $et_vect_perm_byte_saved +} + +# Return 1 if the target plus current options supports vector permutation +# on short-sized elements, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_perm_short { } { + global et_vect_perm_short + + if [info exists et_vect_perm_short_saved] { + verbose "check_effective_target_vect_perm_short: using cached result" 2 + } else { + set et_vect_perm_short_saved 0 + if { ([is-effective-target arm_neon_ok] + && [is-effective-target arm_little_endian]) + || ([istarget aarch64*-*-*] + && [is-effective-target aarch64_little_endian]) + || [istarget powerpc*-*-*] + || [istarget spu-*-*] } { + set et_vect_perm_short_saved 1 + } + } + verbose "check_effective_target_vect_perm_short: returning $et_vect_perm_short_saved" 2 + return $et_vect_perm_short_saved +} + +# Return 1 if the target plus current options supports a vector +# widening summation of *short* args into *int* result, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_widen_sum_hi_to_si_pattern { } { + global et_vect_widen_sum_hi_to_si_pattern + + if [info exists et_vect_widen_sum_hi_to_si_pattern_saved] { + verbose "check_effective_target_vect_widen_sum_hi_to_si_pattern: using cached result" 2 + } else { + set et_vect_widen_sum_hi_to_si_pattern_saved 0 + if { [istarget powerpc*-*-*] + || [istarget ia64-*-*] } { + set et_vect_widen_sum_hi_to_si_pattern_saved 1 + } + } + verbose "check_effective_target_vect_widen_sum_hi_to_si_pattern: returning $et_vect_widen_sum_hi_to_si_pattern_saved" 2 + return $et_vect_widen_sum_hi_to_si_pattern_saved +} + +# Return 1 if the target plus current options supports a vector +# widening summation of *short* args into *int* result, 0 otherwise. +# A target can also support this widening summation if it can support +# promotion (unpacking) from shorts to ints. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_widen_sum_hi_to_si { } { + global et_vect_widen_sum_hi_to_si + + if [info exists et_vect_widen_sum_hi_to_si_saved] { + verbose "check_effective_target_vect_widen_sum_hi_to_si: using cached result" 2 + } else { + set et_vect_widen_sum_hi_to_si_saved [check_effective_target_vect_unpack] + if { [istarget powerpc*-*-*] + || [istarget ia64-*-*] } { + set et_vect_widen_sum_hi_to_si_saved 1 + } + } + verbose "check_effective_target_vect_widen_sum_hi_to_si: returning $et_vect_widen_sum_hi_to_si_saved" 2 + return $et_vect_widen_sum_hi_to_si_saved +} + +# Return 1 if the target plus current options supports a vector +# widening summation of *char* args into *short* result, 0 otherwise. +# A target can also support this widening summation if it can support +# promotion (unpacking) from chars to shorts. +# +# This won't change for different subtargets so cache the result. 
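+#
+# Illustrative use (example only; the pattern name in the dump string is
+# just an example):
+#   /* { dg-final { scan-tree-dump-times "vect_recog_widen_sum_pattern: detected" 1 "vect" { target vect_widen_sum_qi_to_hi } } } */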
+ +proc check_effective_target_vect_widen_sum_qi_to_hi { } { + global et_vect_widen_sum_qi_to_hi + + if [info exists et_vect_widen_sum_qi_to_hi_saved] { + verbose "check_effective_target_vect_widen_sum_qi_to_hi: using cached result" 2 + } else { + set et_vect_widen_sum_qi_to_hi_saved 0 + if { [check_effective_target_vect_unpack] + || [check_effective_target_arm_neon_ok] + || [istarget ia64-*-*] } { + set et_vect_widen_sum_qi_to_hi_saved 1 + } + } + verbose "check_effective_target_vect_widen_sum_qi_to_hi: returning $et_vect_widen_sum_qi_to_hi_saved" 2 + return $et_vect_widen_sum_qi_to_hi_saved +} + +# Return 1 if the target plus current options supports a vector +# widening summation of *char* args into *int* result, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_widen_sum_qi_to_si { } { + global et_vect_widen_sum_qi_to_si + + if [info exists et_vect_widen_sum_qi_to_si_saved] { + verbose "check_effective_target_vect_widen_sum_qi_to_si: using cached result" 2 + } else { + set et_vect_widen_sum_qi_to_si_saved 0 + if { [istarget powerpc*-*-*] } { + set et_vect_widen_sum_qi_to_si_saved 1 + } + } + verbose "check_effective_target_vect_widen_sum_qi_to_si: returning $et_vect_widen_sum_qi_to_si_saved" 2 + return $et_vect_widen_sum_qi_to_si_saved +} + +# Return 1 if the target plus current options supports a vector +# widening multiplication of *char* args into *short* result, 0 otherwise. +# A target can also support this widening multplication if it can support +# promotion (unpacking) from chars to shorts, and vect_short_mult (non-widening +# multiplication of shorts). +# +# This won't change for different subtargets so cache the result. + + +proc check_effective_target_vect_widen_mult_qi_to_hi { } { + global et_vect_widen_mult_qi_to_hi + + if [info exists et_vect_widen_mult_qi_to_hi_saved] { + verbose "check_effective_target_vect_widen_mult_qi_to_hi: using cached result" 2 + } else { + if { [check_effective_target_vect_unpack] + && [check_effective_target_vect_short_mult] } { + set et_vect_widen_mult_qi_to_hi_saved 1 + } else { + set et_vect_widen_mult_qi_to_hi_saved 0 + } + if { [istarget powerpc*-*-*] + || [istarget aarch64*-*-*] + || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]) } { + set et_vect_widen_mult_qi_to_hi_saved 1 + } + } + verbose "check_effective_target_vect_widen_mult_qi_to_hi: returning $et_vect_widen_mult_qi_to_hi_saved" 2 + return $et_vect_widen_mult_qi_to_hi_saved +} + +# Return 1 if the target plus current options supports a vector +# widening multiplication of *short* args into *int* result, 0 otherwise. +# A target can also support this widening multplication if it can support +# promotion (unpacking) from shorts to ints, and vect_int_mult (non-widening +# multiplication of ints). +# +# This won't change for different subtargets so cache the result. 
+ + +proc check_effective_target_vect_widen_mult_hi_to_si { } { + global et_vect_widen_mult_hi_to_si + + if [info exists et_vect_widen_mult_hi_to_si_saved] { + verbose "check_effective_target_vect_widen_mult_hi_to_si: using cached result" 2 + } else { + if { [check_effective_target_vect_unpack] + && [check_effective_target_vect_int_mult] } { + set et_vect_widen_mult_hi_to_si_saved 1 + } else { + set et_vect_widen_mult_hi_to_si_saved 0 + } + if { [istarget powerpc*-*-*] + || [istarget spu-*-*] + || [istarget ia64-*-*] + || [istarget aarch64*-*-*] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]) } { + set et_vect_widen_mult_hi_to_si_saved 1 + } + } + verbose "check_effective_target_vect_widen_mult_hi_to_si: returning $et_vect_widen_mult_hi_to_si_saved" 2 + return $et_vect_widen_mult_hi_to_si_saved +} + +# Return 1 if the target plus current options supports a vector +# widening multiplication of *char* args into *short* result, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_widen_mult_qi_to_hi_pattern { } { + global et_vect_widen_mult_qi_to_hi_pattern + + if [info exists et_vect_widen_mult_qi_to_hi_pattern_saved] { + verbose "check_effective_target_vect_widen_mult_qi_to_hi_pattern: using cached result" 2 + } else { + set et_vect_widen_mult_qi_to_hi_pattern_saved 0 + if { [istarget powerpc*-*-*] + || ([istarget arm*-*-*] + && [check_effective_target_arm_neon_ok] + && [check_effective_target_arm_little_endian]) } { + set et_vect_widen_mult_qi_to_hi_pattern_saved 1 + } + } + verbose "check_effective_target_vect_widen_mult_qi_to_hi_pattern: returning $et_vect_widen_mult_qi_to_hi_pattern_saved" 2 + return $et_vect_widen_mult_qi_to_hi_pattern_saved +} + +# Return 1 if the target plus current options supports a vector +# widening multiplication of *short* args into *int* result, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_widen_mult_hi_to_si_pattern { } { + global et_vect_widen_mult_hi_to_si_pattern + + if [info exists et_vect_widen_mult_hi_to_si_pattern_saved] { + verbose "check_effective_target_vect_widen_mult_hi_to_si_pattern: using cached result" 2 + } else { + set et_vect_widen_mult_hi_to_si_pattern_saved 0 + if { [istarget powerpc*-*-*] + || [istarget spu-*-*] + || [istarget ia64-*-*] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || ([istarget arm*-*-*] + && [check_effective_target_arm_neon_ok] + && [check_effective_target_arm_little_endian]) } { + set et_vect_widen_mult_hi_to_si_pattern_saved 1 + } + } + verbose "check_effective_target_vect_widen_mult_hi_to_si_pattern: returning $et_vect_widen_mult_hi_to_si_pattern_saved" 2 + return $et_vect_widen_mult_hi_to_si_pattern_saved +} + +# Return 1 if the target plus current options supports a vector +# widening shift, 0 otherwise. +# +# This won't change for different subtargets so cache the result. 
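+#
+# Illustrative use (example only; the dump string is just an example):
+#   /* { dg-final { scan-tree-dump-times "vect_recog_widen_shift_pattern: detected" 1 "vect" { target vect_widen_shift } } } */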
+
+proc check_effective_target_vect_widen_shift { } {
+    global et_vect_widen_shift_saved
+
+    if [info exists et_vect_widen_shift_saved] {
+        verbose "check_effective_target_vect_widen_shift: using cached result" 2
+    } else {
+        set et_vect_widen_shift_saved 0
+        if { ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]) } {
+            set et_vect_widen_shift_saved 1
+        }
+    }
+    verbose "check_effective_target_vect_widen_shift: returning $et_vect_widen_shift_saved" 2
+    return $et_vect_widen_shift_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# dot-product of signed chars, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_sdot_qi { } {
+    global et_vect_sdot_qi_saved
+
+    if [info exists et_vect_sdot_qi_saved] {
+        verbose "check_effective_target_vect_sdot_qi: using cached result" 2
+    } else {
+        set et_vect_sdot_qi_saved 0
+        if { [istarget ia64-*-*] } {
+            set et_vect_sdot_qi_saved 1
+        }
+    }
+    verbose "check_effective_target_vect_sdot_qi: returning $et_vect_sdot_qi_saved" 2
+    return $et_vect_sdot_qi_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# dot-product of unsigned chars, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_udot_qi { } {
+    global et_vect_udot_qi_saved
+
+    if [info exists et_vect_udot_qi_saved] {
+        verbose "check_effective_target_vect_udot_qi: using cached result" 2
+    } else {
+        set et_vect_udot_qi_saved 0
+        if { [istarget powerpc*-*-*]
+             || [istarget ia64-*-*] } {
+            set et_vect_udot_qi_saved 1
+        }
+    }
+    verbose "check_effective_target_vect_udot_qi: returning $et_vect_udot_qi_saved" 2
+    return $et_vect_udot_qi_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# dot-product of signed shorts, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_sdot_hi { } {
+    global et_vect_sdot_hi_saved
+
+    if [info exists et_vect_sdot_hi_saved] {
+        verbose "check_effective_target_vect_sdot_hi: using cached result" 2
+    } else {
+        set et_vect_sdot_hi_saved 0
+        if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
+             || [istarget ia64-*-*]
+             || [istarget i?86-*-*]
+             || [istarget x86_64-*-*] } {
+            set et_vect_sdot_hi_saved 1
+        }
+    }
+    verbose "check_effective_target_vect_sdot_hi: returning $et_vect_sdot_hi_saved" 2
+    return $et_vect_sdot_hi_saved
+}
+
+# Return 1 if the target plus current options supports a vector
+# dot-product of unsigned shorts, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+proc check_effective_target_vect_udot_hi { } {
+    global et_vect_udot_hi_saved
+
+    if [info exists et_vect_udot_hi_saved] {
+        verbose "check_effective_target_vect_udot_hi: using cached result" 2
+    } else {
+        set et_vect_udot_hi_saved 0
+        if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*]) } {
+            set et_vect_udot_hi_saved 1
+        }
+    }
+    verbose "check_effective_target_vect_udot_hi: returning $et_vect_udot_hi_saved" 2
+    return $et_vect_udot_hi_saved
+}
+
+
+# Return 1 if the target plus current options supports a vector
+# demotion (packing) of shorts (to chars) and ints (to shorts)
+# using modulo arithmetic, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
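+#
+# Illustrative use (example only):
+#   /* { dg-require-effective-target vect_pack_trunc } */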
+ +proc check_effective_target_vect_pack_trunc { } { + global et_vect_pack_trunc + + if [info exists et_vect_pack_trunc_saved] { + verbose "check_effective_target_vect_pack_trunc: using cached result" 2 + } else { + set et_vect_pack_trunc_saved 0 + if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*]) + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [istarget aarch64*-*-*] + || [istarget spu-*-*] + || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok] + && [check_effective_target_arm_little_endian]) } { + set et_vect_pack_trunc_saved 1 + } + } + verbose "check_effective_target_vect_pack_trunc: returning $et_vect_pack_trunc_saved" 2 + return $et_vect_pack_trunc_saved +} + +# Return 1 if the target plus current options supports a vector +# promotion (unpacking) of chars (to shorts) and shorts (to ints), 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_unpack { } { + global et_vect_unpack + + if [info exists et_vect_unpack_saved] { + verbose "check_effective_target_vect_unpack: using cached result" 2 + } else { + set et_vect_unpack_saved 0 + if { ([istarget powerpc*-*-*] && ![istarget powerpc-*paired*]) + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [istarget spu-*-*] + || [istarget ia64-*-*] + || [istarget aarch64*-*-*] + || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok] + && [check_effective_target_arm_little_endian]) } { + set et_vect_unpack_saved 1 + } + } + verbose "check_effective_target_vect_unpack: returning $et_vect_unpack_saved" 2 + return $et_vect_unpack_saved +} + +# Return 1 if the target plus current options does not guarantee +# that its STACK_BOUNDARY is >= the reguired vector alignment. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_unaligned_stack { } { + global et_unaligned_stack_saved + + if [info exists et_unaligned_stack_saved] { + verbose "check_effective_target_unaligned_stack: using cached result" 2 + } else { + set et_unaligned_stack_saved 0 + } + verbose "check_effective_target_unaligned_stack: returning $et_unaligned_stack_saved" 2 + return $et_unaligned_stack_saved +} + +# Return 1 if the target plus current options does not support a vector +# alignment mechanism, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_no_align { } { + global et_vect_no_align_saved + + if [info exists et_vect_no_align_saved] { + verbose "check_effective_target_vect_no_align: using cached result" 2 + } else { + set et_vect_no_align_saved 0 + if { [istarget mipsisa64*-*-*] + || [istarget mips-sde-elf] + || [istarget sparc*-*-*] + || [istarget ia64-*-*] + || [check_effective_target_arm_vect_no_misalign] + || ([istarget mips*-*-*] + && [check_effective_target_mips_loongson]) } { + set et_vect_no_align_saved 1 + } + } + verbose "check_effective_target_vect_no_align: returning $et_vect_no_align_saved" 2 + return $et_vect_no_align_saved +} + +# Return 1 if the target supports a vector misalign access, 0 otherwise. +# +# This won't change for different subtargets so cache the result. 
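+#
+# Tests that check for alignment peeling often invert this keyword, e.g.
+# (illustrative example only; the dump string is just an example):
+#   /* { dg-final { scan-tree-dump "Alignment of access forced using peeling" "vect" { target { ! vect_hw_misalign } } } } */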
+ +proc check_effective_target_vect_hw_misalign { } { + global et_vect_hw_misalign_saved + + if [info exists et_vect_hw_misalign_saved] { + verbose "check_effective_target_vect_hw_misalign: using cached result" 2 + } else { + set et_vect_hw_misalign_saved 0 + if { ([istarget x86_64-*-*] + || [istarget aarch64*-*-*] + || [istarget i?86-*-*]) } { + set et_vect_hw_misalign_saved 1 + } + } + verbose "check_effective_target_vect_hw_misalign: returning $et_vect_hw_misalign_saved" 2 + return $et_vect_hw_misalign_saved +} + + +# Return 1 if arrays are aligned to the vector alignment +# boundary, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vect_aligned_arrays { } { + global et_vect_aligned_arrays + + if [info exists et_vect_aligned_arrays_saved] { + verbose "check_effective_target_vect_aligned_arrays: using cached result" 2 + } else { + set et_vect_aligned_arrays_saved 0 + if { ([istarget x86_64-*-*] || [istarget i?86-*-*]) } { + if { ([is-effective-target lp64] + && ( ![check_avx_available] + || [check_prefer_avx128])) } { + set et_vect_aligned_arrays_saved 1 + } + } + if [istarget spu-*-*] { + set et_vect_aligned_arrays_saved 1 + } + } + verbose "check_effective_target_vect_aligned_arrays: returning $et_vect_aligned_arrays_saved" 2 + return $et_vect_aligned_arrays_saved +} + +# Return 1 if types of size 32 bit or less are naturally aligned +# (aligned to their type-size), 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_natural_alignment_32 { } { + global et_natural_alignment_32 + + if [info exists et_natural_alignment_32_saved] { + verbose "check_effective_target_natural_alignment_32: using cached result" 2 + } else { + # FIXME: 32bit powerpc: guaranteed only if MASK_ALIGN_NATURAL/POWER. + set et_natural_alignment_32_saved 1 + if { ([istarget *-*-darwin*] && [is-effective-target lp64]) } { + set et_natural_alignment_32_saved 0 + } + } + verbose "check_effective_target_natural_alignment_32: returning $et_natural_alignment_32_saved" 2 + return $et_natural_alignment_32_saved +} + +# Return 1 if types of size 64 bit or less are naturally aligned (aligned to their +# type-size), 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_natural_alignment_64 { } { + global et_natural_alignment_64 + + if [info exists et_natural_alignment_64_saved] { + verbose "check_effective_target_natural_alignment_64: using cached result" 2 + } else { + set et_natural_alignment_64_saved 0 + if { ([is-effective-target lp64] && ![istarget *-*-darwin*]) + || [istarget spu-*-*] } { + set et_natural_alignment_64_saved 1 + } + } + verbose "check_effective_target_natural_alignment_64: returning $et_natural_alignment_64_saved" 2 + return $et_natural_alignment_64_saved +} + +# Return 1 if all vector types are naturally aligned (aligned to their +# type-size), 0 otherwise. +# +# This won't change for different subtargets so cache the result. 
+ +proc check_effective_target_vect_natural_alignment { } { + global et_vect_natural_alignment + + if [info exists et_vect_natural_alignment_saved] { + verbose "check_effective_target_vect_natural_alignment: using cached result" 2 + } else { + set et_vect_natural_alignment_saved 1 + if { [check_effective_target_arm_eabi] } { + set et_vect_natural_alignment_saved 0 + } + } + verbose "check_effective_target_vect_natural_alignment: returning $et_vect_natural_alignment_saved" 2 + return $et_vect_natural_alignment_saved +} + +# Return 1 if vector alignment (for types of size 32 bit or less) is reachable, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vector_alignment_reachable { } { + global et_vector_alignment_reachable + + if [info exists et_vector_alignment_reachable_saved] { + verbose "check_effective_target_vector_alignment_reachable: using cached result" 2 + } else { + if { [check_effective_target_vect_aligned_arrays] + || [check_effective_target_natural_alignment_32] } { + set et_vector_alignment_reachable_saved 1 + } else { + set et_vector_alignment_reachable_saved 0 + } + } + verbose "check_effective_target_vector_alignment_reachable: returning $et_vector_alignment_reachable_saved" 2 + return $et_vector_alignment_reachable_saved +} + +# Return 1 if vector alignment for 64 bit is reachable, 0 otherwise. +# +# This won't change for different subtargets so cache the result. + +proc check_effective_target_vector_alignment_reachable_for_64bit { } { + global et_vector_alignment_reachable_for_64bit + + if [info exists et_vector_alignment_reachable_for_64bit_saved] { + verbose "check_effective_target_vector_alignment_reachable_for_64bit: using cached result" 2 + } else { + if { [check_effective_target_vect_aligned_arrays] + || [check_effective_target_natural_alignment_64] } { + set et_vector_alignment_reachable_for_64bit_saved 1 + } else { + set et_vector_alignment_reachable_for_64bit_saved 0 + } + } + verbose "check_effective_target_vector_alignment_reachable_for_64bit: returning $et_vector_alignment_reachable_for_64bit_saved" 2 + return $et_vector_alignment_reachable_for_64bit_saved +} + +# Return 1 if the target only requires element alignment for vector accesses + +proc check_effective_target_vect_element_align { } { + global et_vect_element_align + + if [info exists et_vect_element_align] { + verbose "check_effective_target_vect_element_align: using cached result" 2 + } else { + set et_vect_element_align 0 + if { ([istarget arm*-*-*] + && ![check_effective_target_arm_vect_no_misalign]) + || [check_effective_target_vect_hw_misalign] } { + set et_vect_element_align 1 + } + } + + verbose "check_effective_target_vect_element_align: returning $et_vect_element_align" 2 + return $et_vect_element_align +} + +# Return 1 if the target supports vector conditional operations, 0 otherwise. 
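+#
+# For illustration (hypothetical testcase line, not from this patch): a test
+# relying on this keyword would typically be guarded with
+#   /* { dg-require-effective-target vect_condition } */
+# so it is skipped where the check below returns 0.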
+ +proc check_effective_target_vect_condition { } { + global et_vect_cond_saved + + if [info exists et_vect_cond_saved] { + verbose "check_effective_target_vect_cond: using cached result" 2 + } else { + set et_vect_cond_saved 0 + if { [istarget aarch64*-*-*] + || [istarget powerpc*-*-*] + || [istarget ia64-*-*] + || [istarget i?86-*-*] + || [istarget spu-*-*] + || [istarget x86_64-*-*] + || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]) } { + set et_vect_cond_saved 1 + } + } + + verbose "check_effective_target_vect_cond: returning $et_vect_cond_saved" 2 + return $et_vect_cond_saved +} + +# Return 1 if the target supports vector conditional operations where +# the comparison has different type from the lhs, 0 otherwise. + +proc check_effective_target_vect_cond_mixed { } { + global et_vect_cond_mixed_saved + + if [info exists et_vect_cond_mixed_saved] { + verbose "check_effective_target_vect_cond_mixed: using cached result" 2 + } else { + set et_vect_cond_mixed_saved 0 + if { [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [istarget powerpc*-*-*] } { + set et_vect_cond_mixed_saved 1 + } + } + + verbose "check_effective_target_vect_cond_mixed: returning $et_vect_cond_mixed_saved" 2 + return $et_vect_cond_mixed_saved +} + +# Return 1 if the target supports vector char multiplication, 0 otherwise. + +proc check_effective_target_vect_char_mult { } { + global et_vect_char_mult_saved + + if [info exists et_vect_char_mult_saved] { + verbose "check_effective_target_vect_char_mult: using cached result" 2 + } else { + set et_vect_char_mult_saved 0 + if { [istarget aarch64*-*-*] + || [istarget ia64-*-*] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [check_effective_target_arm32] } { + set et_vect_char_mult_saved 1 + } + } + + verbose "check_effective_target_vect_char_mult: returning $et_vect_char_mult_saved" 2 + return $et_vect_char_mult_saved +} + +# Return 1 if the target supports vector short multiplication, 0 otherwise. + +proc check_effective_target_vect_short_mult { } { + global et_vect_short_mult_saved + + if [info exists et_vect_short_mult_saved] { + verbose "check_effective_target_vect_short_mult: using cached result" 2 + } else { + set et_vect_short_mult_saved 0 + if { [istarget ia64-*-*] + || [istarget spu-*-*] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [istarget powerpc*-*-*] + || [istarget aarch64*-*-*] + || [check_effective_target_arm32] + || ([istarget mips*-*-*] + && [check_effective_target_mips_loongson]) } { + set et_vect_short_mult_saved 1 + } + } + + verbose "check_effective_target_vect_short_mult: returning $et_vect_short_mult_saved" 2 + return $et_vect_short_mult_saved +} + +# Return 1 if the target supports vector int multiplication, 0 otherwise. + +proc check_effective_target_vect_int_mult { } { + global et_vect_int_mult_saved + + if [info exists et_vect_int_mult_saved] { + verbose "check_effective_target_vect_int_mult: using cached result" 2 + } else { + set et_vect_int_mult_saved 0 + if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*]) + || [istarget spu-*-*] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [istarget ia64-*-*] + || [istarget aarch64*-*-*] + || [check_effective_target_arm32] } { + set et_vect_int_mult_saved 1 + } + } + + verbose "check_effective_target_vect_int_mult: returning $et_vect_int_mult_saved" 2 + return $et_vect_int_mult_saved +} + +# Return 1 if the target supports vector even/odd elements extraction, 0 otherwise. 
+ +proc check_effective_target_vect_extract_even_odd { } { + global et_vect_extract_even_odd_saved + + if [info exists et_vect_extract_even_odd_saved] { + verbose "check_effective_target_vect_extract_even_odd: using cached result" 2 + } else { + set et_vect_extract_even_odd_saved 0 + if { [istarget aarch64*-*-*] + || [istarget powerpc*-*-*] + || [is-effective-target arm_neon_ok] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [istarget ia64-*-*] + || [istarget spu-*-*] + || ([istarget mips*-*-*] + && [check_effective_target_mpaired_single]) } { + set et_vect_extract_even_odd_saved 1 + } + } + + verbose "check_effective_target_vect_extract_even_odd: returning $et_vect_extract_even_odd_saved" 2 + return $et_vect_extract_even_odd_saved +} + +# Return 1 if the target supports vector interleaving, 0 otherwise. + +proc check_effective_target_vect_interleave { } { + global et_vect_interleave_saved + + if [info exists et_vect_interleave_saved] { + verbose "check_effective_target_vect_interleave: using cached result" 2 + } else { + set et_vect_interleave_saved 0 + if { [istarget aarch64*-*-*] + || [istarget powerpc*-*-*] + || [is-effective-target arm_neon_ok] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [istarget ia64-*-*] + || [istarget spu-*-*] + || ([istarget mips*-*-*] + && [check_effective_target_mpaired_single]) } { + set et_vect_interleave_saved 1 + } + } + + verbose "check_effective_target_vect_interleave: returning $et_vect_interleave_saved" 2 + return $et_vect_interleave_saved +} + +foreach N {2 3 4 8} { + eval [string map [list N $N] { + # Return 1 if the target supports 2-vector interleaving + proc check_effective_target_vect_stridedN { } { + global et_vect_stridedN_saved + + if [info exists et_vect_stridedN_saved] { + verbose "check_effective_target_vect_stridedN: using cached result" 2 + } else { + set et_vect_stridedN_saved 0 + if { (N & -N) == N + && [check_effective_target_vect_interleave] + && [check_effective_target_vect_extract_even_odd] } { + set et_vect_stridedN_saved 1 + } + if { ([istarget arm*-*-*] + || [istarget aarch64*-*-*]) && N >= 2 && N <= 4 } { + set et_vect_stridedN_saved 1 + } + } + + verbose "check_effective_target_vect_stridedN: returning $et_vect_stridedN_saved" 2 + return $et_vect_stridedN_saved + } + }] +} + +# Return 1 if the target supports multiple vector sizes + +proc check_effective_target_vect_multiple_sizes { } { + global et_vect_multiple_sizes_saved + + set et_vect_multiple_sizes_saved 0 + if { ([istarget aarch64*-*-*] + || ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok])) } { + set et_vect_multiple_sizes_saved 1 + } + if { ([istarget x86_64-*-*] || [istarget i?86-*-*]) } { + if { ([check_avx_available] && ![check_prefer_avx128]) } { + set et_vect_multiple_sizes_saved 1 + } + } + + verbose "check_effective_target_vect_multiple_sizes: returning $et_vect_multiple_sizes_saved" 2 + return $et_vect_multiple_sizes_saved +} + +# Return 1 if the target supports vectors of 64 bits. + +proc check_effective_target_vect64 { } { + global et_vect64_saved + + if [info exists et_vect64_saved] { + verbose "check_effective_target_vect64: using cached result" 2 + } else { + set et_vect64_saved 0 + if { ([istarget arm*-*-*] + && [check_effective_target_arm_neon_ok] + && [check_effective_target_arm_little_endian]) } { + set et_vect64_saved 1 + } + } + + verbose "check_effective_target_vect64: returning $et_vect64_saved" 2 + return $et_vect64_saved +} + +# Return 1 if the target supports vector copysignf calls. 
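+#
+# Note on the foreach loop above (illustration only): [string map] replaces
+# the literal N before the body is evaluated, so for N=2 it effectively
+# defines
+#   proc check_effective_target_vect_strided2 { } { ... }
+# which tests can request with { dg-require-effective-target vect_strided2 }.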
+ +proc check_effective_target_vect_call_copysignf { } { + global et_vect_call_copysignf_saved + + if [info exists et_vect_call_copysignf_saved] { + verbose "check_effective_target_vect_call_copysignf: using cached result" 2 + } else { + set et_vect_call_copysignf_saved 0 + if { [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [istarget powerpc*-*-*] } { + set et_vect_call_copysignf_saved 1 + } + } + + verbose "check_effective_target_vect_call_copysignf: returning $et_vect_call_copysignf_saved" 2 + return $et_vect_call_copysignf_saved +} + +# Return 1 if the target supports vector sqrtf calls. + +proc check_effective_target_vect_call_sqrtf { } { + global et_vect_call_sqrtf_saved + + if [info exists et_vect_call_sqrtf_saved] { + verbose "check_effective_target_vect_call_sqrtf: using cached result" 2 + } else { + set et_vect_call_sqrtf_saved 0 + if { [istarget aarch64*-*-*] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || ([istarget powerpc*-*-*] && [check_vsx_hw_available]) } { + set et_vect_call_sqrtf_saved 1 + } + } + + verbose "check_effective_target_vect_call_sqrtf: returning $et_vect_call_sqrtf_saved" 2 + return $et_vect_call_sqrtf_saved +} + +# Return 1 if the target supports vector lrint calls. + +proc check_effective_target_vect_call_lrint { } { + set et_vect_call_lrint 0 + if { ([istarget i?86-*-*] || [istarget x86_64-*-*]) && [check_effective_target_ilp32] } { + set et_vect_call_lrint 1 + } + + verbose "check_effective_target_vect_call_lrint: returning $et_vect_call_lrint" 2 + return $et_vect_call_lrint +} + +# Return 1 if the target supports vector btrunc calls. + +proc check_effective_target_vect_call_btrunc { } { + global et_vect_call_btrunc_saved + + if [info exists et_vect_call_btrunc_saved] { + verbose "check_effective_target_vect_call_btrunc: using cached result" 2 + } else { + set et_vect_call_btrunc_saved 0 + if { [istarget aarch64*-*-*] } { + set et_vect_call_btrunc_saved 1 + } + } + + verbose "check_effective_target_vect_call_btrunc: returning $et_vect_call_btrunc_saved" 2 + return $et_vect_call_btrunc_saved +} + +# Return 1 if the target supports vector btruncf calls. + +proc check_effective_target_vect_call_btruncf { } { + global et_vect_call_btruncf_saved + + if [info exists et_vect_call_btruncf_saved] { + verbose "check_effective_target_vect_call_btruncf: using cached result" 2 + } else { + set et_vect_call_btruncf_saved 0 + if { [istarget aarch64*-*-*] } { + set et_vect_call_btruncf_saved 1 + } + } + + verbose "check_effective_target_vect_call_btruncf: returning $et_vect_call_btruncf_saved" 2 + return $et_vect_call_btruncf_saved +} + +# Return 1 if the target supports vector ceil calls. + +proc check_effective_target_vect_call_ceil { } { + global et_vect_call_ceil_saved + + if [info exists et_vect_call_ceil_saved] { + verbose "check_effective_target_vect_call_ceil: using cached result" 2 + } else { + set et_vect_call_ceil_saved 0 + if { [istarget aarch64*-*-*] } { + set et_vect_call_ceil_saved 1 + } + } + + verbose "check_effective_target_vect_call_ceil: returning $et_vect_call_ceil_saved" 2 + return $et_vect_call_ceil_saved +} + +# Return 1 if the target supports vector ceilf calls. 
+ +proc check_effective_target_vect_call_ceilf { } { + global et_vect_call_ceilf_saved + + if [info exists et_vect_call_ceilf_saved] { + verbose "check_effective_target_vect_call_ceilf: using cached result" 2 + } else { + set et_vect_call_ceilf_saved 0 + if { [istarget aarch64*-*-*] } { + set et_vect_call_ceilf_saved 1 + } + } + + verbose "check_effective_target_vect_call_ceilf: returning $et_vect_call_ceilf_saved" 2 + return $et_vect_call_ceilf_saved +} + +# Return 1 if the target supports vector floor calls. + +proc check_effective_target_vect_call_floor { } { + global et_vect_call_floor_saved + + if [info exists et_vect_call_floor_saved] { + verbose "check_effective_target_vect_call_floor: using cached result" 2 + } else { + set et_vect_call_floor_saved 0 + if { [istarget aarch64*-*-*] } { + set et_vect_call_floor_saved 1 + } + } + + verbose "check_effective_target_vect_call_floor: returning $et_vect_call_floor_saved" 2 + return $et_vect_call_floor_saved +} + +# Return 1 if the target supports vector floorf calls. + +proc check_effective_target_vect_call_floorf { } { + global et_vect_call_floorf_saved + + if [info exists et_vect_call_floorf_saved] { + verbose "check_effective_target_vect_call_floorf: using cached result" 2 + } else { + set et_vect_call_floorf_saved 0 + if { [istarget aarch64*-*-*] } { + set et_vect_call_floorf_saved 1 + } + } + + verbose "check_effective_target_vect_call_floorf: returning $et_vect_call_floorf_saved" 2 + return $et_vect_call_floorf_saved +} + +# Return 1 if the target supports vector lceil calls. + +proc check_effective_target_vect_call_lceil { } { + global et_vect_call_lceil_saved + + if [info exists et_vect_call_lceil_saved] { + verbose "check_effective_target_vect_call_lceil: using cached result" 2 + } else { + set et_vect_call_lceil_saved 0 + if { [istarget aarch64*-*-*] } { + set et_vect_call_lceil_saved 1 + } + } + + verbose "check_effective_target_vect_call_lceil: returning $et_vect_call_lceil_saved" 2 + return $et_vect_call_lceil_saved +} + +# Return 1 if the target supports vector lfloor calls. + +proc check_effective_target_vect_call_lfloor { } { + global et_vect_call_lfloor_saved + + if [info exists et_vect_call_lfloor_saved] { + verbose "check_effective_target_vect_call_lfloor: using cached result" 2 + } else { + set et_vect_call_lfloor_saved 0 + if { [istarget aarch64*-*-*] } { + set et_vect_call_lfloor_saved 1 + } + } + + verbose "check_effective_target_vect_call_lfloor: returning $et_vect_call_lfloor_saved" 2 + return $et_vect_call_lfloor_saved +} + +# Return 1 if the target supports vector nearbyint calls. + +proc check_effective_target_vect_call_nearbyint { } { + global et_vect_call_nearbyint_saved + + if [info exists et_vect_call_nearbyint_saved] { + verbose "check_effective_target_vect_call_nearbyint: using cached result" 2 + } else { + set et_vect_call_nearbyint_saved 0 + if { [istarget aarch64*-*-*] } { + set et_vect_call_nearbyint_saved 1 + } + } + + verbose "check_effective_target_vect_call_nearbyint: returning $et_vect_call_nearbyint_saved" 2 + return $et_vect_call_nearbyint_saved +} + +# Return 1 if the target supports vector nearbyintf calls. 
+
+proc check_effective_target_vect_call_nearbyintf { } {
+    global et_vect_call_nearbyintf_saved
+
+    if [info exists et_vect_call_nearbyintf_saved] {
+        verbose "check_effective_target_vect_call_nearbyintf: using cached result" 2
+    } else {
+        set et_vect_call_nearbyintf_saved 0
+        if { [istarget aarch64*-*-*] } {
+            set et_vect_call_nearbyintf_saved 1
+        }
+    }
+
+    verbose "check_effective_target_vect_call_nearbyintf: returning $et_vect_call_nearbyintf_saved" 2
+    return $et_vect_call_nearbyintf_saved
+}
+
+# Return 1 if the target supports vector round calls.
+
+proc check_effective_target_vect_call_round { } {
+    global et_vect_call_round_saved
+
+    if [info exists et_vect_call_round_saved] {
+        verbose "check_effective_target_vect_call_round: using cached result" 2
+    } else {
+        set et_vect_call_round_saved 0
+        if { [istarget aarch64*-*-*] } {
+            set et_vect_call_round_saved 1
+        }
+    }
+
+    verbose "check_effective_target_vect_call_round: returning $et_vect_call_round_saved" 2
+    return $et_vect_call_round_saved
+}
+
+# Return 1 if the target supports vector roundf calls.
+
+proc check_effective_target_vect_call_roundf { } {
+    global et_vect_call_roundf_saved
+
+    if [info exists et_vect_call_roundf_saved] {
+        verbose "check_effective_target_vect_call_roundf: using cached result" 2
+    } else {
+        set et_vect_call_roundf_saved 0
+        if { [istarget aarch64*-*-*] } {
+            set et_vect_call_roundf_saved 1
+        }
+    }
+
+    verbose "check_effective_target_vect_call_roundf: returning $et_vect_call_roundf_saved" 2
+    return $et_vect_call_roundf_saved
+}
+
+# Return 1 if the target supports section-anchors
+
+proc check_effective_target_section_anchors { } {
+    global et_section_anchors_saved
+
+    if [info exists et_section_anchors_saved] {
+        verbose "check_effective_target_section_anchors: using cached result" 2
+    } else {
+        set et_section_anchors_saved 0
+        if { [istarget powerpc*-*-*]
+             || [istarget arm*-*-*] } {
+            set et_section_anchors_saved 1
+        }
+    }
+
+    verbose "check_effective_target_section_anchors: returning $et_section_anchors_saved" 2
+    return $et_section_anchors_saved
+}
+
+# Return 1 if the target supports atomic operations on "int_128" values.
+
+proc check_effective_target_sync_int_128 { } {
+    if { ([istarget x86_64-*-*] || [istarget i?86-*-*])
+         && ![is-effective-target ia32] } {
+        return 1
+    } else {
+        return 0
+    }
+}
+
+# Return 1 if the target supports atomic operations on "int_128" values
+# and can execute them.
+
+proc check_effective_target_sync_int_128_runtime { } {
+    if { ([istarget x86_64-*-*] || [istarget i?86-*-*])
+         && ![is-effective-target ia32] } {
+        return [check_cached_effective_target sync_int_128_available {
+            check_runtime_nocache sync_int_128_available {
+                #include "cpuid.h"
+                int main ()
+                {
+                  unsigned int eax, ebx, ecx, edx;
+                  if (__get_cpuid (1, &eax, &ebx, &ecx, &edx))
+                    return !(ecx & bit_CMPXCHG16B);
+                  return 1;
+                }
+            } ""
+        }]
+    } else {
+        return 0
+    }
+}
+
+# Return 1 if the target supports atomic operations on "long long".
+#
+# Note: 32bit x86 targets require -march=pentium in dg-options.
+
+proc check_effective_target_sync_long_long { } {
+    if { [istarget x86_64-*-*]
+         || [istarget i?86-*-*]
+         || [istarget aarch64*-*-*]
+         || [istarget arm*-*-*]
+         || [istarget alpha*-*-*]
+         || ([istarget sparc*-*-*] && [check_effective_target_lp64]) } {
+        return 1
+    } else {
+        return 0
+    }
+}
+
+# Return 1 if the target supports atomic operations on "long long"
+# and can execute them.
+#
+# Note: 32bit x86 targets require -march=pentium in dg-options.
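+#
+# For illustration (hypothetical testcase lines, not from this patch), such
+# a test would typically contain
+#   /* { dg-require-effective-target sync_long_long_runtime } */
+# together with -march=pentium in its dg-options for 32-bit x86.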
+ +proc check_effective_target_sync_long_long_runtime { } { + if { [istarget x86_64-*-*] + || [istarget i?86-*-*] } { + return [check_cached_effective_target sync_long_long_available { + check_runtime_nocache sync_long_long_available { + #include "cpuid.h" + int main () + { + unsigned int eax, ebx, ecx, edx; + if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)) + return !(edx & bit_CMPXCHG8B); + return 1; + } + } "" + }] + } elseif { [istarget aarch64*-*-*] } { + return 1 + } elseif { [istarget arm*-*-linux-*] } { + return [check_runtime sync_longlong_runtime { + #include + int main () + { + long long l1; + + if (sizeof (long long) != 8) + exit (1); + + /* Just check for native; checking for kernel fallback is tricky. */ + asm volatile ("ldrexd r0,r1, [%0]" : : "r" (&l1) : "r0", "r1"); + + exit (0); + } + } "" ] + } elseif { [istarget alpha*-*-*] } { + return 1 + } elseif { ([istarget sparc*-*-*] + && [check_effective_target_lp64] + && [check_effective_target_ultrasparc_hw]) } { + return 1 + } elseif { [istarget powerpc*-*-*] && [check_effective_target_lp64] } { + return 1 + } else { + return 0 + } +} + +# Return 1 if the target supports atomic operations on "int" and "long". + +proc check_effective_target_sync_int_long { } { + global et_sync_int_long_saved + + if [info exists et_sync_int_long_saved] { + verbose "check_effective_target_sync_int_long: using cached result" 2 + } else { + set et_sync_int_long_saved 0 +# This is intentionally powerpc but not rs6000, rs6000 doesn't have the +# load-reserved/store-conditional instructions. + if { [istarget ia64-*-*] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [istarget aarch64*-*-*] + || [istarget alpha*-*-*] + || [istarget arm*-*-linux-*] + || [istarget bfin*-*linux*] + || [istarget hppa*-*linux*] + || [istarget s390*-*-*] + || [istarget powerpc*-*-*] + || [istarget crisv32-*-*] || [istarget cris-*-*] + || ([istarget sparc*-*-*] && [check_effective_target_sparc_v9]) + || [check_effective_target_mips_llsc] } { + set et_sync_int_long_saved 1 + } + } + + verbose "check_effective_target_sync_int_long: returning $et_sync_int_long_saved" 2 + return $et_sync_int_long_saved +} + +# Return 1 if the target supports atomic operations on "char" and "short". + +proc check_effective_target_sync_char_short { } { + global et_sync_char_short_saved + + if [info exists et_sync_char_short_saved] { + verbose "check_effective_target_sync_char_short: using cached result" 2 + } else { + set et_sync_char_short_saved 0 +# This is intentionally powerpc but not rs6000, rs6000 doesn't have the +# load-reserved/store-conditional instructions. + if { [istarget aarch64*-*-*] + || [istarget ia64-*-*] + || [istarget i?86-*-*] + || [istarget x86_64-*-*] + || [istarget alpha*-*-*] + || [istarget arm*-*-linux-*] + || [istarget hppa*-*linux*] + || [istarget s390*-*-*] + || [istarget powerpc*-*-*] + || [istarget crisv32-*-*] || [istarget cris-*-*] + || ([istarget sparc*-*-*] && [check_effective_target_sparc_v9]) + || [check_effective_target_mips_llsc] } { + set et_sync_char_short_saved 1 + } + } + + verbose "check_effective_target_sync_char_short: returning $et_sync_char_short_saved" 2 + return $et_sync_char_short_saved +} + +# Return 1 if the target uses a ColdFire FPU. + +proc check_effective_target_coldfire_fpu { } { + return [check_no_compiler_messages coldfire_fpu assembly { + #ifndef __mcffpu__ + #error FOO + #endif + }] +} + +# Return true if this is a uClibc target. 
+
+proc check_effective_target_uclibc {} {
+    return [check_no_compiler_messages uclibc object {
+        #include <features.h>
+        #if !defined (__UCLIBC__)
+        #error FOO
+        #endif
+    }]
+}
+
+# Return true if this is a uclibc target and if the uclibc feature
+# described by __$feature__ is not present.
+
+proc check_missing_uclibc_feature {feature} {
+    return [check_no_compiler_messages $feature object "
+        #include <features.h>
+        #if !defined (__UCLIBC) || defined (__${feature}__)
+        #error FOO
+        #endif
+    "]
+}
+
+# Return true if this is a Newlib target.
+
+proc check_effective_target_newlib {} {
+    return [check_no_compiler_messages newlib object {
+        #include <newlib.h>
+    }]
+}
+
+# Return true if this is NOT a Bionic target.
+
+proc check_effective_target_non_bionic {} {
+    return [check_no_compiler_messages non_bionic object {
+        #include <ctype.h>
+        #if defined (__BIONIC__)
+        #error FOO
+        #endif
+    }]
+}
+
+# Return 1 if
+# (a) an error of a few ULP is expected in string to floating-point
+#     conversion functions; and
+# (b) overflow is not always detected correctly by those functions.
+
+proc check_effective_target_lax_strtofp {} {
+    # By default, assume that all uClibc targets suffer from this.
+    return [check_effective_target_uclibc]
+}
+
+# Return 1 if this is a target for which wcsftime is a dummy
+# function that always returns 0.
+
+proc check_effective_target_dummy_wcsftime {} {
+    # By default, assume that all uClibc targets suffer from this.
+    return [check_effective_target_uclibc]
+}
+
+# Return 1 if constructors with initialization priority arguments are
+# supported on this target.
+
+proc check_effective_target_init_priority {} {
+    return [check_no_compiler_messages init_priority assembly "
+        void f() __attribute__((constructor (1000)));
+        void f() \{\}
+    "]
+}
+
+# Return 1 if the target matches the effective target 'arg', 0 otherwise.
+# This can be used with any check_* proc that takes no argument and
+# returns only 1 or 0.  It could be used with check_* procs that take
+# arguments with keywords that pass particular arguments.
+
+proc is-effective-target { arg } {
+    set selected 0
+    if { [info procs check_effective_target_${arg}] != [list] } {
+        set selected [check_effective_target_${arg}]
+    } else {
+        switch $arg {
+            "vmx_hw"         { set selected [check_vmx_hw_available] }
+            "vsx_hw"         { set selected [check_vsx_hw_available] }
+            "p8vector_hw"    { set selected [check_p8vector_hw_available] }
+            "ppc_recip_hw"   { set selected [check_ppc_recip_hw_available] }
+            "named_sections" { set selected [check_named_sections_available] }
+            "gc_sections"    { set selected [check_gc_sections_available] }
+            "cxa_atexit"     { set selected [check_cxa_atexit_available] }
+            default          { error "unknown effective target keyword `$arg'" }
+        }
+    }
+    verbose "is-effective-target: $arg $selected" 2
+    return $selected
+}
+
+# Return 1 if the argument is an effective-target keyword, 0 otherwise.
+
+proc is-effective-target-keyword { arg } {
+    if { [info procs check_effective_target_${arg}] != [list] } {
+        return 1
+    } else {
+        # These have different names for their check_* procs.
+        switch $arg {
+            "vmx_hw"         { return 1 }
+            "vsx_hw"         { return 1 }
+            "p8vector_hw"    { return 1 }
+            "ppc_recip_hw"   { return 1 }
+            "named_sections" { return 1 }
+            "gc_sections"    { return 1 }
+            "cxa_atexit"     { return 1 }
+            default          { return 0 }
+        }
+    }
+}
+
+# Return 1 if the target defaults to short enums
+
+proc check_effective_target_short_enums { } {
+    return [check_no_compiler_messages short_enums assembly {
+        enum foo { bar };
+        int s[sizeof (enum foo) == 1 ? 1 : -1];
+    }]
+}
+
+# Return 1 if target supports merging string constants at link time.
+
+proc check_effective_target_string_merging { } {
+    return [check_no_messages_and_pattern string_merging \
+                "rodata\\.str" assembly {
+                    const char *var = "String";
+                } {-O2}]
+}
+
+# Return 1 if target has the basic signed and unsigned types in
+# <stdint.h>, 0 otherwise.  This will be obsolete when GCC ensures a
+# working <stdint.h> for all targets.
+
+proc check_effective_target_stdint_types { } {
+    return [check_no_compiler_messages stdint_types assembly {
+        #include <stdint.h>
+        int8_t a; int16_t b; int32_t c; int64_t d;
+        uint8_t e; uint16_t f; uint32_t g; uint64_t h;
+    }]
+}
+
+# Return 1 if target has the basic signed and unsigned types in
+# <inttypes.h>, 0 otherwise.  This is for tests that GCC's notions of
+# these types agree with those in the header, as some systems have
+# only <inttypes.h>.
+
+proc check_effective_target_inttypes_types { } {
+    return [check_no_compiler_messages inttypes_types assembly {
+        #include <inttypes.h>
+        int8_t a; int16_t b; int32_t c; int64_t d;
+        uint8_t e; uint16_t f; uint32_t g; uint64_t h;
+    }]
+}
+
+# Return 1 if programs are intended to be run on a simulator
+# (i.e. slowly) rather than hardware (i.e. fast).
+
+proc check_effective_target_simulator { } {
+
+    # All "src/sim" simulators set this one.
+    if [board_info target exists is_simulator] {
+        return [board_info target is_simulator]
+    }
+
+    # The "sid" simulators don't set that one, but at least they set
+    # this one.
+    if [board_info target exists slow_simulator] {
+        return [board_info target slow_simulator]
+    }
+
+    return 0
+}
+
+# Return 1 if programs are intended to be run on hardware rather than
+# on a simulator
+
+proc check_effective_target_hw { } {
+
+    # All "src/sim" simulators set this one.
+    if [board_info target exists is_simulator] {
+        if [board_info target is_simulator] {
+            return 0
+        } else {
+            return 1
+        }
+    }
+
+    # The "sid" simulators don't set that one, but at least they set
+    # this one.
+    if [board_info target exists slow_simulator] {
+        if [board_info target slow_simulator] {
+            return 0
+        } else {
+            return 1
+        }
+    }
+
+    return 1
+}
+
+# Return 1 if the target is a VxWorks kernel.
+
+proc check_effective_target_vxworks_kernel { } {
+    return [check_no_compiler_messages vxworks_kernel assembly {
+        #if !defined __vxworks || defined __RTP__
+        #error NO
+        #endif
+    }]
+}
+
+# Return 1 if the target is a VxWorks RTP.
+
+proc check_effective_target_vxworks_rtp { } {
+    return [check_no_compiler_messages vxworks_rtp assembly {
+        #if !defined __vxworks || !defined __RTP__
+        #error NO
+        #endif
+    }]
+}
+
+# Return 1 if the target is expected to provide wide character support.
+
+proc check_effective_target_wchar { } {
+    if {[check_missing_uclibc_feature UCLIBC_HAS_WCHAR]} {
+        return 0
+    }
+    return [check_no_compiler_messages wchar assembly {
+        #include <wchar.h>
+    }]
+}
+
+# Return 1 if the target has <pthread.h>.
+
+proc check_effective_target_pthread_h { } {
+    return [check_no_compiler_messages pthread_h assembly {
+        #include <pthread.h>
+    }]
+}
+
+# Return 1 if the target can truncate a file from a file-descriptor,
+# as used by libgfortran/io/unix.c:fd_truncate; i.e. ftruncate or
+# chsize.  We test for a trivially functional truncation; no stubs.
+# As libgfortran uses _FILE_OFFSET_BITS 64, we do too; it'll cause a
+# different function to be used.
+ +proc check_effective_target_fd_truncate { } { + set prog { + #define _FILE_OFFSET_BITS 64 + #include + #include + #include + int main () + { + FILE *f = fopen ("tst.tmp", "wb"); + int fd; + const char t[] = "test writing more than ten characters"; + char s[11]; + int status = 0; + fd = fileno (f); + write (fd, t, sizeof (t) - 1); + lseek (fd, 0, 0); + if (ftruncate (fd, 10) != 0) + status = 1; + close (fd); + fclose (f); + if (status) + { + unlink ("tst.tmp"); + exit (status); + } + f = fopen ("tst.tmp", "rb"); + if (fread (s, 1, sizeof (s), f) != 10 || strncmp (s, t, 10) != 0) + status = 1; + fclose (f); + unlink ("tst.tmp"); + exit (status); + } + } + + if { [check_runtime ftruncate $prog] } { + return 1; + } + + regsub "ftruncate" $prog "chsize" prog + return [check_runtime chsize $prog] +} + +# Add to FLAGS all the target-specific flags needed to access the c99 runtime. + +proc add_options_for_c99_runtime { flags } { + if { [istarget *-*-solaris2*] } { + return "$flags -std=c99" + } + if { [istarget powerpc-*-darwin*] } { + return "$flags -mmacosx-version-min=10.3" + } + return $flags +} + +# Add to FLAGS all the target-specific flags needed to enable +# full IEEE compliance mode. + +proc add_options_for_ieee { flags } { + if { [istarget alpha*-*-*] + || [istarget sh*-*-*] } { + return "$flags -mieee" + } + if { [istarget rx-*-*] } { + return "$flags -mnofpu" + } + return $flags +} + +# Add to FLAGS the flags needed to enable functions to bind locally +# when using pic/PIC passes in the testsuite. + +proc add_options_for_bind_pic_locally { flags } { + if {[check_no_compiler_messages using_pic2 assembly { + #if __PIC__ != 2 + #error FOO + #endif + }]} { + return "$flags -fPIE" + } + if {[check_no_compiler_messages using_pic1 assembly { + #if __PIC__ != 1 + #error FOO + #endif + }]} { + return "$flags -fpie" + } + + return $flags +} + +# Add to FLAGS the flags needed to enable 64-bit vectors. + +proc add_options_for_double_vectors { flags } { + if [is-effective-target arm_neon_ok] { + return "$flags -mvectorize-with-neon-double" + } + + return $flags +} + +# Return 1 if the target provides a full C99 runtime. + +proc check_effective_target_c99_runtime { } { + return [check_cached_effective_target c99_runtime { + global srcdir + + set file [open "$srcdir/gcc.dg/builtins-config.h"] + set contents [read $file] + close $file + append contents { + #ifndef HAVE_C99_RUNTIME + #error FOO + #endif + } + check_no_compiler_messages_nocache c99_runtime assembly \ + $contents [add_options_for_c99_runtime ""] + }] +} + +# Return 1 if target wchar_t is at least 4 bytes. + +proc check_effective_target_4byte_wchar_t { } { + return [check_no_compiler_messages 4byte_wchar_t object { + int dummy[sizeof (__WCHAR_TYPE__) >= 4 ? 1 : -1]; + }] +} + +# Return 1 if the target supports automatic stack alignment. + +proc check_effective_target_automatic_stack_alignment { } { + # Ordinarily x86 supports automatic stack alignment ... + if { [istarget i?86*-*-*] || [istarget x86_64-*-*] } then { + if { [istarget *-*-mingw*] || [istarget *-*-cygwin*] } { + # ... except Win64 SEH doesn't. Succeed for Win32 though. + return [check_effective_target_ilp32]; + } + return 1; + } + return 0; +} + +# Return true if we are compiling for AVX target. + +proc check_avx_available { } { + if { [check_no_compiler_messages avx_available assembly { + #ifndef __AVX__ + #error unsupported + #endif + } ""] } { + return 1; + } + return 0; +} + +# Return true if 32- and 16-bytes vectors are available. 
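+#
+# Illustrative note (example usage, not taken from the original file): the
+# add_options_for_* procs above are looked up by the dg-add-options
+# directive, so a test needing those flags would simply say
+#   /* { dg-add-options c99_runtime } */
+# or
+#   /* { dg-add-options ieee } */
+# and the corresponding proc appends the target-specific options.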
+ +proc check_effective_target_vect_sizes_32B_16B { } { + return [check_avx_available]; +} + +# Return true if 128-bits vectors are preferred even if 256-bits vectors +# are available. + +proc check_prefer_avx128 { } { + if ![check_avx_available] { + return 0; + } + return [check_no_messages_and_pattern avx_explicit "xmm" assembly { + float a[1024],b[1024],c[1024]; + void foo (void) { int i; for (i = 0; i < 1024; i++) a[i]=b[i]+c[i];} + } "-O2 -ftree-vectorize"] +} + + +# Return 1 if avx512f instructions can be compiled. + +proc check_effective_target_avx512f { } { + return [check_no_compiler_messages avx512f object { + typedef double __m512d __attribute__ ((__vector_size__ (64))); + + __m512d _mm512_add (__m512d a) + { + return __builtin_ia32_addpd512_mask (a, a, a, 1, 4); + } + } "-O2 -mavx512f" ] +} + +# Return 1 if avx instructions can be compiled. + +proc check_effective_target_avx { } { + return [check_no_compiler_messages avx object { + void _mm256_zeroall (void) + { + __builtin_ia32_vzeroall (); + } + } "-O2 -mavx" ] +} + +# Return 1 if avx2 instructions can be compiled. +proc check_effective_target_avx2 { } { + return [check_no_compiler_messages avx2 object { + typedef long long __v4di __attribute__ ((__vector_size__ (32))); + __v4di + mm256_is32_andnotsi256 (__v4di __X, __v4di __Y) + { + return __builtin_ia32_andnotsi256 (__X, __Y); + } + } "-O0 -mavx2" ] +} + +# Return 1 if sse instructions can be compiled. +proc check_effective_target_sse { } { + return [check_no_compiler_messages sse object { + int main () + { + __builtin_ia32_stmxcsr (); + return 0; + } + } "-O2 -msse" ] +} + +# Return 1 if sse2 instructions can be compiled. +proc check_effective_target_sse2 { } { + return [check_no_compiler_messages sse2 object { + typedef long long __m128i __attribute__ ((__vector_size__ (16))); + + __m128i _mm_srli_si128 (__m128i __A, int __N) + { + return (__m128i)__builtin_ia32_psrldqi128 (__A, 8); + } + } "-O2 -msse2" ] +} + +# Return 1 if F16C instructions can be compiled. + +proc check_effective_target_f16c { } { + return [check_no_compiler_messages f16c object { + #include "immintrin.h" + float + foo (unsigned short val) + { + return _cvtsh_ss (val); + } + } "-O2 -mf16c" ] +} + +# Return 1 if C wchar_t type is compatible with char16_t. + +proc check_effective_target_wchar_t_char16_t_compatible { } { + return [check_no_compiler_messages wchar_t_char16_t object { + __WCHAR_TYPE__ wc; + __CHAR16_TYPE__ *p16 = &wc; + char t[(((__CHAR16_TYPE__) -1) < 0 == ((__WCHAR_TYPE__) -1) < 0) ? 1 : -1]; + }] +} + +# Return 1 if C wchar_t type is compatible with char32_t. + +proc check_effective_target_wchar_t_char32_t_compatible { } { + return [check_no_compiler_messages wchar_t_char32_t object { + __WCHAR_TYPE__ wc; + __CHAR32_TYPE__ *p32 = &wc; + char t[(((__CHAR32_TYPE__) -1) < 0 == ((__WCHAR_TYPE__) -1) < 0) ? 1 : -1]; + }] +} + +# Return 1 if pow10 function exists. + +proc check_effective_target_pow10 { } { + return [check_runtime pow10 { + #include + int main () { + double x; + x = pow10 (1); + return 0; + } + } "-lm" ] +} + +# Return 1 if current options generate DFP instructions, 0 otherwise. + +proc check_effective_target_hard_dfp {} { + return [check_no_messages_and_pattern hard_dfp "!adddd3" assembly { + typedef float d64 __attribute__((mode(DD))); + d64 x, y, z; + void foo (void) { z = x + y; } + }] +} + +# Return 1 if string.h and wchar.h headers provide C++ requires overloads +# for strchr etc. functions. 
+ +proc check_effective_target_correct_iso_cpp_string_wchar_protos { } { + return [check_no_compiler_messages correct_iso_cpp_string_wchar_protos assembly { + #include + #include + #if !defined(__cplusplus) \ + || !defined(__CORRECT_ISO_CPP_STRING_H_PROTO) \ + || !defined(__CORRECT_ISO_CPP_WCHAR_H_PROTO) + ISO C++ correct string.h and wchar.h protos not supported. + #else + int i; + #endif + }] +} + +# Return 1 if GNU as is used. + +proc check_effective_target_gas { } { + global use_gas_saved + global tool + + if {![info exists use_gas_saved]} { + # Check if the as used by gcc is GNU as. + set gcc_as [lindex [${tool}_target_compile "-print-prog-name=as" "" "none" ""] 0] + # Provide /dev/null as input, otherwise gas times out reading from + # stdin. + set status [remote_exec host "$gcc_as" "-v /dev/null"] + set as_output [lindex $status 1] + if { [ string first "GNU" $as_output ] >= 0 } { + set use_gas_saved 1 + } else { + set use_gas_saved 0 + } + } + return $use_gas_saved +} + +# Return 1 if GNU ld is used. + +proc check_effective_target_gld { } { + global use_gld_saved + global tool + + if {![info exists use_gld_saved]} { + # Check if the ld used by gcc is GNU ld. + set gcc_ld [lindex [${tool}_target_compile "-print-prog-name=ld" "" "none" ""] 0] + set status [remote_exec host "$gcc_ld" "--version"] + set ld_output [lindex $status 1] + if { [ string first "GNU" $ld_output ] >= 0 } { + set use_gld_saved 1 + } else { + set use_gld_saved 0 + } + } + return $use_gld_saved +} + +# Return 1 if the compiler has been configure with link-time optimization +# (LTO) support. + +proc check_effective_target_lto { } { + global ENABLE_LTO + return [info exists ENABLE_LTO] +} + +# Return 1 if -mx32 -maddress-mode=short can compile, 0 otherwise. + +proc check_effective_target_maybe_x32 { } { + return [check_no_compiler_messages maybe_x32 object { + void foo (void) {} + } "-mx32 -maddress-mode=short"] +} + +# Return 1 if this target supports the -fsplit-stack option, 0 +# otherwise. + +proc check_effective_target_split_stack {} { + return [check_no_compiler_messages split_stack object { + void foo (void) { } + } "-fsplit-stack"] +} + +# Return 1 if this target supports the -masm=intel option, 0 +# otherwise + +proc check_effective_target_masm_intel {} { + return [check_no_compiler_messages masm_intel object { + extern void abort (void); + } "-masm=intel"] +} + +# Return 1 if the language for the compiler under test is C. + +proc check_effective_target_c { } { + global tool + if [string match $tool "gcc"] { + return 1 + } + return 0 +} + +# Return 1 if the language for the compiler under test is C++. + +proc check_effective_target_c++ { } { + global tool + if [string match $tool "g++"] { + return 1 + } + return 0 +} + +# Check whether the current active language standard supports the features +# of C++11/C++1y by checking for the presence of one of the -std +# flags. This assumes that the default for the compiler is C++98, and that +# there will never be multiple -std= arguments on the command line. 
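+#
+# For illustration (hypothetical testcase line, not from this patch): a g++
+# test that must only run under C++11 or later would use
+#   // { dg-require-effective-target c++11 }
+# while one restricted to the 1998 standard would test c++98_only.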
+proc check_effective_target_c++11_only { } { + if ![check_effective_target_c++] { + return 0 + } + return [check-flags { { } { } { -std=c++0x -std=gnu++0x -std=c++11 -std=gnu++11 } }] +} +proc check_effective_target_c++11 { } { + if [check_effective_target_c++11_only] { + return 1 + } + return [check_effective_target_c++1y] +} +proc check_effective_target_c++11_down { } { + if ![check_effective_target_c++] { + return 0 + } + return ![check_effective_target_c++1y] +} + +proc check_effective_target_c++1y_only { } { + if ![check_effective_target_c++] { + return 0 + } + return [check-flags { { } { } { -std=c++1y -std=gnu++1y -std=c++14 -std=gnu++14 } }] +} +proc check_effective_target_c++1y { } { + return [check_effective_target_c++1y_only] +} + +proc check_effective_target_c++98_only { } { + if ![check_effective_target_c++] { + return 0 + } + return ![check_effective_target_c++11] +} + +# Return 1 if expensive testcases should be run. + +proc check_effective_target_run_expensive_tests { } { + if { [getenv GCC_TEST_RUN_EXPENSIVE] != "" } { + return 1 + } + return 0 +} + +# Returns 1 if "mempcpy" is available on the target system. + +proc check_effective_target_mempcpy {} { + return [check_function_available "mempcpy"] +} + +# Check whether the vectorizer tests are supported by the target and +# append additional target-dependent compile flags to DEFAULT_VECTCFLAGS. +# Set dg-do-what-default to either compile or run, depending on target +# capabilities. Return 1 if vectorizer tests are supported by +# target, 0 otherwise. + +proc check_vect_support_and_set_flags { } { + global DEFAULT_VECTCFLAGS + global dg-do-what-default + + if [istarget powerpc-*paired*] { + lappend DEFAULT_VECTCFLAGS "-mpaired" + if [check_750cl_hw_available] { + set dg-do-what-default run + } else { + set dg-do-what-default compile + } + } elseif [istarget powerpc*-*-*] { + # Skip targets not supporting -maltivec. + if ![is-effective-target powerpc_altivec_ok] { + return 0 + } + + lappend DEFAULT_VECTCFLAGS "-maltivec" + if [check_p8vector_hw_available] { + lappend DEFAULT_VECTCFLAGS "-mpower8-vector" "-mno-allow-movmisalign" + } elseif [check_vsx_hw_available] { + lappend DEFAULT_VECTCFLAGS "-mvsx" "-mno-allow-movmisalign" + } + + if [check_vmx_hw_available] { + set dg-do-what-default run + } else { + if [is-effective-target ilp32] { + # Specify a cpu that supports VMX for compile-only tests. + lappend DEFAULT_VECTCFLAGS "-mcpu=970" + } + set dg-do-what-default compile + } + } elseif { [istarget spu-*-*] } { + set dg-do-what-default run + } elseif { [istarget i?86-*-*] || [istarget x86_64-*-*] } { + lappend DEFAULT_VECTCFLAGS "-msse2" + if { [check_effective_target_sse2_runtime] } { + set dg-do-what-default run + } else { + set dg-do-what-default compile + } + } elseif { [istarget mips*-*-*] + && ([check_effective_target_mpaired_single] + || [check_effective_target_mips_loongson]) + && [check_effective_target_nomips16] } { + if { [check_effective_target_mpaired_single] } { + lappend DEFAULT_VECTCFLAGS "-mpaired-single" + } + set dg-do-what-default run + } elseif [istarget sparc*-*-*] { + lappend DEFAULT_VECTCFLAGS "-mcpu=ultrasparc" "-mvis" + if [check_effective_target_ultrasparc_hw] { + set dg-do-what-default run + } else { + set dg-do-what-default compile + } + } elseif [istarget alpha*-*-*] { + # Alpha's vectorization capabilities are extremely limited. + # It's more effort than its worth disabling all of the tests + # that it cannot pass. But if you actually want to see what + # does work, command out the return. 
+ return 0 + + lappend DEFAULT_VECTCFLAGS "-mmax" + if [check_alpha_max_hw_available] { + set dg-do-what-default run + } else { + set dg-do-what-default compile + } + } elseif [istarget ia64-*-*] { + set dg-do-what-default run + } elseif [is-effective-target arm_neon_ok] { + eval lappend DEFAULT_VECTCFLAGS [add_options_for_arm_neon ""] + # NEON does not support denormals, so is not used for vectorization by + # default to avoid loss of precision. We must pass -ffast-math to test + # vectorization of float operations. + lappend DEFAULT_VECTCFLAGS "-ffast-math" + if [is-effective-target arm_neon_hw] { + set dg-do-what-default run + } else { + set dg-do-what-default compile + } + } elseif [istarget "aarch64*-*-*"] { + set dg-do-what-default run + } else { + return 0 + } + + return 1 +} + +proc check_effective_target_non_strict_align {} { + return [check_no_compiler_messages non_strict_align assembly { + char *y; + typedef char __attribute__ ((__aligned__(__BIGGEST_ALIGNMENT__))) c; + c *z; + void foo(void) { z = (c *) y; } + } "-Wcast-align"] +} + +# Return 1 if the target has . + +proc check_effective_target_ucontext_h { } { + return [check_no_compiler_messages ucontext_h assembly { + #include + }] +} + +proc check_effective_target_aarch64_tiny { } { + if { [istarget aarch64*-*-*] } { + return [check_no_compiler_messages aarch64_tiny object { + #ifdef __AARCH64_CMODEL_TINY__ + int dummy; + #else + #error target not AArch64 tiny code model + #endif + }] + } else { + return 0 + } +} + +proc check_effective_target_aarch64_small { } { + if { [istarget aarch64*-*-*] } { + return [check_no_compiler_messages aarch64_small object { + #ifdef __AARCH64_CMODEL_SMALL__ + int dummy; + #else + #error target not AArch64 small code model + #endif + }] + } else { + return 0 + } +} + +proc check_effective_target_aarch64_large { } { + if { [istarget aarch64*-*-*] } { + return [check_no_compiler_messages aarch64_large object { + #ifdef __AARCH64_CMODEL_LARGE__ + int dummy; + #else + #error target not AArch64 large code model + #endif + }] + } else { + return 0 + } +} + +# Return 1 if is available with all the standard IEEE +# exceptions and floating-point exceptions are raised by arithmetic +# operations. (If the target requires special options for "inexact" +# exceptions, those need to be specified in the testcases.) + +proc check_effective_target_fenv_exceptions {} { + return [check_runtime fenv_exceptions { + #include + #include + #ifndef FE_DIVBYZERO + # error Missing FE_DIVBYZERO + #endif + #ifndef FE_INEXACT + # error Missing FE_INEXACT + #endif + #ifndef FE_INVALID + # error Missing FE_INVALID + #endif + #ifndef FE_OVERFLOW + # error Missing FE_OVERFLOW + #endif + #ifndef FE_UNDERFLOW + # error Missing FE_UNDERFLOW + #endif + volatile float a = 0.0f, r; + int + main (void) + { + r = a / a; + if (fetestexcept (FE_INVALID)) + exit (0); + else + abort (); + } + } "-std=gnu99"] +} + +# Return 1 if LOGICAL_OP_NON_SHORT_CIRCUIT is set to 0 for the current target. + +proc check_effective_target_logical_op_short_circuit {} { + if { [istarget mips*-*-*] + || [istarget arc*-*-*] + || [istarget avr*-*-*] + || [istarget crisv32-*-*] || [istarget cris-*-*] + || [check_effective_target_arm_cortex_m] } { + return 1 + } + return 0 +} + +# Record that dg-final test TEST requires convential compilation. 
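+#
+# Illustrative sketch (assumed usage, not part of this patch): a harness
+# might register
+#   force_conventional_output_for scan-assembler
+# so that scan-assembler gains a _required_options proc returning
+# $gcc_force_conventional_output (for example -ffat-lto-objects), keeping
+# conventional assembler output available when tests run with -flto.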
+
+proc force_conventional_output_for { test } {
+    if { [info proc $test] == "" } {
+        perror "$test does not exist"
+        exit 1
+    }
+    proc ${test}_required_options {} {
+        global gcc_force_conventional_output
+        return $gcc_force_conventional_output
+    }
+}
+