author     Rong Xu <xur@google.com>    2014-07-21 16:47:22 -0700
committer  Rong Xu <xur@google.com>    2014-07-29 15:31:03 -0700
commit     38a8aecfb882072900434499696b5c32a2274515 (patch)
tree       2aac97f0ae24b03cd98c1a06e989c031c173f889 /gcc-4.9/libgomp
parent     c231900e5dcc14d8296bd9f62b45997a49d4d5e7 (diff)
[4.9] Switch gcc-4.9 to use google/gcc-4_9 branch.
This source drop uses svn version r212828 of the google/gcc-4.9 branch. We also
cherry-picked r213062, r213063 and r213064 to fix Windows build issues. All
gcc-4.9 patches before July 3rd have been ported to google/gcc-4.9. The
following prior commits have not been merged to the google branch yet (they are
included in this commit): e7af147f979e657fe2df00808e5b4319b0e088c6,
baf87df3cb2683649ba7e9872362a7e721117c23, and
c231900e5dcc14d8296bd9f62b45997a49d4d5e7.

Change-Id: I4bea3ea470387ff751c2be4cb0d4a12059b9299b
Diffstat (limited to 'gcc-4.9/libgomp')
-rw-r--r--  gcc-4.9/libgomp/ChangeLog | 131
-rw-r--r--  gcc-4.9/libgomp/config/linux/proc.c | 7
-rwxr-xr-x  gcc-4.9/libgomp/configure | 24
-rw-r--r--  gcc-4.9/libgomp/configure.ac | 1
-rw-r--r--  gcc-4.9/libgomp/configure.tgt | 3
-rw-r--r--  gcc-4.9/libgomp/libgomp.info | 3232
-rw-r--r--  gcc-4.9/libgomp/libgomp.texi | 84
-rw-r--r--  gcc-4.9/libgomp/omp_lib.f90.in | 2
-rw-r--r--  gcc-4.9/libgomp/omp_lib.h.in | 2
-rw-r--r--  gcc-4.9/libgomp/testsuite/config/default.exp | 1
-rw-r--r--  gcc-4.9/libgomp/testsuite/gompconfig.exp.in | 2
-rw-r--r--  gcc-4.9/libgomp/testsuite/lib/libgomp.exp | 5
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c++/for-10.C | 3
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c++/simd-9.C | 52
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c++/simd10.C | 6
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c++/simd11.C | 6
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c++/simd12.C | 6
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c++/simd13.C | 6
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/for-2.c | 3
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/for-2.h | 21
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/loop-16.c | 27
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/simd-10.c | 26
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/simd-11.c | 27
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/simd-12.c | 19
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/simd-13.c | 18
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/simd-14.c | 123
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/simd-15.c | 129
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/simd-16.c | 67
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/simd-17.c | 73
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/simd-7.c | 96
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/simd-8.c | 44
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/simd-9.c | 70
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.c/target-8.c | 26
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/aligned1.f03 | 133
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-1.f90 | 328
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-2.f90 | 367
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-3.f90 | 372
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable10.f90 | 112
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable11.f90 | 72
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable12.f90 | 74
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable9.f90 | 156
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/associate1.f90 | 23
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/associate2.f90 | 46
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-do-1.f90 | 14
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-do-2.f90 | 90
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-parallel-1.f90 | 10
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-parallel-3.f90 | 38
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-sections-1.f90 | 23
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-taskgroup-2.f90 | 28
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-1.f90 | 95
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-2.f90 | 25
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-3.f90 | 22
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-1.f90 | 203
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-2.f90 | 34
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-3.f90 | 42
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/nestedfn5.f90 | 96
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/omp_atomic5.f90 | 59
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/openmp_version-1.f | 2
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/openmp_version-2.f90 | 2
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/procptr1.f90 | 42
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/simd1.f90 | 35
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/simd2.f90 | 101
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/simd3.f90 | 109
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/simd4.f90 | 103
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/simd5.f90 | 124
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/simd6.f90 | 135
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/simd7.f90 | 172
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/target1.f90 | 58
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/target2.f90 | 96
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/target3.f90 | 29
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/target4.f90 | 48
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/target5.f90 | 21
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/target6.f90 | 50
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/target7.f90 | 38
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/target8.f90 | 33
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/taskgroup1.f90 | 80
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr1.f90 | 51
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr10.f90 | 32
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr11.f90 | 95
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr12.f90 | 76
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr13.f90 | 106
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr14.f90 | 50
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr15.f90 | 64
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr2.f90 | 51
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr3.f90 | 38
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr4.f90 | 50
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr5.f90 | 57
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr6.f90 | 69
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr7.f90 | 46
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr8.f90 | 46
-rw-r--r--  gcc-4.9/libgomp/testsuite/libgomp.fortran/udr9.f90 | 65
91 files changed, 5681 insertions, 3297 deletions
diff --git a/gcc-4.9/libgomp/ChangeLog b/gcc-4.9/libgomp/ChangeLog
index 819bfa19b..00e34875e 100644
--- a/gcc-4.9/libgomp/ChangeLog
+++ b/gcc-4.9/libgomp/ChangeLog
@@ -1,3 +1,134 @@
+2014-06-30 Jakub Jelinek <jakub@redhat.com>
+
+ Backported from mainline
+ 2014-06-25 Jakub Jelinek <jakub@redhat.com>
+
+ * testsuite/libgomp.fortran/simd5.f90: New test.
+ * testsuite/libgomp.fortran/simd6.f90: New test.
+ * testsuite/libgomp.fortran/simd7.f90: New test.
+
+ 2014-06-24 Jakub Jelinek <jakub@redhat.com>
+
+ * testsuite/libgomp.fortran/aligned1.f03: New test.
+ * testsuite/libgomp.fortran/nestedfn5.f90: New test.
+ * testsuite/libgomp.fortran/target7.f90: Surround loop spawning
+ tasks with !$omp parallel !$omp single.
+ * testsuite/libgomp.fortran/target8.f90: New test.
+ * testsuite/libgomp.fortran/udr4.f90 (foo UDR, bar UDR): Adjust
+ not to use trim in the combiner, instead call elemental function.
+ (fn): New elemental function.
+ * testsuite/libgomp.fortran/udr6.f90 (do_add, dp_add, dp_init):
+ Make elemental.
+ * testsuite/libgomp.fortran/udr7.f90 (omp_priv, omp_orig, omp_out,
+ omp_in): Likewise.
+ * testsuite/libgomp.fortran/udr12.f90: New test.
+ * testsuite/libgomp.fortran/udr13.f90: New test.
+ * testsuite/libgomp.fortran/udr14.f90: New test.
+ * testsuite/libgomp.fortran/udr15.f90: New test.
+
+ 2014-06-18 Jakub Jelinek <jakub@redhat.com>
+
+ * omp_lib.f90.in (openmp_version): Set to 201307.
+ * omp_lib.h.in (openmp_version): Likewise.
+ * testsuite/libgomp.c/target-8.c: New test.
+ * testsuite/libgomp.fortran/declare-simd-1.f90: Add notinbranch
+ and inbranch clauses.
+ * testsuite/libgomp.fortran/depend-3.f90: New test.
+ * testsuite/libgomp.fortran/openmp_version-1.f: Adjust for new
+ openmp_version.
+ * testsuite/libgomp.fortran/openmp_version-2.f90: Likewise.
+ * testsuite/libgomp.fortran/target1.f90: New test.
+ * testsuite/libgomp.fortran/target2.f90: New test.
+ * testsuite/libgomp.fortran/target3.f90: New test.
+ * testsuite/libgomp.fortran/target4.f90: New test.
+ * testsuite/libgomp.fortran/target5.f90: New test.
+ * testsuite/libgomp.fortran/target6.f90: New test.
+ * testsuite/libgomp.fortran/target7.f90: New test.
+
+ 2014-06-10 Jakub Jelinek <jakub@redhat.com>
+
+ PR fortran/60928
+ * testsuite/libgomp.fortran/allocatable9.f90: New test.
+ * testsuite/libgomp.fortran/allocatable10.f90: New test.
+ * testsuite/libgomp.fortran/allocatable11.f90: New test.
+ * testsuite/libgomp.fortran/allocatable12.f90: New test.
+ * testsuite/libgomp.fortran/alloc-comp-1.f90: New test.
+ * testsuite/libgomp.fortran/alloc-comp-2.f90: New test.
+ * testsuite/libgomp.fortran/alloc-comp-3.f90: New test.
+ * testsuite/libgomp.fortran/associate1.f90: New test.
+ * testsuite/libgomp.fortran/associate2.f90: New test.
+ * testsuite/libgomp.fortran/procptr1.f90: New test.
+
+ 2014-06-06 Jakub Jelinek <jakub@redhat.com>
+
+ * testsuite/libgomp.fortran/simd1.f90: New test.
+ * testsuite/libgomp.fortran/udr1.f90: New test.
+ * testsuite/libgomp.fortran/udr2.f90: New test.
+ * testsuite/libgomp.fortran/udr3.f90: New test.
+ * testsuite/libgomp.fortran/udr4.f90: New test.
+ * testsuite/libgomp.fortran/udr5.f90: New test.
+ * testsuite/libgomp.fortran/udr6.f90: New test.
+ * testsuite/libgomp.fortran/udr7.f90: New test.
+ * testsuite/libgomp.fortran/udr8.f90: New test.
+ * testsuite/libgomp.fortran/udr9.f90: New test.
+ * testsuite/libgomp.fortran/udr10.f90: New test.
+ * testsuite/libgomp.fortran/udr11.f90: New test.
+
+ 2014-05-27 Uros Bizjak <ubizjak@gmail.com>
+
+ * testsuite/libgomp.fortran/declare-simd-1.f90: Require
+ vect_simd_clones effective target.
+ * testsuite/libgomp.fortran/declare-simd-2.f90: Ditto.
+
+ 2014-05-11 Jakub Jelinek <jakub@redhat.com>
+
+ * testsuite/libgomp.fortran/cancel-do-1.f90: New test.
+ * testsuite/libgomp.fortran/cancel-do-2.f90: New test.
+ * testsuite/libgomp.fortran/cancel-parallel-1.f90: New test.
+ * testsuite/libgomp.fortran/cancel-parallel-3.f90: New test.
+ * testsuite/libgomp.fortran/cancel-sections-1.f90: New test.
+ * testsuite/libgomp.fortran/cancel-taskgroup-2.f90: New test.
+ * testsuite/libgomp.fortran/declare-simd-1.f90: New test.
+ * testsuite/libgomp.fortran/declare-simd-2.f90: New test.
+ * testsuite/libgomp.fortran/declare-simd-3.f90: New test.
+ * testsuite/libgomp.fortran/depend-1.f90: New test.
+ * testsuite/libgomp.fortran/depend-2.f90: New test.
+ * testsuite/libgomp.fortran/omp_atomic5.f90: New test.
+ * testsuite/libgomp.fortran/simd1.f90: New test.
+ * testsuite/libgomp.fortran/simd2.f90: New test.
+ * testsuite/libgomp.fortran/simd3.f90: New test.
+ * testsuite/libgomp.fortran/simd4.f90: New test.
+ * testsuite/libgomp.fortran/taskgroup1.f90: New test.
+
+2014-06-24 Jakub Jelinek <jakub@redhat.com>
+
+ * testsuite/libgomp.c/for-2.c: Define SC to static for
+ #pragma omp for simd testing.
+ * testsuite/libgomp.c/for-2.h (SC): Define if not defined.
+ (N(f5), N(f6), N(f7), N(f8), N(f10), N(f12), N(f14)): Use
+ SC macro.
+ * testsuite/libgomp.c/simd-14.c: New test.
+ * testsuite/libgomp.c/simd-15.c: New test.
+ * testsuite/libgomp.c/simd-16.c: New test.
+ * testsuite/libgomp.c/simd-17.c: New test.
+ * testsuite/libgomp.c++/for-10.C: Define SC to static for
+ #pragma omp for simd testing.
+ * testsuite/libgomp.c++/simd10.C: New test.
+ * testsuite/libgomp.c++/simd11.C: New test.
+ * testsuite/libgomp.c++/simd12.C: New test.
+ * testsuite/libgomp.c++/simd13.C: New test.
+
+2014-05-21 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/61252
+ * testsuite/libgomp.c++/simd-9.C: New test.
+
+2014-05-18 Uros Bizjak <ubizjak@gmail.com>
+
+	* libgomp.texi (Runtime Library Routines): Remove multiple @menu.
+	(Environment Variables): Move OMP_PROC_BIND and OMP_STACKSIZE node
+ texts according to their @menu entry positions.
+
2014-05-02 Jakub Jelinek <jakub@redhat.com>
* testsuite/libgomp.c/simd-10.c: New test.
diff --git a/gcc-4.9/libgomp/config/linux/proc.c b/gcc-4.9/libgomp/config/linux/proc.c
index a6a5e9900..5609ff896 100644
--- a/gcc-4.9/libgomp/config/linux/proc.c
+++ b/gcc-4.9/libgomp/config/linux/proc.c
@@ -31,11 +31,14 @@
#include "libgomp.h"
#include "proc.h"
#include <errno.h>
+#include <stdlib.h>
#include <unistd.h>
+#if defined (__ANDROID__)
#include <stdio.h>
-#include <stdlib.h>
+#include <string.h>
#include <fcntl.h>
#include <errno.h>
+#endif
#ifdef HAVE_GETLOADAVG
# ifdef HAVE_SYS_LOADAVG_H
# include <sys/loadavg.h>
@@ -76,6 +79,7 @@ gomp_cpuset_popcount (unsigned long cpusetsize, cpu_set_t *cpusetp)
}
#endif
+#if defined (__ANDROID__)
/* Read the content of a file.
* Return the length of the data, or -1 on error. Does *not*
* zero-terminate the content. Will not read more
@@ -255,6 +259,7 @@ sc_nprocessors_actu ()
}
return cpuCount;
}
+#endif
/* At startup, determine the default number of threads. It would seem
this should be related to the number of cpus online. */
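
The Android-only block guarded by __ANDROID__ above counts the processors by
reading /proc directly. A minimal standalone sketch of the conventional POSIX
query it stands in for on other Linux targets, assuming
sysconf(_SC_NPROCESSORS_ONLN) is available (an illustration, not code from the
patch):

    #include <stdio.h>
    #include <unistd.h>

    int
    main (void)
    {
      /* Processors currently online; the __ANDROID__ path above computes
         the same figure by scanning /proc instead.  */
      long n = sysconf (_SC_NPROCESSORS_ONLN);
      if (n < 1)
        n = 1;   /* Fall back to one thread if the query fails.  */
      printf ("online cpus: %ld\n", n);
      return 0;
    }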
diff --git a/gcc-4.9/libgomp/configure b/gcc-4.9/libgomp/configure
index 6fbafe321..39bb5cdee 100755
--- a/gcc-4.9/libgomp/configure
+++ b/gcc-4.9/libgomp/configure
@@ -7695,7 +7695,22 @@ fi
# Check whether --with-pic was given.
if test "${with_pic+set}" = set; then :
- withval=$with_pic; pic_mode="$withval"
+ withval=$with_pic; p=${PACKAGE-default}
+ case "$withval" in
+ yes|no) pic_mode="$withval" ;;
+ *)
+ pic_mode=default
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $withval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ pic_mode=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac
else
pic_mode=default
fi
@@ -11094,7 +11109,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 11097 "configure"
+#line 11106 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -11200,7 +11215,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 11203 "configure"
+#line 11212 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -16332,6 +16347,8 @@ ac_config_files="$ac_config_files omp.h omp_lib.h omp_lib.f90 libgomp_f.h"
ac_config_files="$ac_config_files Makefile testsuite/Makefile libgomp.spec"
+ac_config_files="$ac_config_files testsuite/gompconfig.exp"
+
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
# tests run on this system so they can be shared between configure
@@ -17472,6 +17489,7 @@ do
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
"testsuite/Makefile") CONFIG_FILES="$CONFIG_FILES testsuite/Makefile" ;;
"libgomp.spec") CONFIG_FILES="$CONFIG_FILES libgomp.spec" ;;
+ "testsuite/gompconfig.exp") CONFIG_FILES="$CONFIG_FILES testsuite/gompconfig.exp" ;;
*) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac
diff --git a/gcc-4.9/libgomp/configure.ac b/gcc-4.9/libgomp/configure.ac
index 59b9516b6..43632f74d 100644
--- a/gcc-4.9/libgomp/configure.ac
+++ b/gcc-4.9/libgomp/configure.ac
@@ -354,4 +354,5 @@ CFLAGS="$save_CFLAGS"
AC_CONFIG_FILES(omp.h omp_lib.h omp_lib.f90 libgomp_f.h)
AC_CONFIG_FILES(Makefile testsuite/Makefile libgomp.spec)
+AC_CONFIG_FILES(testsuite/gompconfig.exp)
AC_OUTPUT
diff --git a/gcc-4.9/libgomp/configure.tgt b/gcc-4.9/libgomp/configure.tgt
index 8b1841792..f9aab70ff 100644
--- a/gcc-4.9/libgomp/configure.tgt
+++ b/gcc-4.9/libgomp/configure.tgt
@@ -18,7 +18,8 @@ if test $gcc_cv_have_tls = yes ; then
;;
*-*-linux* | *-*-gnu*)
- XCFLAGS="${XCFLAGS} -ftls-model=initial-exec"
+ # Google ref b/15780253; b/6368405
+ # XCFLAGS="${XCFLAGS} -ftls-model=initial-exec"
;;
esac
fi
diff --git a/gcc-4.9/libgomp/libgomp.info b/gcc-4.9/libgomp/libgomp.info
deleted file mode 100644
index a2d8495a2..000000000
--- a/gcc-4.9/libgomp/libgomp.info
+++ /dev/null
@@ -1,3232 +0,0 @@
-This is libgomp.info, produced by makeinfo version 5.1 from
-libgomp.texi.
-
-Copyright (C) 2006-2014 Free Software Foundation, Inc.
-
- Permission is granted to copy, distribute and/or modify this document
-under the terms of the GNU Free Documentation License, Version 1.3 or
-any later version published by the Free Software Foundation; with the
-Invariant Sections being "Funding Free Software", the Front-Cover texts
-being (a) (see below), and with the Back-Cover Texts being (b) (see
-below). A copy of the license is included in the section entitled "GNU
-Free Documentation License".
-
- (a) The FSF's Front-Cover Text is:
-
- A GNU Manual
-
- (b) The FSF's Back-Cover Text is:
-
- You have freedom to copy and modify this GNU Manual, like GNU
-software. Copies published by the Free Software Foundation raise funds
-for GNU development.
-INFO-DIR-SECTION GNU Libraries
-START-INFO-DIR-ENTRY
-* libgomp: (libgomp). GNU OpenMP runtime library
-END-INFO-DIR-ENTRY
-
- This manual documents the GNU implementation of the OpenMP API for
-multi-platform shared-memory parallel programming in C/C++ and Fortran.
-
- Published by the Free Software Foundation 51 Franklin Street, Fifth
-Floor Boston, MA 02110-1301 USA
-
- Copyright (C) 2006-2014 Free Software Foundation, Inc.
-
- Permission is granted to copy, distribute and/or modify this document
-under the terms of the GNU Free Documentation License, Version 1.3 or
-any later version published by the Free Software Foundation; with the
-Invariant Sections being "Funding Free Software", the Front-Cover texts
-being (a) (see below), and with the Back-Cover Texts being (b) (see
-below). A copy of the license is included in the section entitled "GNU
-Free Documentation License".
-
- (a) The FSF's Front-Cover Text is:
-
- A GNU Manual
-
- (b) The FSF's Back-Cover Text is:
-
- You have freedom to copy and modify this GNU Manual, like GNU
-software. Copies published by the Free Software Foundation raise funds
-for GNU development.
-
-
-File: libgomp.info, Node: Top, Next: Enabling OpenMP, Up: (dir)
-
-Introduction
-************
-
-This manual documents the usage of libgomp, the GNU implementation of
-the OpenMP (http://www.openmp.org) Application Programming Interface
-(API) for multi-platform shared-memory parallel programming in C/C++ and
-Fortran.
-
-* Menu:
-
-* Enabling OpenMP:: How to enable OpenMP for your applications.
-* Runtime Library Routines:: The OpenMP runtime application programming
- interface.
-* Environment Variables:: Influencing runtime behavior with environment
- variables.
-* The libgomp ABI:: Notes on the external ABI presented by libgomp.
-* Reporting Bugs:: How to report bugs in GNU OpenMP.
-* Copying:: GNU general public license says
- how you can copy and share libgomp.
-* GNU Free Documentation License::
- How you can copy and share this manual.
-* Funding:: How to help assure continued work for free
- software.
-* Library Index:: Index of this documentation.
-
-
-File: libgomp.info, Node: Enabling OpenMP, Next: Runtime Library Routines, Prev: Top, Up: Top
-
-1 Enabling OpenMP
-*****************
-
-To activate the OpenMP extensions for C/C++ and Fortran, the
-compile-time flag '-fopenmp' must be specified. This enables the OpenMP
-directive '#pragma omp' in C/C++ and '!$omp' directives in free form,
-'c$omp', '*$omp' and '!$omp' directives in fixed form, '!$' conditional
-compilation sentinels in free form and 'c$', '*$' and '!$' sentinels in
-fixed form, for Fortran. The flag also arranges for automatic linking
-of the OpenMP runtime library (*note Runtime Library Routines::).
-
- A complete description of all OpenMP directives accepted may be found
-in the OpenMP Application Program Interface (http://www.openmp.org)
-manual, version 4.0.
-
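
A minimal C sketch of the above, assuming it is saved as sum.c (the file name
is only illustrative) and built with 'gcc -fopenmp sum.c':

    #include <stdio.h>

    int
    main (void)
    {
      int i, sum = 0;

      /* Without -fopenmp the pragma is ignored and the loop runs serially;
         with it, the iterations are split across a team of threads.  */
    #pragma omp parallel for reduction(+:sum)
      for (i = 1; i <= 100; i++)
        sum += i;

      printf ("sum = %d\n", sum);   /* Always 5050.  */
      return 0;
    }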
-
-File: libgomp.info, Node: Runtime Library Routines, Next: Environment Variables, Prev: Enabling OpenMP, Up: Top
-
-2 Runtime Library Routines
-**************************
-
-The runtime routines described here are defined by Section 3 of the
-OpenMP specification in version 4.0. The routines are structured in
-the following three parts:
-
- Control threads, processors and the parallel environment. They have
-C linkage, and do not throw exceptions.
-
-* Menu:
-
-* omp_get_active_level:: Number of active parallel regions
-* omp_get_ancestor_thread_num:: Ancestor thread ID
-* omp_get_cancellation:: Whether cancellation support is enabled
-* omp_get_default_device:: Get the default device for target regions
-* omp_get_dynamic:: Dynamic teams setting
-* omp_get_level:: Number of parallel regions
-* omp_get_max_active_levels:: Maximum number of active regions
-* omp_get_max_threads:: Maximum number of threads of parallel region
-* omp_get_nested:: Nested parallel regions
-* omp_get_num_devices:: Number of target devices
-* omp_get_num_procs:: Number of processors online
-* omp_get_num_teams:: Number of teams
-* omp_get_num_threads:: Size of the active team
-* omp_get_proc_bind:: Whether threads may be moved between CPUs
-* omp_get_schedule:: Obtain the runtime scheduling method
-* omp_get_team_num:: Get team number
-* omp_get_team_size:: Number of threads in a team
-* omp_get_thread_limit:: Maximum number of threads
-* omp_get_thread_num:: Current thread ID
-* omp_in_parallel:: Whether a parallel region is active
-* omp_in_final:: Whether in final or included task region
-* omp_is_initial_device:: Whether executing on the host device
-* omp_set_default_device:: Set the default device for target regions
-* omp_set_dynamic:: Enable/disable dynamic teams
-* omp_set_max_active_levels:: Limits the number of active parallel regions
-* omp_set_nested:: Enable/disable nested parallel regions
-* omp_set_num_threads:: Set upper team size limit
-* omp_set_schedule:: Set the runtime scheduling method
-
- Initialize, set, test, unset and destroy simple and nested locks.
-
-* Menu:
-
-* omp_init_lock:: Initialize simple lock
-* omp_set_lock:: Wait for and set simple lock
-* omp_test_lock:: Test and set simple lock if available
-* omp_unset_lock:: Unset simple lock
-* omp_destroy_lock:: Destroy simple lock
-* omp_init_nest_lock:: Initialize nested lock
-* omp_set_nest_lock:: Wait for and set nested lock
-* omp_test_nest_lock:: Test and set nested lock if available
-* omp_unset_nest_lock:: Unset nested lock
-* omp_destroy_nest_lock:: Destroy nested lock
-
- Portable, thread-based, wall clock timer.
-
-* Menu:
-
-* omp_get_wtick:: Get timer precision.
-* omp_get_wtime:: Elapsed wall clock time.
-
-
-File: libgomp.info, Node: omp_get_active_level, Next: omp_get_ancestor_thread_num, Up: Runtime Library Routines
-
-2.1 'omp_get_active_level' - Number of parallel regions
-=======================================================
-
-_Description_:
-     This function returns the nesting level of the active parallel
-     blocks that enclose the call.
-
-_C/C++_
- _Prototype_: 'int omp_get_active_level(void);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_active_level()'
-
-_See also_:
- *note omp_get_level::, *note omp_get_max_active_levels::, *note
- omp_set_max_active_levels::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.20.
-
-
-File: libgomp.info, Node: omp_get_ancestor_thread_num, Next: omp_get_cancellation, Prev: omp_get_active_level, Up: Runtime Library Routines
-
-2.2 'omp_get_ancestor_thread_num' - Ancestor thread ID
-======================================================
-
-_Description_:
- This function returns the thread identification number for the
-     given nesting level of the current thread.  For values of LEVEL
-     outside the range zero to 'omp_get_level', -1 is returned; if LEVEL
-     is 'omp_get_level', the result is identical to 'omp_get_thread_num'.
-
-_C/C++_
- _Prototype_: 'int omp_get_ancestor_thread_num(int level);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_ancestor_thread_num(level)'
- 'integer level'
-
-_See also_:
- *note omp_get_level::, *note omp_get_thread_num::, *note
- omp_get_team_size::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.18.
-
-
-File: libgomp.info, Node: omp_get_cancellation, Next: omp_get_default_device, Prev: omp_get_ancestor_thread_num, Up: Runtime Library Routines
-
-2.3 'omp_get_cancellation' - Whether cancellation support is enabled
-====================================================================
-
-_Description_:
- This function returns 'true' if cancellation is activated, 'false'
- otherwise. Here, 'true' and 'false' represent their
- language-specific counterparts. Unless 'OMP_CANCELLATION' is set
- true, cancellations are deactivated.
-
-_C/C++_:
- _Prototype_: 'int omp_get_cancellation(void);'
-
-_Fortran_:
- _Interface_: 'logical function omp_get_cancellation()'
-
-_See also_:
- *note OMP_CANCELLATION::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.9.
-
-
-File: libgomp.info, Node: omp_get_default_device, Next: omp_get_dynamic, Prev: omp_get_cancellation, Up: Runtime Library Routines
-
-2.4 'omp_get_default_device' - Get the default device for target regions
-========================================================================
-
-_Description_:
- Get the default device for target regions without device clause.
-
-_C/C++_:
- _Prototype_: 'int omp_get_default_device(void);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_default_device()'
-
-_See also_:
- *note OMP_DEFAULT_DEVICE::, *note omp_set_default_device::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.24.
-
-
-File: libgomp.info, Node: omp_get_dynamic, Next: omp_get_level, Prev: omp_get_default_device, Up: Runtime Library Routines
-
-2.5 'omp_get_dynamic' - Dynamic teams setting
-=============================================
-
-_Description_:
- This function returns 'true' if enabled, 'false' otherwise. Here,
- 'true' and 'false' represent their language-specific counterparts.
-
- The dynamic team setting may be initialized at startup by the
- 'OMP_DYNAMIC' environment variable or at runtime using
- 'omp_set_dynamic'. If undefined, dynamic adjustment is disabled by
- default.
-
-_C/C++_:
- _Prototype_: 'int omp_get_dynamic(void);'
-
-_Fortran_:
- _Interface_: 'logical function omp_get_dynamic()'
-
-_See also_:
- *note omp_set_dynamic::, *note OMP_DYNAMIC::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.8.
-
-
-File: libgomp.info, Node: omp_get_level, Next: omp_get_max_active_levels, Prev: omp_get_dynamic, Up: Runtime Library Routines
-
-2.6 'omp_get_level' - Obtain the current nesting level
-======================================================
-
-_Description_:
-     This function returns the nesting level of the parallel blocks
-     that enclose the call.
-
-_C/C++_
- _Prototype_: 'int omp_get_level(void);'
-
-_Fortran_:
-     _Interface_: 'integer function omp_get_level()'
-
-_See also_:
- *note omp_get_active_level::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.17.
-
-
-File: libgomp.info, Node: omp_get_max_active_levels, Next: omp_get_max_threads, Prev: omp_get_level, Up: Runtime Library Routines
-
-2.7 'omp_get_max_active_levels' - Maximum number of active regions
-==================================================================
-
-_Description_:
- This function obtains the maximum allowed number of nested, active
- parallel regions.
-
-_C/C++_
- _Prototype_: 'int omp_get_max_active_levels(void);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_max_active_levels()'
-
-_See also_:
- *note omp_set_max_active_levels::, *note omp_get_active_level::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.16.
-
-
-File: libgomp.info, Node: omp_get_max_threads, Next: omp_get_nested, Prev: omp_get_max_active_levels, Up: Runtime Library Routines
-
-2.8 'omp_get_max_threads' - Maximum number of threads of parallel region
-========================================================================
-
-_Description_:
- Return the maximum number of threads used for the current parallel
- region that does not use the clause 'num_threads'.
-
-_C/C++_:
- _Prototype_: 'int omp_get_max_threads(void);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_max_threads()'
-
-_See also_:
- *note omp_set_num_threads::, *note omp_set_dynamic::, *note
- omp_get_thread_limit::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.3.
-
-
-File: libgomp.info, Node: omp_get_nested, Next: omp_get_num_devices, Prev: omp_get_max_threads, Up: Runtime Library Routines
-
-2.9 'omp_get_nested' - Nested parallel regions
-==============================================
-
-_Description_:
- This function returns 'true' if nested parallel regions are
- enabled, 'false' otherwise. Here, 'true' and 'false' represent
- their language-specific counterparts.
-
- Nested parallel regions may be initialized at startup by the
- 'OMP_NESTED' environment variable or at runtime using
- 'omp_set_nested'. If undefined, nested parallel regions are
- disabled by default.
-
-_C/C++_:
- _Prototype_: 'int omp_get_nested(void);'
-
-_Fortran_:
- _Interface_: 'logical function omp_get_nested()'
-
-_See also_:
- *note omp_set_nested::, *note OMP_NESTED::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.11.
-
-
-File: libgomp.info, Node: omp_get_num_devices, Next: omp_get_num_procs, Prev: omp_get_nested, Up: Runtime Library Routines
-
-2.10 'omp_get_num_devices' - Number of target devices
-=====================================================
-
-_Description_:
- Returns the number of target devices.
-
-_C/C++_:
- _Prototype_: 'int omp_get_num_devices(void);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_num_devices()'
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.25.
-
-
-File: libgomp.info, Node: omp_get_num_procs, Next: omp_get_num_teams, Prev: omp_get_num_devices, Up: Runtime Library Routines
-
-2.11 'omp_get_num_procs' - Number of processors online
-======================================================
-
-_Description_:
- Returns the number of processors online on that device.
-
-_C/C++_:
- _Prototype_: 'int omp_get_num_procs(void);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_num_procs()'
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.5.
-
-
-File: libgomp.info, Node: omp_get_num_teams, Next: omp_get_num_threads, Prev: omp_get_num_procs, Up: Runtime Library Routines
-
-2.12 'omp_get_num_teams' - Number of teams
-==========================================
-
-_Description_:
- Returns the number of teams in the current team region.
-
-_C/C++_:
- _Prototype_: 'int omp_get_num_teams(void);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_num_teams()'
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.26.
-
-
-File: libgomp.info, Node: omp_get_num_threads, Next: omp_get_proc_bind, Prev: omp_get_num_teams, Up: Runtime Library Routines
-
-2.13 'omp_get_num_threads' - Size of the active team
-====================================================
-
-_Description_:
- Returns the number of threads in the current team. In a sequential
- section of the program 'omp_get_num_threads' returns 1.
-
- The default team size may be initialized at startup by the
- 'OMP_NUM_THREADS' environment variable. At runtime, the size of
- the current team may be set either by the 'NUM_THREADS' clause or
- by 'omp_set_num_threads'. If none of the above were used to define
- a specific value and 'OMP_DYNAMIC' is disabled, one thread per CPU
- online is used.
-
-_C/C++_:
- _Prototype_: 'int omp_get_num_threads(void);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_num_threads()'
-
-_See also_:
- *note omp_get_max_threads::, *note omp_set_num_threads::, *note
- OMP_NUM_THREADS::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.2.
-
-
-File: libgomp.info, Node: omp_get_proc_bind, Next: omp_get_schedule, Prev: omp_get_num_threads, Up: Runtime Library Routines
-
-2.14 'omp_get_proc_bind' - Whether threads may be moved between CPUs
-===================================================================
-
-_Description_:
-     This function returns the currently active thread affinity policy,
- which is set via 'OMP_PROC_BIND'. Possible values are
- 'omp_proc_bind_false', 'omp_proc_bind_true',
- 'omp_proc_bind_master', 'omp_proc_bind_close' and
- 'omp_proc_bind_spread'.
-
-_C/C++_:
- _Prototype_: 'omp_proc_bind_t omp_get_proc_bind(void);'
-
-_Fortran_:
- _Interface_: 'integer(kind=omp_proc_bind_kind) function
- omp_get_proc_bind()'
-
-_See also_:
- *note OMP_PROC_BIND::, *note OMP_PLACES::, *note
- GOMP_CPU_AFFINITY::,
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.22.
-
-
-File: libgomp.info, Node: omp_get_schedule, Next: omp_get_team_num, Prev: omp_get_proc_bind, Up: Runtime Library Routines
-
-2.15 'omp_get_schedule' - Obtain the runtime scheduling method
-==============================================================
-
-_Description_:
- Obtain the runtime scheduling method. The KIND argument will be
- set to the value 'omp_sched_static', 'omp_sched_dynamic',
- 'omp_sched_guided' or 'omp_sched_auto'. The second argument,
- MODIFIER, is set to the chunk size.
-
-_C/C++_
- _Prototype_: 'void omp_get_schedule(omp_sched_t *kind, int
- *modifier);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_get_schedule(kind, modifier)'
- 'integer(kind=omp_sched_kind) kind'
- 'integer modifier'
-
-_See also_:
- *note omp_set_schedule::, *note OMP_SCHEDULE::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.13.
-
-
-File: libgomp.info, Node: omp_get_team_num, Next: omp_get_team_size, Prev: omp_get_schedule, Up: Runtime Library Routines
-
-2.16 'omp_get_team_num' - Get team number
-=========================================
-
-_Description_:
- Returns the team number of the calling thread.
-
-_C/C++_:
- _Prototype_: 'int omp_get_team_num(void);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_team_num()'
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.27.
-
-
-File: libgomp.info, Node: omp_get_team_size, Next: omp_get_thread_limit, Prev: omp_get_team_num, Up: Runtime Library Routines
-
-2.17 'omp_get_team_size' - Number of threads in a team
-======================================================
-
-_Description_:
- This function returns the number of threads in a thread team to
- which either the current thread or its ancestor belongs. For
-     values of LEVEL outside the range zero to 'omp_get_level', -1 is returned; if
- LEVEL is zero, 1 is returned, and for 'omp_get_level', the result
- is identical to 'omp_get_num_threads'.
-
-_C/C++_:
- _Prototype_: 'int omp_get_team_size(int level);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_team_size(level)'
- 'integer level'
-
-_See also_:
- *note omp_get_num_threads::, *note omp_get_level::, *note
- omp_get_ancestor_thread_num::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.19.
-
-
-File: libgomp.info, Node: omp_get_thread_limit, Next: omp_get_thread_num, Prev: omp_get_team_size, Up: Runtime Library Routines
-
-2.18 'omp_get_thread_limit' - Maximum number of threads
-=======================================================
-
-_Description_:
- Return the maximum number of threads of the program.
-
-_C/C++_:
- _Prototype_: 'int omp_get_thread_limit(void);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_thread_limit()'
-
-_See also_:
- *note omp_get_max_threads::, *note OMP_THREAD_LIMIT::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.14.
-
-
-File: libgomp.info, Node: omp_get_thread_num, Next: omp_in_parallel, Prev: omp_get_thread_limit, Up: Runtime Library Routines
-
-2.19 'omp_get_thread_num' - Current thread ID
-=============================================
-
-_Description_:
- Returns a unique thread identification number within the current
-     team.  In sequential parts of the program, 'omp_get_thread_num'
- always returns 0. In parallel regions the return value varies from
- 0 to 'omp_get_num_threads'-1 inclusive. The return value of the
- master thread of a team is always 0.
-
-_C/C++_:
- _Prototype_: 'int omp_get_thread_num(void);'
-
-_Fortran_:
- _Interface_: 'integer function omp_get_thread_num()'
-
-_See also_:
- *note omp_get_num_threads::, *note omp_get_ancestor_thread_num::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.4.
-
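
A short sketch combining 'omp_get_thread_num' and 'omp_get_num_threads',
assuming the program is compiled with -fopenmp:

    #include <stdio.h>
    #include <omp.h>

    int
    main (void)
    {
      /* Outside a parallel region the thread number is always 0.  */
      printf ("serial part: thread %d\n", omp_get_thread_num ());

    #pragma omp parallel
      {
        int tid = omp_get_thread_num ();     /* 0 .. team size - 1 */
        int team = omp_get_num_threads ();   /* size of the current team */
        printf ("thread %d of %d\n", tid, team);
      }
      return 0;
    }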
-
-File: libgomp.info, Node: omp_in_parallel, Next: omp_in_final, Prev: omp_get_thread_num, Up: Runtime Library Routines
-
-2.20 'omp_in_parallel' - Whether a parallel region is active
-============================================================
-
-_Description_:
- This function returns 'true' if currently running in parallel,
- 'false' otherwise. Here, 'true' and 'false' represent their
- language-specific counterparts.
-
-_C/C++_:
- _Prototype_: 'int omp_in_parallel(void);'
-
-_Fortran_:
- _Interface_: 'logical function omp_in_parallel()'
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.6.
-
-
-File: libgomp.info, Node: omp_in_final, Next: omp_is_initial_device, Prev: omp_in_parallel, Up: Runtime Library Routines
-
-2.21 'omp_in_final' - Whether in final or included task region
-==============================================================
-
-_Description_:
- This function returns 'true' if currently running in a final or
- included task region, 'false' otherwise. Here, 'true' and 'false'
- represent their language-specific counterparts.
-
-_C/C++_:
- _Prototype_: 'int omp_in_final(void);'
-
-_Fortran_:
- _Interface_: 'logical function omp_in_final()'
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.21.
-
-
-File: libgomp.info, Node: omp_is_initial_device, Next: omp_set_default_device, Prev: omp_in_final, Up: Runtime Library Routines
-
-2.22 'omp_is_initial_device' - Whether executing on the host device
-===================================================================
-
-_Description_:
- This function returns 'true' if currently running on the host
- device, 'false' otherwise. Here, 'true' and 'false' represent
- their language-specific counterparts.
-
-_C/C++_:
- _Prototype_: 'int omp_is_initial_device(void);'
-
-_Fortran_:
- _Interface_: 'logical function omp_is_initial_device()'
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.28.
-
-
-File: libgomp.info, Node: omp_set_default_device, Next: omp_set_dynamic, Prev: omp_is_initial_device, Up: Runtime Library Routines
-
-2.23 'omp_set_default_device' - Set the default device for target regions
-=========================================================================
-
-_Description_:
- Set the default device for target regions without device clause.
- The argument shall be a nonnegative device number.
-
-_C/C++_:
- _Prototype_: 'void omp_set_default_device(int device_num);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_set_default_device(device_num)'
- 'integer device_num'
-
-_See also_:
- *note OMP_DEFAULT_DEVICE::, *note omp_get_default_device::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.23.
-
-
-File: libgomp.info, Node: omp_set_dynamic, Next: omp_set_max_active_levels, Prev: omp_set_default_device, Up: Runtime Library Routines
-
-2.24 'omp_set_dynamic' - Enable/disable dynamic teams
-=====================================================
-
-_Description_:
- Enable or disable the dynamic adjustment of the number of threads
- within a team. The function takes the language-specific equivalent
- of 'true' and 'false', where 'true' enables dynamic adjustment of
- team sizes and 'false' disables it.
-
-_C/C++_:
- _Prototype_: 'void omp_set_dynamic(int dynamic_threads);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_set_dynamic(dynamic_threads)'
- 'logical, intent(in) :: dynamic_threads'
-
-_See also_:
- *note OMP_DYNAMIC::, *note omp_get_dynamic::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.7.
-
-
-File: libgomp.info, Node: omp_set_max_active_levels, Next: omp_set_nested, Prev: omp_set_dynamic, Up: Runtime Library Routines
-
-2.25 'omp_set_max_active_levels' - Limits the number of active parallel regions
-===============================================================================
-
-_Description_:
- This function limits the maximum allowed number of nested, active
- parallel regions.
-
-_C/C++_
- _Prototype_: 'void omp_set_max_active_levels(int max_levels);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_set_max_active_levels(max_levels)'
- 'integer max_levels'
-
-_See also_:
- *note omp_get_max_active_levels::, *note omp_get_active_level::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.15.
-
-
-File: libgomp.info, Node: omp_set_nested, Next: omp_set_num_threads, Prev: omp_set_max_active_levels, Up: Runtime Library Routines
-
-2.26 'omp_set_nested' - Enable/disable nested parallel regions
-==============================================================
-
-_Description_:
- Enable or disable nested parallel regions, i.e., whether team
- members are allowed to create new teams. The function takes the
- language-specific equivalent of 'true' and 'false', where 'true'
- enables dynamic adjustment of team sizes and 'false' disables it.
-
-_C/C++_:
- _Prototype_: 'void omp_set_nested(int nested);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_set_nested(nested)'
- 'logical, intent(in) :: nested'
-
-_See also_:
- *note OMP_NESTED::, *note omp_get_nested::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.10.
-
-
-File: libgomp.info, Node: omp_set_num_threads, Next: omp_set_schedule, Prev: omp_set_nested, Up: Runtime Library Routines
-
-2.27 'omp_set_num_threads' - Set upper team size limit
-======================================================
-
-_Description_:
- Specifies the number of threads used by default in subsequent
- parallel sections, if those do not specify a 'num_threads' clause.
- The argument of 'omp_set_num_threads' shall be a positive integer.
-
-_C/C++_:
- _Prototype_: 'void omp_set_num_threads(int num_threads);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_set_num_threads(num_threads)'
- 'integer, intent(in) :: num_threads'
-
-_See also_:
- *note OMP_NUM_THREADS::, *note omp_get_num_threads::, *note
- omp_get_max_threads::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.1.
-
-
-File: libgomp.info, Node: omp_set_schedule, Next: omp_init_lock, Prev: omp_set_num_threads, Up: Runtime Library Routines
-
-2.28 'omp_set_schedule' - Set the runtime scheduling method
-===========================================================
-
-_Description_:
- Sets the runtime scheduling method. The KIND argument can have the
- value 'omp_sched_static', 'omp_sched_dynamic', 'omp_sched_guided'
- or 'omp_sched_auto'. Except for 'omp_sched_auto', the chunk size
- is set to the value of MODIFIER if positive, or to the default
- value if zero or negative. For 'omp_sched_auto' the MODIFIER
- argument is ignored.
-
-_C/C++_
- _Prototype_: 'void omp_set_schedule(omp_sched_t kind, int modifier);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_set_schedule(kind, modifier)'
- 'integer(kind=omp_sched_kind) kind'
- 'integer modifier'
-
-_See also_:
- *note omp_get_schedule:: *note OMP_SCHEDULE::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.2.12.
-
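
A small usage sketch for 'omp_set_schedule' and 'omp_get_schedule'; the chunk
size of 4 is an arbitrary illustrative value:

    #include <stdio.h>
    #include <omp.h>

    int
    main (void)
    {
      omp_sched_t kind;
      int chunk, i;

      /* Loops with schedule(runtime) will now use dynamic scheduling
         with a chunk size of 4.  */
      omp_set_schedule (omp_sched_dynamic, 4);
      omp_get_schedule (&kind, &chunk);
      printf ("kind = %d, chunk = %d\n", (int) kind, chunk);

    #pragma omp parallel for schedule(runtime)
      for (i = 0; i < 16; i++)
        printf ("iteration %d on thread %d\n", i, omp_get_thread_num ());
      return 0;
    }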
-
-File: libgomp.info, Node: omp_init_lock, Next: omp_set_lock, Prev: omp_set_schedule, Up: Runtime Library Routines
-
-2.29 'omp_init_lock' - Initialize simple lock
-=============================================
-
-_Description_:
- Initialize a simple lock. After initialization, the lock is in an
- unlocked state.
-
-_C/C++_:
- _Prototype_: 'void omp_init_lock(omp_lock_t *lock);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_init_lock(svar)'
- 'integer(omp_lock_kind), intent(out) :: svar'
-
-_See also_:
- *note omp_destroy_lock::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.3.1.
-
-
-File: libgomp.info, Node: omp_set_lock, Next: omp_test_lock, Prev: omp_init_lock, Up: Runtime Library Routines
-
-2.30 'omp_set_lock' - Wait for and set simple lock
-==================================================
-
-_Description_:
- Before setting a simple lock, the lock variable must be initialized
- by 'omp_init_lock'. The calling thread is blocked until the lock
- is available. If the lock is already held by the current thread, a
- deadlock occurs.
-
-_C/C++_:
- _Prototype_: 'void omp_set_lock(omp_lock_t *lock);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_set_lock(svar)'
- 'integer(omp_lock_kind), intent(inout) :: svar'
-
-_See also_:
- *note omp_init_lock::, *note omp_test_lock::, *note
- omp_unset_lock::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.3.3.
-
-
-File: libgomp.info, Node: omp_test_lock, Next: omp_unset_lock, Prev: omp_set_lock, Up: Runtime Library Routines
-
-2.31 'omp_test_lock' - Test and set simple lock if available
-============================================================
-
-_Description_:
- Before setting a simple lock, the lock variable must be initialized
- by 'omp_init_lock'. Contrary to 'omp_set_lock', 'omp_test_lock'
- does not block if the lock is not available. This function returns
- 'true' upon success, 'false' otherwise. Here, 'true' and 'false'
- represent their language-specific counterparts.
-
-_C/C++_:
- _Prototype_: 'int omp_test_lock(omp_lock_t *lock);'
-
-_Fortran_:
- _Interface_: 'logical function omp_test_lock(svar)'
- 'integer(omp_lock_kind), intent(inout) :: svar'
-
-_See also_:
-     *note omp_init_lock::, *note omp_set_lock::, *note omp_unset_lock::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.3.5.
-
-
-File: libgomp.info, Node: omp_unset_lock, Next: omp_destroy_lock, Prev: omp_test_lock, Up: Runtime Library Routines
-
-2.32 'omp_unset_lock' - Unset simple lock
-=========================================
-
-_Description_:
- A simple lock about to be unset must have been locked by
- 'omp_set_lock' or 'omp_test_lock' before. In addition, the lock
- must be held by the thread calling 'omp_unset_lock'. Then, the
- lock becomes unlocked. If one or more threads attempted to set the
- lock before, one of them is chosen to, again, set the lock to
- itself.
-
-_C/C++_:
- _Prototype_: 'void omp_unset_lock(omp_lock_t *lock);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_unset_lock(svar)'
- 'integer(omp_lock_kind), intent(inout) :: svar'
-
-_See also_:
- *note omp_set_lock::, *note omp_test_lock::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.3.4.
-
-
-File: libgomp.info, Node: omp_destroy_lock, Next: omp_init_nest_lock, Prev: omp_unset_lock, Up: Runtime Library Routines
-
-2.33 'omp_destroy_lock' - Destroy simple lock
-=============================================
-
-_Description_:
- Destroy a simple lock. In order to be destroyed, a simple lock
- must be in the unlocked state.
-
-_C/C++_:
- _Prototype_: 'void omp_destroy_lock(omp_lock_t *lock);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_destroy_lock(svar)'
- 'integer(omp_lock_kind), intent(inout) :: svar'
-
-_See also_:
- *note omp_init_lock::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.3.2.
-
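
A sketch tying the simple-lock routines above together; the shared counter is
only an illustrative use of the protected section:

    #include <stdio.h>
    #include <omp.h>

    int
    main (void)
    {
      omp_lock_t lock;
      int counter = 0;

      omp_init_lock (&lock);            /* starts in the unlocked state */

    #pragma omp parallel
      {
        omp_set_lock (&lock);           /* blocks until the lock is free */
        counter++;                      /* protected update */
        omp_unset_lock (&lock);
      }

      omp_destroy_lock (&lock);         /* must be unlocked at this point */
      printf ("counter = %d\n", counter);
      return 0;
    }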
-
-File: libgomp.info, Node: omp_init_nest_lock, Next: omp_set_nest_lock, Prev: omp_destroy_lock, Up: Runtime Library Routines
-
-2.34 'omp_init_nest_lock' - Initialize nested lock
-==================================================
-
-_Description_:
- Initialize a nested lock. After initialization, the lock is in an
- unlocked state and the nesting count is set to zero.
-
-_C/C++_:
- _Prototype_: 'void omp_init_nest_lock(omp_nest_lock_t *lock);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_init_nest_lock(nvar)'
- 'integer(omp_nest_lock_kind), intent(out) :: nvar'
-
-_See also_:
- *note omp_destroy_nest_lock::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.3.1.
-
-
-File: libgomp.info, Node: omp_set_nest_lock, Next: omp_test_nest_lock, Prev: omp_init_nest_lock, Up: Runtime Library Routines
-
-2.35 'omp_set_nest_lock' - Wait for and set nested lock
-=======================================================
-
-_Description_:
- Before setting a nested lock, the lock variable must be initialized
- by 'omp_init_nest_lock'. The calling thread is blocked until the
- lock is available. If the lock is already held by the current
- thread, the nesting count for the lock is incremented.
-
-_C/C++_:
- _Prototype_: 'void omp_set_nest_lock(omp_nest_lock_t *lock);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_set_nest_lock(nvar)'
- 'integer(omp_nest_lock_kind), intent(inout) :: nvar'
-
-_See also_:
- *note omp_init_nest_lock::, *note omp_unset_nest_lock::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.3.3.
-
-
-File: libgomp.info, Node: omp_test_nest_lock, Next: omp_unset_nest_lock, Prev: omp_set_nest_lock, Up: Runtime Library Routines
-
-2.36 'omp_test_nest_lock' - Test and set nested lock if available
-=================================================================
-
-_Description_:
- Before setting a nested lock, the lock variable must be initialized
- by 'omp_init_nest_lock'. Contrary to 'omp_set_nest_lock',
- 'omp_test_nest_lock' does not block if the lock is not available.
- If the lock is already held by the current thread, the new nesting
- count is returned. Otherwise, the return value equals zero.
-
-_C/C++_:
- _Prototype_: 'int omp_test_nest_lock(omp_nest_lock_t *lock);'
-
-_Fortran_:
- _Interface_: 'logical function omp_test_nest_lock(nvar)'
- 'integer(omp_nest_lock_kind), intent(inout) :: nvar'
-
-_See also_:
-     *note omp_init_nest_lock::, *note omp_set_nest_lock::, *note omp_unset_nest_lock::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.3.5.
-
-
-File: libgomp.info, Node: omp_unset_nest_lock, Next: omp_destroy_nest_lock, Prev: omp_test_nest_lock, Up: Runtime Library Routines
-
-2.37 'omp_unset_nest_lock' - Unset nested lock
-==============================================
-
-_Description_:
- A nested lock about to be unset must have been locked by
-     'omp_set_nest_lock' or 'omp_test_nest_lock' before.  In
-     addition, the lock must be held by the thread calling
-     'omp_unset_nest_lock'.  If the nesting count drops to zero, the
-     lock becomes unlocked.  If one or more threads attempted to set
- the lock before, one of them is chosen to, again, set the lock to
- itself.
-
-_C/C++_:
- _Prototype_: 'void omp_unset_nest_lock(omp_nest_lock_t *lock);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_unset_nest_lock(nvar)'
- 'integer(omp_nest_lock_kind), intent(inout) :: nvar'
-
-_See also_:
- *note omp_set_nest_lock::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.3.4.
-
-
-File: libgomp.info, Node: omp_destroy_nest_lock, Next: omp_get_wtick, Prev: omp_unset_nest_lock, Up: Runtime Library Routines
-
-2.38 'omp_destroy_nest_lock' - Destroy nested lock
-==================================================
-
-_Description_:
- Destroy a nested lock. In order to be destroyed, a nested lock
- must be in the unlocked state and its nesting count must equal
- zero.
-
-_C/C++_:
- _Prototype_: 'void omp_destroy_nest_lock(omp_nest_lock_t *);'
-
-_Fortran_:
- _Interface_: 'subroutine omp_destroy_nest_lock(nvar)'
- 'integer(omp_nest_lock_kind), intent(inout) :: nvar'
-
-_See also_:
- *note omp_init_lock::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.3.2.
-
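
A sketch of the nested-lock routines; the recursion depth of 3 is arbitrary
and only demonstrates that the owning thread may re-acquire the lock:

    #include <omp.h>

    static omp_nest_lock_t lock;

    static void
    step (int depth)
    {
      /* The owning thread may re-acquire a nested lock; each set
         increments the nesting count, each unset decrements it.  */
      omp_set_nest_lock (&lock);
      if (depth > 0)
        step (depth - 1);
      omp_unset_nest_lock (&lock);
    }

    int
    main (void)
    {
      omp_init_nest_lock (&lock);
    #pragma omp parallel
      step (3);
      omp_destroy_nest_lock (&lock);    /* nesting count is zero again */
      return 0;
    }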
-
-File: libgomp.info, Node: omp_get_wtick, Next: omp_get_wtime, Prev: omp_destroy_nest_lock, Up: Runtime Library Routines
-
-2.39 'omp_get_wtick' - Get timer precision
-==========================================
-
-_Description_:
- Gets the timer precision, i.e., the number of seconds between two
- successive clock ticks.
-
-_C/C++_:
- _Prototype_: 'double omp_get_wtick(void);'
-
-_Fortran_:
- _Interface_: 'double precision function omp_get_wtick()'
-
-_See also_:
- *note omp_get_wtime::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.4.2.
-
-
-File: libgomp.info, Node: omp_get_wtime, Prev: omp_get_wtick, Up: Runtime Library Routines
-
-2.40 'omp_get_wtime' - Elapsed wall clock time
-==============================================
-
-_Description_:
-     Elapsed wall clock time in seconds.  The time is measured per
-     thread; no guarantee can be made that two distinct threads measure
- the same time. Time is measured from some "time in the past",
- which is an arbitrary time guaranteed not to change during the
- execution of the program.
-
-_C/C++_:
- _Prototype_: 'double omp_get_wtime(void);'
-
-_Fortran_:
- _Interface_: 'double precision function omp_get_wtime()'
-
-_See also_:
- *note omp_get_wtick::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 3.4.1.
-
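
A timing sketch using 'omp_get_wtime' and 'omp_get_wtick'; the summed series
is only a stand-in workload:

    #include <stdio.h>
    #include <omp.h>

    int
    main (void)
    {
      double start, elapsed, sum = 0.0;
      int i;

      printf ("timer resolution: %g s\n", omp_get_wtick ());

      start = omp_get_wtime ();        /* per-thread wall-clock reference */
    #pragma omp parallel for reduction(+:sum)
      for (i = 1; i <= 10000000; i++)
        sum += 1.0 / i;
      elapsed = omp_get_wtime () - start;

      printf ("sum = %f, elapsed = %f s\n", sum, elapsed);
      return 0;
    }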
-
-File: libgomp.info, Node: Environment Variables, Next: The libgomp ABI, Prev: Runtime Library Routines, Up: Top
-
-3 Environment Variables
-***********************
-
-The environment variables beginning with 'OMP_' are defined by
-section 4 of the OpenMP specification in version 4.0, while those
-beginning with 'GOMP_' are GNU extensions.
-
-* Menu:
-
-* OMP_CANCELLATION:: Set whether cancellation is activated
-* OMP_DISPLAY_ENV:: Show OpenMP version and environment variables
-* OMP_DEFAULT_DEVICE:: Set the device used in target regions
-* OMP_DYNAMIC:: Dynamic adjustment of threads
-* OMP_MAX_ACTIVE_LEVELS:: Set the maximum number of nested parallel regions
-* OMP_NESTED:: Nested parallel regions
-* OMP_NUM_THREADS:: Specifies the number of threads to use
-* OMP_PROC_BIND:: Whether threads may be moved between CPUs
-* OMP_PLACES:: Specifies on which CPUs the threads should be placed
-* OMP_STACKSIZE:: Set default thread stack size
-* OMP_SCHEDULE:: How threads are scheduled
-* OMP_THREAD_LIMIT:: Set the maximum number of threads
-* OMP_WAIT_POLICY:: How waiting threads are handled
-* GOMP_CPU_AFFINITY:: Bind threads to specific CPUs
-* GOMP_STACKSIZE:: Set default thread stack size
-* GOMP_SPINCOUNT:: Set the busy-wait spin count
-
-
-File: libgomp.info, Node: OMP_CANCELLATION, Next: OMP_DISPLAY_ENV, Up: Environment Variables
-
-3.1 'OMP_CANCELLATION' - Set whether cancellation is activated
-==============================================================
-
-_Description_:
-     If set to 'TRUE', cancellation is activated.  If set to 'FALSE'
- or if unset, cancellation is disabled and the 'cancel' construct is
- ignored.
-
-_See also_:
- *note omp_get_cancellation::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.11
-
-
-File: libgomp.info, Node: OMP_DISPLAY_ENV, Next: OMP_DEFAULT_DEVICE, Prev: OMP_CANCELLATION, Up: Environment Variables
-
-3.2 'OMP_DISPLAY_ENV' - Show OpenMP version and environment variables
-=====================================================================
-
-_Description_:
- If set to 'TRUE', the OpenMP version number and the values
- associated with the OpenMP environment variables are printed to
- 'stderr'. If set to 'VERBOSE', it additionally shows the value of
- the environment variables which are GNU extensions. If undefined
- or set to 'FALSE', this information will not be shown.
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.12
-
-
-File: libgomp.info, Node: OMP_DEFAULT_DEVICE, Next: OMP_DYNAMIC, Prev: OMP_DISPLAY_ENV, Up: Environment Variables
-
-3.3 'OMP_DEFAULT_DEVICE' - Set the device used in target regions
-================================================================
-
-_Description_:
- Set to choose the device which is used in a 'target' region, unless
- the value is overridden by 'omp_set_default_device' or by a
- 'device' clause. The value shall be the nonnegative device number.
- If no device with the given device number exists, the code is
- executed on the host. If unset, device number 0 will be used.
-
-_See also_:
- *note omp_get_default_device::, *note omp_set_default_device::,
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.13
-
-
-File: libgomp.info, Node: OMP_DYNAMIC, Next: OMP_MAX_ACTIVE_LEVELS, Prev: OMP_DEFAULT_DEVICE, Up: Environment Variables
-
-3.4 'OMP_DYNAMIC' - Dynamic adjustment of threads
-=================================================
-
-_Description_:
- Enable or disable the dynamic adjustment of the number of threads
- within a team. The value of this environment variable shall be
- 'TRUE' or 'FALSE'. If undefined, dynamic adjustment is disabled by
- default.
-
-_See also_:
- *note omp_set_dynamic::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.3
-
-
-File: libgomp.info, Node: OMP_MAX_ACTIVE_LEVELS, Next: OMP_NESTED, Prev: OMP_DYNAMIC, Up: Environment Variables
-
-3.5 'OMP_MAX_ACTIVE_LEVELS' - Set the maximum number of nested parallel regions
-===============================================================================
-
-_Description_:
- Specifies the initial value for the maximum number of nested
- parallel regions. The value of this variable shall be a positive
- integer. If undefined, the number of active levels is unlimited.
-
-_See also_:
- *note omp_set_max_active_levels::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.9
-
-
-File: libgomp.info, Node: OMP_NESTED, Next: OMP_NUM_THREADS, Prev: OMP_MAX_ACTIVE_LEVELS, Up: Environment Variables
-
-3.6 'OMP_NESTED' - Nested parallel regions
-==========================================
-
-_Description_:
- Enable or disable nested parallel regions, i.e., whether team
- members are allowed to create new teams. The value of this
- environment variable shall be 'TRUE' or 'FALSE'. If undefined,
- nested parallel regions are disabled by default.
-
-_See also_:
- *note omp_set_nested::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.6
-
-
-File: libgomp.info, Node: OMP_NUM_THREADS, Next: OMP_PLACES, Prev: OMP_NESTED, Up: Environment Variables
-
-3.7 'OMP_NUM_THREADS' - Specifies the number of threads to use
-==============================================================
-
-_Description_:
- Specifies the default number of threads to use in parallel regions.
- The value of this variable shall be a comma-separated list of
- positive integers; each value specifies the number of threads to use
- for the corresponding nesting level. If undefined, one thread per
- CPU is used.
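-
- For instance (a minimal sketch, not part of the original manual; it
- assumes the program is run with 'OMP_NESTED=TRUE' and, say,
- 'OMP_NUM_THREADS=4,2'):
-
-     #include <stdio.h>
-     #include <omp.h>
-
-     int
-     main (void)
-     {
-       #pragma omp parallel        /* first nesting level: 4 threads */
-       {
-         #pragma omp single
-         printf ("outer team: %d threads\n", omp_get_num_threads ());
-         #pragma omp parallel      /* second nesting level: 2 threads */
-         {
-           #pragma omp single
-           printf ("inner team: %d threads\n", omp_get_num_threads ());
-         }
-       }
-       return 0;
-     }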
-
-_See also_:
- *note omp_set_num_threads::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.2
-
-
-File: libgomp.info, Node: OMP_PLACES, Next: OMP_PROC_BIND, Prev: OMP_NUM_THREADS, Up: Environment Variables
-
-3.8 'OMP_PLACES' - Specifies on which CPUs the threads should be placed
-=======================================================================
-
-_Description_:
- The thread placement can either be specified using an abstract name
- or by an explicit list of the places. The abstract names 'threads',
- 'cores' and 'sockets' can optionally be followed by a positive
- number in parentheses, which denotes how many places shall be
- created. With 'threads' each place corresponds to a single hardware
- thread; with 'cores' each place corresponds to a single core with
- the corresponding number of hardware threads; and with 'sockets'
- each place corresponds to a single socket. The resulting placement
- can be shown by setting the 'OMP_DISPLAY_ENV' environment variable.
-
- Alternatively, the placement can be specified explicitly as a
- comma-separated list of places. A place is specified by a set of
- nonnegative numbers in curly braces, denoting the hardware threads.
- The hardware threads belonging to a place can either be specified as
- a comma-separated list of nonnegative thread numbers or using an
- interval. Multiple places can likewise be specified either by a
- comma-separated list of places or by an interval. To specify an
- interval, a colon followed by the count is placed after the hardware
- thread number or the place. Optionally, the length can be followed
- by a colon and the stride number - otherwise a unit stride is
- assumed. For instance, the following specify the same places list:
- '"{0,1,2}, {3,4,6}, {7,8,9}, {10,11,12}"'; '"{0:3}, {3:3}, {7:3},
- {10:3}"'; and '"{0:2}:4:3"'.
-
- If 'OMP_PLACES' and 'GOMP_CPU_AFFINITY' are unset and
- 'OMP_PROC_BIND' is either unset or 'false', threads may be moved
- between CPUs following no placement policy.
-
-_See also_:
- *note OMP_PROC_BIND::, *note GOMP_CPU_AFFINITY::, *note
- omp_get_proc_bind::, *note OMP_DISPLAY_ENV::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.5
-
-
-File: libgomp.info, Node: OMP_PROC_BIND, Next: OMP_SCHEDULE, Prev: OMP_PLACES, Up: Environment Variables
-
-3.9 'OMP_PROC_BIND' - Whether threads may be moved between CPUs
-===============================================================
-
-_Description_:
- Specifies whether threads may be moved between processors. If set
- to 'TRUE', OpenMP threads should not be moved; if set to 'FALSE',
- they may be moved. Alternatively, a comma-separated list with the
- values 'MASTER', 'CLOSE' and 'SPREAD' can be used to specify the
- thread affinity policy for the corresponding nesting level. With
- 'MASTER' the worker threads are in the same place partition as the
- master thread. With 'CLOSE' they are kept close to the master
- thread in contiguous place partitions. And with 'SPREAD' a sparse
- distribution across the place partitions is used.
-
- When undefined, 'OMP_PROC_BIND' defaults to 'TRUE' when
- 'OMP_PLACES' or 'GOMP_CPU_AFFINITY' is set and 'FALSE' otherwise.
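-
- A minimal sketch (not part of the original manual) that inspects
- the resulting binding policy at run time via 'omp_get_proc_bind':
-
-     #include <stdio.h>
-     #include <omp.h>
-
-     int
-     main (void)
-     {
-       switch (omp_get_proc_bind ())
-         {
-         case omp_proc_bind_false:  puts ("no binding");     break;
-         case omp_proc_bind_true:   puts ("bound");          break;
-         case omp_proc_bind_master: puts ("master policy");  break;
-         case omp_proc_bind_close:  puts ("close policy");   break;
-         case omp_proc_bind_spread: puts ("spread policy");  break;
-         }
-       return 0;
-     }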
-
-_See also_:
- *note OMP_PLACES::, *note GOMP_CPU_AFFINITY::, *note
- omp_get_proc_bind::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.4
-
-
-File: libgomp.info, Node: OMP_SCHEDULE, Next: OMP_STACKSIZE, Prev: OMP_PROC_BIND, Up: Environment Variables
-
-3.10 'OMP_SCHEDULE' - How threads are scheduled
-===============================================
-
-_Description_:
- Allows specifying the 'schedule type' and 'chunk size'. The value
- of the variable shall have the form 'type[,chunk]', where 'type' is
- one of 'static', 'dynamic', 'guided' or 'auto'. The optional
- 'chunk' size shall be a positive integer. If undefined, dynamic
- scheduling and a chunk size of 1 are used.
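-
- For example (a minimal sketch, not part of the original manual), a
- loop with 'schedule(runtime)' picks up this setting:
-
-     void
-     scale (int n, double *x, double a)
-     {
-       int i;
-       /* Schedule type and chunk size are taken from OMP_SCHEDULE
-          (or from a prior call to omp_set_schedule) at run time.  */
-       #pragma omp parallel for schedule(runtime)
-       for (i = 0; i < n; i++)
-         x[i] *= a;
-     }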
-
-_See also_:
- *note omp_set_schedule::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Sections 2.7.1
- and 4.1
-
-
-File: libgomp.info, Node: OMP_STACKSIZE, Next: OMP_THREAD_LIMIT, Prev: OMP_SCHEDULE, Up: Environment Variables
-
-3.11 'OMP_STACKSIZE' - Set default thread stack size
-====================================================
-
-_Description_:
- Set the default thread stack size in kilobytes, unless the number
- is suffixed by 'B', 'K', 'M' or 'G', in which case the size is,
- respectively, in bytes, kilobytes, megabytes or gigabytes. This is
- different from 'pthread_attr_setstacksize' which gets the number of
- bytes as an argument. If the stack size cannot be set due to
- system constraints, an error is reported and the initial stack size
- is left unchanged. If undefined, the stack size is system
- dependent.
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.7
-
-
-File: libgomp.info, Node: OMP_THREAD_LIMIT, Next: OMP_WAIT_POLICY, Prev: OMP_STACKSIZE, Up: Environment Variables
-
-3.12 'OMP_THREAD_LIMIT' - Set the maximum number of threads
-===========================================================
-
-_Description_:
- Specifies the number of threads to use for the whole program. The
- value of this variable shall be a positive integer. If undefined,
- the number of threads is not limited.
-
-_See also_:
- *note OMP_NUM_THREADS::, *note omp_get_thread_limit::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.10
-
-
-File: libgomp.info, Node: OMP_WAIT_POLICY, Next: GOMP_CPU_AFFINITY, Prev: OMP_THREAD_LIMIT, Up: Environment Variables
-
-3.13 'OMP_WAIT_POLICY' - How waiting threads are handled
-========================================================
-
-_Description_:
- Specifies whether waiting threads should be active or passive. If
- the value is 'PASSIVE', waiting threads should not consume CPU
- power while waiting; if the value is 'ACTIVE', they should. If
- undefined, threads wait actively for a short time before waiting
- passively.
-
-_See also_:
- *note GOMP_SPINCOUNT::
-
-_Reference_:
- OpenMP specification v4.0 (http://www.openmp.org/), Section 4.8
-
-
-File: libgomp.info, Node: GOMP_CPU_AFFINITY, Next: GOMP_STACKSIZE, Prev: OMP_WAIT_POLICY, Up: Environment Variables
-
-3.14 'GOMP_CPU_AFFINITY' - Bind threads to specific CPUs
-========================================================
-
-_Description_:
- Binds threads to specific CPUs. The variable should contain a
- space-separated or comma-separated list of CPUs. This list may
- contain different kinds of entries: either single CPU numbers in
- any order, a range of CPUs (M-N) or a range with some stride
- (M-N:S). CPU numbers are zero based. For example,
- 'GOMP_CPU_AFFINITY="0 3 1-2 4-15:2"' will bind the initial thread
- to CPU 0, the second to CPU 3, the third to CPU 1, the fourth to
- CPU 2, the fifth to CPU 4, the sixth through tenth to CPUs 6, 8,
- 10, 12, and 14 respectively and then start assigning back from the
- beginning of the list. 'GOMP_CPU_AFFINITY=0' binds all threads to
- CPU 0.
-
- There is no GNU OpenMP library routine to determine whether a CPU
- affinity specification is in effect. As a workaround,
- language-specific library functions, e.g., 'getenv' in C or
- 'GET_ENVIRONMENT_VARIABLE' in Fortran, may be used to query the
- setting of the 'GOMP_CPU_AFFINITY' environment variable. A defined
- CPU affinity on startup cannot be changed or disabled during the
- runtime of the application.
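-
- A minimal sketch of that workaround (not part of the original
- manual):
-
-     #include <stdio.h>
-     #include <stdlib.h>
-
-     int
-     main (void)
-     {
-       const char *affinity = getenv ("GOMP_CPU_AFFINITY");
-       if (affinity)
-         printf ("GOMP_CPU_AFFINITY=\"%s\"\n", affinity);
-       else
-         printf ("GOMP_CPU_AFFINITY is not set\n");
-       return 0;
-     }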
-
- If both 'GOMP_CPU_AFFINITY' and 'OMP_PROC_BIND' are set,
- 'OMP_PROC_BIND' has a higher precedence. If neither
- 'GOMP_CPU_AFFINITY' nor 'OMP_PLACES' has been set and
- 'OMP_PROC_BIND' is unset, or when 'OMP_PROC_BIND' is set to
- 'FALSE', the host system will handle the assignment of threads to
- CPUs.
-
-_See also_:
- *note OMP_PLACES::, *note OMP_PROC_BIND::
-
-
-File: libgomp.info, Node: GOMP_STACKSIZE, Next: GOMP_SPINCOUNT, Prev: GOMP_CPU_AFFINITY, Up: Environment Variables
-
-3.15 'GOMP_STACKSIZE' - Set default thread stack size
-=====================================================
-
-_Description_:
- Set the default thread stack size in kilobytes. This is different
- from 'pthread_attr_setstacksize' which gets the number of bytes as
- an argument. If the stack size cannot be set due to system
- constraints, an error is reported and the initial stack size is
- left unchanged. If undefined, the stack size is system dependent.
-
-_See also_:
- *note OMP_STACKSIZE::
-
-_Reference_:
- GCC Patches Mailinglist
- (http://gcc.gnu.org/ml/gcc-patches/2006-06/msg00493.html), GCC
- Patches Mailinglist
- (http://gcc.gnu.org/ml/gcc-patches/2006-06/msg00496.html)
-
-
-File: libgomp.info, Node: GOMP_SPINCOUNT, Prev: GOMP_STACKSIZE, Up: Environment Variables
-
-3.16 'GOMP_SPINCOUNT' - Set the busy-wait spin count
-====================================================
-
-_Description_:
- Determines how long a thread waits actively, consuming CPU power,
- before waiting passively without consuming CPU power. The value
- may be either 'INFINITE' or 'INFINITY' to always wait actively, or
- an integer which gives the number of spins of the busy-wait loop.
- The integer may optionally be followed by the following suffixes
- acting as multiplication factors: 'k' (kilo, thousand), 'M' (mega,
- million), 'G' (giga, billion), or 'T' (tera, trillion). If
- undefined, 0 is used when 'OMP_WAIT_POLICY' is 'PASSIVE', 300,000
- is used when 'OMP_WAIT_POLICY' is undefined and 30 billion is used
- when 'OMP_WAIT_POLICY' is 'ACTIVE'. If there are more OpenMP
- threads than available CPUs, 1000 and 100 spins are used for
- 'OMP_WAIT_POLICY' being 'ACTIVE' or undefined, respectively; unless
- 'GOMP_SPINCOUNT' is lower or 'OMP_WAIT_POLICY' is 'PASSIVE'.
-
-_See also_:
- *note OMP_WAIT_POLICY::
-
-
-File: libgomp.info, Node: The libgomp ABI, Next: Reporting Bugs, Prev: Environment Variables, Up: Top
-
-4 The libgomp ABI
-*****************
-
-The following sections present notes on the external ABI as presented by
-libgomp. Only maintainers should need them.
-
-* Menu:
-
-* Implementing MASTER construct::
-* Implementing CRITICAL construct::
-* Implementing ATOMIC construct::
-* Implementing FLUSH construct::
-* Implementing BARRIER construct::
-* Implementing THREADPRIVATE construct::
-* Implementing PRIVATE clause::
-* Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses::
-* Implementing REDUCTION clause::
-* Implementing PARALLEL construct::
-* Implementing FOR construct::
-* Implementing ORDERED construct::
-* Implementing SECTIONS construct::
-* Implementing SINGLE construct::
-
-
-File: libgomp.info, Node: Implementing MASTER construct, Next: Implementing CRITICAL construct, Up: The libgomp ABI
-
-4.1 Implementing MASTER construct
-=================================
-
- if (omp_get_thread_num () == 0)
- block
-
- Alternatively, we could generate two copies of the parallel subfunction and
-only include this in the version run by the master thread. Surely this
-is not worthwhile though...
-
-
-File: libgomp.info, Node: Implementing CRITICAL construct, Next: Implementing ATOMIC construct, Prev: Implementing MASTER construct, Up: The libgomp ABI
-
-4.2 Implementing CRITICAL construct
-===================================
-
-Without a specified name,
-
- void GOMP_critical_start (void);
- void GOMP_critical_end (void);
-
- so that we don't get COPY relocations from libgomp to the main
-application.
-
- With a specified name, use omp_set_lock and omp_unset_lock with name
-being transformed into a variable declared like
-
- omp_lock_t gomp_critical_user_<name> __attribute__((common))
-
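- As an illustrative sketch (not part of the original text, using the
-hypothetical name 'foo'), a '#pragma omp critical (foo)' around
-'body' would thus be lowered to something like
-
- omp_lock_t gomp_critical_user_foo __attribute__((common));
-
- omp_set_lock (&gomp_critical_user_foo);
- body;
- omp_unset_lock (&gomp_critical_user_foo);
-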
- Ideally the ABI would specify that all zero is a valid unlocked
-state, and so we wouldn't need to initialize this at startup.
-
-
-File: libgomp.info, Node: Implementing ATOMIC construct, Next: Implementing FLUSH construct, Prev: Implementing CRITICAL construct, Up: The libgomp ABI
-
-4.3 Implementing ATOMIC construct
-=================================
-
-The target should implement the '__sync' builtins.
-
- Failing that we could add
-
- void GOMP_atomic_enter (void)
- void GOMP_atomic_exit (void)
-
- which reuses the regular lock code, but with yet another lock object
-private to the library.
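-
- As an illustrative sketch (not part of the original text), a simple
-atomic update such as
-
- #pragma omp atomic
- x += 1;
-
- can then be expanded by the compiler into the corresponding builtin:
-
- __sync_fetch_and_add (&x, 1);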
-
-
-File: libgomp.info, Node: Implementing FLUSH construct, Next: Implementing BARRIER construct, Prev: Implementing ATOMIC construct, Up: The libgomp ABI
-
-4.4 Implementing FLUSH construct
-================================
-
-Expands to the '__sync_synchronize' builtin.
-
-
-File: libgomp.info, Node: Implementing BARRIER construct, Next: Implementing THREADPRIVATE construct, Prev: Implementing FLUSH construct, Up: The libgomp ABI
-
-4.5 Implementing BARRIER construct
-==================================
-
- void GOMP_barrier (void)
-
-
-File: libgomp.info, Node: Implementing THREADPRIVATE construct, Next: Implementing PRIVATE clause, Prev: Implementing BARRIER construct, Up: The libgomp ABI
-
-4.6 Implementing THREADPRIVATE construct
-========================================
-
-In _most_ cases we can map this directly to '__thread', except that OMP
-allows constructors for C++ objects. We can either refuse to support
-this (how often is it used?) or we can implement something akin to
-.ctors.
-
- Even more ideally, this ctor feature is handled by extensions to the
-main pthreads library. Failing that, we can have a set of entry points
-to register ctor functions to be called.
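-
- For the simple case without constructors, a sketch of the mapping
-(not part of the original text) is
-
- int tp;
- #pragma omp threadprivate (tp)
-
- which becomes
-
- __thread int tp;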
-
-
-File: libgomp.info, Node: Implementing PRIVATE clause, Next: Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses, Prev: Implementing THREADPRIVATE construct, Up: The libgomp ABI
-
-4.7 Implementing PRIVATE clause
-===============================
-
-In association with a PARALLEL, or within the lexical extent of a
-PARALLEL block, the variable becomes a local variable in the parallel
-subfunction.
-
- In association with FOR or SECTIONS blocks, create a new automatic
-variable within the current function. This preserves the semantics of
-new variable creation.
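-
- As an illustrative sketch (not part of the original text), for
-
- #pragma omp parallel private (x)
- body;
-
- the variable 'x' simply becomes an uninitialized local in the
-parallel subfunction:
-
- void subfunction (void *data)
- {
- int x; /* private copy, one per thread */
- body;
- }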
-
-
-File: libgomp.info, Node: Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses, Next: Implementing REDUCTION clause, Prev: Implementing PRIVATE clause, Up: The libgomp ABI
-
-4.8 Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses
-========================================================================
-
-This seems simple enough for PARALLEL blocks. Create a private struct
-for communicating between the parent and subfunction. In the parent,
-copy in values for scalar and "small" structs; copy in addresses for
-other TREE_ADDRESSABLE types. In the subfunction, copy the value into
-the local variable.
-
- It is not clear what to do with bare FOR or SECTION blocks. The only
-thing I can figure is that we do something like:
-
- #pragma omp for firstprivate(x) lastprivate(y)
- for (int i = 0; i < n; ++i)
- body;
-
- which becomes
-
- {
- int x = x, y;
-
- // for stuff
-
- if (i == n)
- y = y;
- }
-
- where the "x=x" and "y=y" assignments actually have different uids
-for the two variables, i.e. not something you could write directly in
-C. Presumably this only makes sense if the "outer" x and y are global
-variables.
-
- COPYPRIVATE would work the same way, except the structure broadcast
-would have to happen via SINGLE machinery instead.
-
-
-File: libgomp.info, Node: Implementing REDUCTION clause, Next: Implementing PARALLEL construct, Prev: Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses, Up: The libgomp ABI
-
-4.9 Implementing REDUCTION clause
-=================================
-
-The private struct mentioned in the previous section should have a
-pointer to an array of the type of the variable, indexed by the thread's
-TEAM_ID. The thread stores its final value into the array, and after
-the barrier, the master thread iterates over the array to collect the
-values.
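-
- As an illustrative sketch (not part of the original text, with
-hypothetical variable names), a 'reduction(+:sum)' clause then
-amounts to something like
-
- struct shared { long *partial; /* one slot per team member */ };
-
- /* in each thread, at the end of the construct */
- data->partial[omp_get_thread_num ()] = local_sum;
- GOMP_barrier ();
-
- /* in the master thread, after the barrier */
- for (i = 0; i < omp_get_num_threads (); i++)
- sum += data->partial[i];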
-
-
-File: libgomp.info, Node: Implementing PARALLEL construct, Next: Implementing FOR construct, Prev: Implementing REDUCTION clause, Up: The libgomp ABI
-
-4.10 Implementing PARALLEL construct
-====================================
-
- #pragma omp parallel
- {
- body;
- }
-
- becomes
-
- void subfunction (void *data)
- {
- use data;
- body;
- }
-
- setup data;
- GOMP_parallel_start (subfunction, &data, num_threads);
- subfunction (&data);
- GOMP_parallel_end ();
-
- void GOMP_parallel_start (void (*fn)(void *), void *data, unsigned num_threads)
-
- The FN argument is the subfunction to be run in parallel.
-
- The DATA argument is a pointer to a structure used to communicate
-data in and out of the subfunction, as discussed above with respect to
-FIRSTPRIVATE et al.
-
- The NUM_THREADS argument is 1 if an IF clause is present and false,
-or the value of the NUM_THREADS clause, if present, or 0.
-
- The function needs to create the appropriate number of threads and/or
-launch them from the dock. It needs to create the team structure and
-assign team ids.
-
- void GOMP_parallel_end (void)
-
- Tears down the team and returns us to the previous
-'omp_in_parallel()' state.
-
-
-File: libgomp.info, Node: Implementing FOR construct, Next: Implementing ORDERED construct, Prev: Implementing PARALLEL construct, Up: The libgomp ABI
-
-4.11 Implementing FOR construct
-===============================
-
- #pragma omp parallel for
- for (i = lb; i <= ub; i++)
- body;
-
- becomes
-
- void subfunction (void *data)
- {
- long _s0, _e0;
- while (GOMP_loop_static_next (&_s0, &_e0))
- {
- long _e1 = _e0, i;
- for (i = _s0; i < _e1; i++)
- body;
- }
- GOMP_loop_end_nowait ();
- }
-
- GOMP_parallel_loop_static (subfunction, NULL, 0, lb, ub+1, 1, 0);
- subfunction (NULL);
- GOMP_parallel_end ();
-
- #pragma omp for schedule(runtime)
- for (i = 0; i < n; i++)
- body;
-
- becomes
-
- {
- long i, _s0, _e0;
- if (GOMP_loop_runtime_start (0, n, 1, &_s0, &_e0))
- do {
- long _e1 = _e0;
- for (i = _s0; i < _e1; i++)
- body;
- } while (GOMP_loop_runtime_next (&_s0, &_e0));
- GOMP_loop_end ();
- }
-
- Note that while it looks like there is trickiness to propagating a
-non-constant STEP, there isn't really. We're explicitly allowed to
-evaluate it as many times as we want, and any variables involved should
-automatically be handled as PRIVATE or SHARED like any other variables.
-So the expression should remain evaluable in the subfunction. We can
-also pull it into a local variable if we like, but since it's supposed
-to remain unchanged, we can just as well leave it alone.
-
- If we have SCHEDULE(STATIC), and no ORDERED, then we ought to be able
-to get away with no work-sharing context at all, since we can simply
-perform the arithmetic directly in each thread to divide up the
-iterations. Which would mean that we wouldn't need to call any of these
-routines.
-
- There are separate routines for handling loops with an ORDERED
-clause. Bookkeeping for that is non-trivial...
-
-
-File: libgomp.info, Node: Implementing ORDERED construct, Next: Implementing SECTIONS construct, Prev: Implementing FOR construct, Up: The libgomp ABI
-
-4.12 Implementing ORDERED construct
-===================================
-
- void GOMP_ordered_start (void)
- void GOMP_ordered_end (void)
-
-
-File: libgomp.info, Node: Implementing SECTIONS construct, Next: Implementing SINGLE construct, Prev: Implementing ORDERED construct, Up: The libgomp ABI
-
-4.13 Implementing SECTIONS construct
-====================================
-
-A block such as
-
- #pragma omp sections
- {
- #pragma omp section
- stmt1;
- #pragma omp section
- stmt2;
- #pragma omp section
- stmt3;
- }
-
- becomes
-
- for (i = GOMP_sections_start (3); i != 0; i = GOMP_sections_next ())
- switch (i)
- {
- case 1:
- stmt1;
- break;
- case 2:
- stmt2;
- break;
- case 3:
- stmt3;
- break;
- }
- GOMP_barrier ();
-
-
-File: libgomp.info, Node: Implementing SINGLE construct, Prev: Implementing SECTIONS construct, Up: The libgomp ABI
-
-4.14 Implementing SINGLE construct
-==================================
-
-A block like
-
- #pragma omp single
- {
- body;
- }
-
- becomes
-
- if (GOMP_single_start ())
- body;
- GOMP_barrier ();
-
- while
-
- #pragma omp single copyprivate(x)
- body;
-
- becomes
-
- datap = GOMP_single_copy_start ();
- if (datap == NULL)
- {
- body;
- data.x = x;
- GOMP_single_copy_end (&data);
- }
- else
- x = datap->x;
- GOMP_barrier ();
-
-
-File: libgomp.info, Node: Reporting Bugs, Next: Copying, Prev: The libgomp ABI, Up: Top
-
-5 Reporting Bugs
-****************
-
-Bugs in the GNU OpenMP implementation should be reported via Bugzilla
-(http://gcc.gnu.org/bugzilla/). For all cases, please add "openmp" to
-the keywords field in the bug report.
-
-
-File: libgomp.info, Node: Copying, Next: GNU Free Documentation License, Prev: Reporting Bugs, Up: Top
-
-GNU General Public License
-**************************
-
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
-
- Everyone is permitted to copy and distribute verbatim copies of this
- license document, but changing it is not allowed.
-
-Preamble
-========
-
-The GNU General Public License is a free, copyleft license for software
-and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program-to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
-TERMS AND CONDITIONS
-====================
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public
- License.
-
- "Copyright" also means copyright-like laws that apply to other
- kinds of works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
- License. Each licensee is addressed as "you". "Licensees" and
- "recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the
- work in a fashion requiring copyright permission, other than the
- making of an exact copy. The resulting work is called a "modified
- version" of the earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work
- based on the Program.
-
- To "propagate" a work means to do anything with it that, without
- permission, would make you directly or secondarily liable for
- infringement under applicable copyright law, except executing it on
- a computer or modifying a private copy. Propagation includes
- copying, distribution (with or without modification), making
- available to the public, and in some countries other activities as
- well.
-
- To "convey" a work means any kind of propagation that enables other
- parties to make or receive copies. Mere interaction with a user
- through a computer network, with no transfer of a copy, is not
- conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
- to the extent that it includes a convenient and prominently visible
- feature that (1) displays an appropriate copyright notice, and (2)
- tells the user that there is no warranty for the work (except to
- the extent that warranties are provided), that licensees may convey
- the work under this License, and how to view a copy of this
- License. If the interface presents a list of user commands or
- options, such as a menu, a prominent item in the list meets this
- criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
- for making modifications to it. "Object code" means any non-source
- form of a work.
-
- A "Standard Interface" means an interface that either is an
- official standard defined by a recognized standards body, or, in
- the case of interfaces specified for a particular programming
- language, one that is widely used among developers working in that
- language.
-
- The "System Libraries" of an executable work include anything,
- other than the work as a whole, that (a) is included in the normal
- form of packaging a Major Component, but which is not part of that
- Major Component, and (b) serves only to enable use of the work with
- that Major Component, or to implement a Standard Interface for
- which an implementation is available to the public in source code
- form. A "Major Component", in this context, means a major
- essential component (kernel, window system, and so on) of the
- specific operating system (if any) on which the executable work
- runs, or a compiler used to produce the work, or an object code
- interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
- the source code needed to generate, install, and (for an executable
- work) run the object code and to modify the work, including scripts
- to control those activities. However, it does not include the
- work's System Libraries, or general-purpose tools or generally
- available free programs which are used unmodified in performing
- those activities but which are not part of the work. For example,
- Corresponding Source includes interface definition files associated
- with source files for the work, and the source code for shared
- libraries and dynamically linked subprograms that the work is
- specifically designed to require, such as by intimate data
- communication or control flow between those subprograms and other
- parts of the work.
-
- The Corresponding Source need not include anything that users can
- regenerate automatically from other parts of the Corresponding
- Source.
-
- The Corresponding Source for a work in source code form is that
- same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
- copyright on the Program, and are irrevocable provided the stated
- conditions are met. This License explicitly affirms your unlimited
- permission to run the unmodified Program. The output from running
- a covered work is covered by this License only if the output, given
- its content, constitutes a covered work. This License acknowledges
- your rights of fair use or other equivalent, as provided by
- copyright law.
-
- You may make, run and propagate covered works that you do not
- convey, without conditions so long as your license otherwise
- remains in force. You may convey covered works to others for the
- sole purpose of having them make modifications exclusively for you,
- or provide you with facilities for running those works, provided
- that you comply with the terms of this License in conveying all
- material for which you do not control copyright. Those thus making
- or running the covered works for you must do so exclusively on your
- behalf, under your direction and control, on terms that prohibit
- them from making any copies of your copyrighted material outside
- their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
- the conditions stated below. Sublicensing is not allowed; section
- 10 makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
- measure under any applicable law fulfilling obligations under
- article 11 of the WIPO copyright treaty adopted on 20 December
- 1996, or similar laws prohibiting or restricting circumvention of
- such measures.
-
- When you convey a covered work, you waive any legal power to forbid
- circumvention of technological measures to the extent such
- circumvention is effected by exercising rights under this License
- with respect to the covered work, and you disclaim any intention to
- limit operation or modification of the work as a means of
- enforcing, against the work's users, your or third parties' legal
- rights to forbid circumvention of technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
- receive it, in any medium, provided that you conspicuously and
- appropriately publish on each copy an appropriate copyright notice;
- keep intact all notices stating that this License and any
- non-permissive terms added in accord with section 7 apply to the
- code; keep intact all notices of the absence of any warranty; and
- give all recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
- and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
- produce it from the Program, in the form of source code under the
- terms of section 4, provided that you also meet all of these
- conditions:
-
- a. The work must carry prominent notices stating that you
- modified it, and giving a relevant date.
-
- b. The work must carry prominent notices stating that it is
- released under this License and any conditions added under
- section 7. This requirement modifies the requirement in
- section 4 to "keep intact all notices".
-
- c. You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable
- section 7 additional terms, to the whole of the work, and all
- its parts, regardless of how they are packaged. This License
- gives no permission to license the work in any other way, but
- it does not invalidate such permission if you have separately
- received it.
-
- d. If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has
- interactive interfaces that do not display Appropriate Legal
- Notices, your work need not make them do so.
-
- A compilation of a covered work with other separate and independent
- works, which are not by their nature extensions of the covered
- work, and which are not combined with it such as to form a larger
- program, in or on a volume of a storage or distribution medium, is
- called an "aggregate" if the compilation and its resulting
- copyright are not used to limit the access or legal rights of the
- compilation's users beyond what the individual works permit.
- Inclusion of a covered work in an aggregate does not cause this
- License to apply to the other parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
- of sections 4 and 5, provided that you also convey the
- machine-readable Corresponding Source under the terms of this
- License, in one of these ways:
-
- a. Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b. Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that
- product model, to give anyone who possesses the object code
- either (1) a copy of the Corresponding Source for all the
- software in the product that is covered by this License, on a
- durable physical medium customarily used for software
- interchange, for a price no more than your reasonable cost of
- physically performing this conveying of source, or (2) access
- to copy the Corresponding Source from a network server at no
- charge.
-
- c. Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially,
- and only if you received the object code with such an offer,
- in accord with subsection 6b.
-
- d. Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to
- the Corresponding Source in the same way through the same
- place at no further charge. You need not require recipients
- to copy the Corresponding Source along with the object code.
- If the place to copy the object code is a network server, the
- Corresponding Source may be on a different server (operated by
- you or a third party) that supports equivalent copying
- facilities, provided you maintain clear directions next to the
- object code saying where to find the Corresponding Source.
- Regardless of what server hosts the Corresponding Source, you
- remain obligated to ensure that it is available for as long as
- needed to satisfy these requirements.
-
- e. Convey the object code using peer-to-peer transmission,
- provided you inform other peers where the object code and
- Corresponding Source of the work are being offered to the
- general public at no charge under subsection 6d.
-
- A separable portion of the object code, whose source code is
- excluded from the Corresponding Source as a System Library, need
- not be included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means
- any tangible personal property which is normally used for personal,
- family, or household purposes, or (2) anything designed or sold for
- incorporation into a dwelling. In determining whether a product is
- a consumer product, doubtful cases shall be resolved in favor of
- coverage. For a particular product received by a particular user,
- "normally used" refers to a typical or common use of that class of
- product, regardless of the status of the particular user or of the
- way in which the particular user actually uses, or expects or is
- expected to use, the product. A product is a consumer product
- regardless of whether the product has substantial commercial,
- industrial or non-consumer uses, unless such uses represent the
- only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
- procedures, authorization keys, or other information required to
- install and execute modified versions of a covered work in that
- User Product from a modified version of its Corresponding Source.
- The information must suffice to ensure that the continued
- functioning of the modified object code is in no case prevented or
- interfered with solely because modification has been made.
-
- If you convey an object code work under this section in, or with,
- or specifically for use in, a User Product, and the conveying
- occurs as part of a transaction in which the right of possession
- and use of the User Product is transferred to the recipient in
- perpetuity or for a fixed term (regardless of how the transaction
- is characterized), the Corresponding Source conveyed under this
- section must be accompanied by the Installation Information. But
- this requirement does not apply if neither you nor any third party
- retains the ability to install modified object code on the User
- Product (for example, the work has been installed in ROM).
-
- The requirement to provide Installation Information does not
- include a requirement to continue to provide support service,
- warranty, or updates for a work that has been modified or installed
- by the recipient, or for the User Product in which it has been
- modified or installed. Access to a network may be denied when the
- modification itself materially and adversely affects the operation
- of the network or violates the rules and protocols for
- communication across the network.
-
- Corresponding Source conveyed, and Installation Information
- provided, in accord with this section must be in a format that is
- publicly documented (and with an implementation available to the
- public in source code form), and must require no special password
- or key for unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of
- this License by making exceptions from one or more of its
- conditions. Additional permissions that are applicable to the
- entire Program shall be treated as though they were included in
- this License, to the extent that they are valid under applicable
- law. If additional permissions apply only to part of the Program,
- that part may be used separately under those permissions, but the
- entire Program remains governed by this License without regard to
- the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
- remove any additional permissions from that copy, or from any part
- of it. (Additional permissions may be written to require their own
- removal in certain cases when you modify the work.) You may place
- additional permissions on material, added by you to a covered work,
- for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material
- you add to a covered work, you may (if authorized by the copyright
- holders of that material) supplement the terms of this License with
- terms:
-
- a. Disclaiming warranty or limiting liability differently from
- the terms of sections 15 and 16 of this License; or
-
- b. Requiring preservation of specified reasonable legal notices
- or author attributions in that material or in the Appropriate
- Legal Notices displayed by works containing it; or
-
- c. Prohibiting misrepresentation of the origin of that material,
- or requiring that modified versions of such material be marked
- in reasonable ways as different from the original version; or
-
- d. Limiting the use for publicity purposes of names of licensors
- or authors of the material; or
-
- e. Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f. Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified
- versions of it) with contractual assumptions of liability to
- the recipient, for any liability that these contractual
- assumptions directly impose on those licensors and authors.
-
- All other non-permissive additional terms are considered "further
- restrictions" within the meaning of section 10. If the Program as
- you received it, or any part of it, contains a notice stating that
- it is governed by this License along with a term that is a further
- restriction, you may remove that term. If a license document
- contains a further restriction but permits relicensing or conveying
- under this License, you may add to a covered work material governed
- by the terms of that license document, provided that the further
- restriction does not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
- must place, in the relevant source files, a statement of the
- additional terms that apply to those files, or a notice indicating
- where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in
- the form of a separately written license, or stated as exceptions;
- the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
- provided under this License. Any attempt otherwise to propagate or
- modify it is void, and will automatically terminate your rights
- under this License (including any patent licenses granted under the
- third paragraph of section 11).
-
- However, if you cease all violation of this License, then your
- license from a particular copyright holder is reinstated (a)
- provisionally, unless and until the copyright holder explicitly and
- finally terminates your license, and (b) permanently, if the
- copyright holder fails to notify you of the violation by some
- reasonable means prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
- reinstated permanently if the copyright holder notifies you of the
- violation by some reasonable means, this is the first time you have
- received notice of violation of this License (for any work) from
- that copyright holder, and you cure the violation prior to 30 days
- after your receipt of the notice.
-
- Termination of your rights under this section does not terminate
- the licenses of parties who have received copies or rights from you
- under this License. If your rights have been terminated and not
- permanently reinstated, you do not qualify to receive new licenses
- for the same material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
- run a copy of the Program. Ancillary propagation of a covered work
- occurring solely as a consequence of using peer-to-peer
- transmission to receive a copy likewise does not require
- acceptance. However, nothing other than this License grants you
- permission to propagate or modify any covered work. These actions
- infringe copyright if you do not accept this License. Therefore,
- by modifying or propagating a covered work, you indicate your
- acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
- receives a license from the original licensors, to run, modify and
- propagate that work, subject to this License. You are not
- responsible for enforcing compliance by third parties with this
- License.
-
- An "entity transaction" is a transaction transferring control of an
- organization, or substantially all assets of one, or subdividing an
- organization, or merging organizations. If propagation of a
- covered work results from an entity transaction, each party to that
- transaction who receives a copy of the work also receives whatever
- licenses to the work the party's predecessor in interest had or
- could give under the previous paragraph, plus a right to possession
- of the Corresponding Source of the work from the predecessor in
- interest, if the predecessor has it or can get it with reasonable
- efforts.
-
- You may not impose any further restrictions on the exercise of the
- rights granted or affirmed under this License. For example, you
- may not impose a license fee, royalty, or other charge for exercise
- of rights granted under this License, and you may not initiate
- litigation (including a cross-claim or counterclaim in a lawsuit)
- alleging that any patent claim is infringed by making, using,
- selling, offering for sale, or importing the Program or any portion
- of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
- License of the Program or a work on which the Program is based.
- The work thus licensed is called the contributor's "contributor
- version".
-
- A contributor's "essential patent claims" are all patent claims
- owned or controlled by the contributor, whether already acquired or
- hereafter acquired, that would be infringed by some manner,
- permitted by this License, of making, using, or selling its
- contributor version, but do not include claims that would be
- infringed only as a consequence of further modification of the
- contributor version. For purposes of this definition, "control"
- includes the right to grant patent sublicenses in a manner
- consistent with the requirements of this License.
-
- Each contributor grants you a non-exclusive, worldwide,
- royalty-free patent license under the contributor's essential
- patent claims, to make, use, sell, offer for sale, import and
- otherwise run, modify and propagate the contents of its contributor
- version.
-
- In the following three paragraphs, a "patent license" is any
- express agreement or commitment, however denominated, not to
- enforce a patent (such as an express permission to practice a
- patent or covenant not to sue for patent infringement). To "grant"
- such a patent license to a party means to make such an agreement or
- commitment not to enforce a patent against the party.
-
- If you convey a covered work, knowingly relying on a patent
- license, and the Corresponding Source of the work is not available
- for anyone to copy, free of charge and under the terms of this
- License, through a publicly available network server or other
- readily accessible means, then you must either (1) cause the
- Corresponding Source to be so available, or (2) arrange to deprive
- yourself of the benefit of the patent license for this particular
- work, or (3) arrange, in a manner consistent with the requirements
- of this License, to extend the patent license to downstream
- recipients. "Knowingly relying" means you have actual knowledge
- that, but for the patent license, your conveying the covered work
- in a country, or your recipient's use of the covered work in a
- country, would infringe one or more identifiable patents in that
- country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
- arrangement, you convey, or propagate by procuring conveyance of, a
- covered work, and grant a patent license to some of the parties
- receiving the covered work authorizing them to use, propagate,
- modify or convey a specific copy of the covered work, then the
- patent license you grant is automatically extended to all
- recipients of the covered work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
- the scope of its coverage, prohibits the exercise of, or is
- conditioned on the non-exercise of one or more of the rights that
- are specifically granted under this License. You may not convey a
- covered work if you are a party to an arrangement with a third
- party that is in the business of distributing software, under which
- you make payment to the third party based on the extent of your
- activity of conveying the work, and under which the third party
- grants, to any of the parties who would receive the covered work
- from you, a discriminatory patent license (a) in connection with
- copies of the covered work conveyed by you (or copies made from
- those copies), or (b) primarily for and in connection with specific
- products or compilations that contain the covered work, unless you
- entered into that arrangement, or that patent license was granted,
- prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
- any implied license or other defenses to infringement that may
- otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement
- or otherwise) that contradict the conditions of this License, they
- do not excuse you from the conditions of this License. If you
- cannot convey a covered work so as to satisfy simultaneously your
- obligations under this License and any other pertinent obligations,
- then as a consequence you may not convey it at all. For example,
- if you agree to terms that obligate you to collect a royalty for
- further conveying from those to whom you convey the Program, the
- only way you could satisfy both those terms and this License would
- be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
- permission to link or combine any covered work with a work licensed
- under version 3 of the GNU Affero General Public License into a
- single combined work, and to convey the resulting work. The terms
- of this License will continue to apply to the part which is the
- covered work, but the special requirements of the GNU Affero
- General Public License, section 13, concerning interaction through
- a network will apply to the combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new
- versions of the GNU General Public License from time to time. Such
- new versions will be similar in spirit to the present version, but
- may differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
- Program specifies that a certain numbered version of the GNU
- General Public License "or any later version" applies to it, you
- have the option of following the terms and conditions either of
- that numbered version or of any later version published by the Free
- Software Foundation. If the Program does not specify a version
- number of the GNU General Public License, you may choose any
- version ever published by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
- versions of the GNU General Public License can be used, that
- proxy's public statement of acceptance of a version permanently
- authorizes you to choose that version for the Program.
-
- Later license versions may give you additional or different
- permissions. However, no additional obligations are imposed on any
- author or copyright holder as a result of your choosing to follow a
- later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
- APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE
- COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
- WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
- INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE
- RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.
- SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
- NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
- WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES
- AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
- DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
- CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE
- THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA
- BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
- PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
- PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF
- THE POSSIBILITY OF SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
- above cannot be given local legal effect according to their terms,
- reviewing courts shall apply local law that most closely
- approximates an absolute waiver of all civil liability in
- connection with the Program, unless a warranty or assumption of
- liability accompanies a copy of the Program in return for a fee.
-
-END OF TERMS AND CONDITIONS
-===========================
-
-How to Apply These Terms to Your New Programs
-=============================================
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these
-terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
- ONE LINE TO GIVE THE PROGRAM'S NAME AND A BRIEF IDEA OF WHAT IT DOES.
- Copyright (C) YEAR NAME OF AUTHOR
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or (at
- your option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-
- Also add information on how to contact you by electronic and paper
-mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- PROGRAM Copyright (C) YEAR NAME OF AUTHOR
- This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type 'show c' for details.
-
- The hypothetical commands 'show w' and 'show c' should show the
-appropriate parts of the General Public License. Of course, your
-program's commands might be different; for a GUI interface, you would
-use an "about box".
-
- You should also get your employer (if you work as a programmer) or
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. For more information on this, and how to apply and follow
-the GNU GPL, see <http://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your
-program into proprietary programs. If your program is a subroutine
-library, you may consider it more useful to permit linking proprietary
-applications with the library. If this is what you want to do, use the
-GNU Lesser General Public License instead of this License. But first,
-please read <http://www.gnu.org/philosophy/why-not-lgpl.html>.
-
-
-File: libgomp.info, Node: GNU Free Documentation License, Next: Funding, Prev: Copying, Up: Top
-
-GNU Free Documentation License
-******************************
-
- Version 1.3, 3 November 2008
-
- Copyright (C) 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc.
- <http://fsf.org/>
-
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- 0. PREAMBLE
-
- The purpose of this License is to make a manual, textbook, or other
- functional and useful document "free" in the sense of freedom: to
- assure everyone the effective freedom to copy and redistribute it,
- with or without modifying it, either commercially or
- noncommercially. Secondarily, this License preserves for the
- author and publisher a way to get credit for their work, while not
- being considered responsible for modifications made by others.
-
- This License is a kind of "copyleft", which means that derivative
- works of the document must themselves be free in the same sense.
- It complements the GNU General Public License, which is a copyleft
- license designed for free software.
-
- We have designed this License in order to use it for manuals for
- free software, because free software needs free documentation: a
- free program should come with manuals providing the same freedoms
- that the software does. But this License is not limited to
- software manuals; it can be used for any textual work, regardless
- of subject matter or whether it is published as a printed book. We
- recommend this License principally for works whose purpose is
- instruction or reference.
-
- 1. APPLICABILITY AND DEFINITIONS
-
- This License applies to any manual or other work, in any medium,
- that contains a notice placed by the copyright holder saying it can
- be distributed under the terms of this License. Such a notice
- grants a world-wide, royalty-free license, unlimited in duration,
- to use that work under the conditions stated herein. The
- "Document", below, refers to any such manual or work. Any member
- of the public is a licensee, and is addressed as "you". You accept
- the license if you copy, modify or distribute the work in a way
- requiring permission under copyright law.
-
- A "Modified Version" of the Document means any work containing the
- Document or a portion of it, either copied verbatim, or with
- modifications and/or translated into another language.
-
- A "Secondary Section" is a named appendix or a front-matter section
- of the Document that deals exclusively with the relationship of the
- publishers or authors of the Document to the Document's overall
- subject (or to related matters) and contains nothing that could
- fall directly within that overall subject. (Thus, if the Document
- is in part a textbook of mathematics, a Secondary Section may not
- explain any mathematics.) The relationship could be a matter of
- historical connection with the subject or with related matters, or
- of legal, commercial, philosophical, ethical or political position
- regarding them.
-
- The "Invariant Sections" are certain Secondary Sections whose
- titles are designated, as being those of Invariant Sections, in the
- notice that says that the Document is released under this License.
- If a section does not fit the above definition of Secondary then it
- is not allowed to be designated as Invariant. The Document may
- contain zero Invariant Sections. If the Document does not identify
- any Invariant Sections then there are none.
-
- The "Cover Texts" are certain short passages of text that are
- listed, as Front-Cover Texts or Back-Cover Texts, in the notice
- that says that the Document is released under this License. A
- Front-Cover Text may be at most 5 words, and a Back-Cover Text may
- be at most 25 words.
-
- A "Transparent" copy of the Document means a machine-readable copy,
- represented in a format whose specification is available to the
- general public, that is suitable for revising the document
- straightforwardly with generic text editors or (for images composed
- of pixels) generic paint programs or (for drawings) some widely
- available drawing editor, and that is suitable for input to text
- formatters or for automatic translation to a variety of formats
- suitable for input to text formatters. A copy made in an otherwise
- Transparent file format whose markup, or absence of markup, has
- been arranged to thwart or discourage subsequent modification by
- readers is not Transparent. An image format is not Transparent if
- used for any substantial amount of text. A copy that is not
- "Transparent" is called "Opaque".
-
- Examples of suitable formats for Transparent copies include plain
- ASCII without markup, Texinfo input format, LaTeX input format,
- SGML or XML using a publicly available DTD, and standard-conforming
- simple HTML, PostScript or PDF designed for human modification.
- Examples of transparent image formats include PNG, XCF and JPG.
- Opaque formats include proprietary formats that can be read and
- edited only by proprietary word processors, SGML or XML for which
- the DTD and/or processing tools are not generally available, and
- the machine-generated HTML, PostScript or PDF produced by some word
- processors for output purposes only.
-
- The "Title Page" means, for a printed book, the title page itself,
- plus such following pages as are needed to hold, legibly, the
- material this License requires to appear in the title page. For
- works in formats which do not have any title page as such, "Title
- Page" means the text near the most prominent appearance of the
- work's title, preceding the beginning of the body of the text.
-
- The "publisher" means any person or entity that distributes copies
- of the Document to the public.
-
- A section "Entitled XYZ" means a named subunit of the Document
- whose title either is precisely XYZ or contains XYZ in parentheses
- following text that translates XYZ in another language. (Here XYZ
- stands for a specific section name mentioned below, such as
- "Acknowledgements", "Dedications", "Endorsements", or "History".)
- To "Preserve the Title" of such a section when you modify the
- Document means that it remains a section "Entitled XYZ" according
- to this definition.
-
- The Document may include Warranty Disclaimers next to the notice
- which states that this License applies to the Document. These
- Warranty Disclaimers are considered to be included by reference in
- this License, but only as regards disclaiming warranties: any other
- implication that these Warranty Disclaimers may have is void and
- has no effect on the meaning of this License.
-
- 2. VERBATIM COPYING
-
- You may copy and distribute the Document in any medium, either
- commercially or noncommercially, provided that this License, the
- copyright notices, and the license notice saying this License
- applies to the Document are reproduced in all copies, and that you
- add no other conditions whatsoever to those of this License. You
- may not use technical measures to obstruct or control the reading
- or further copying of the copies you make or distribute. However,
- you may accept compensation in exchange for copies. If you
- distribute a large enough number of copies you must also follow the
- conditions in section 3.
-
- You may also lend copies, under the same conditions stated above,
- and you may publicly display copies.
-
- 3. COPYING IN QUANTITY
-
- If you publish printed copies (or copies in media that commonly
- have printed covers) of the Document, numbering more than 100, and
- the Document's license notice requires Cover Texts, you must
- enclose the copies in covers that carry, clearly and legibly, all
- these Cover Texts: Front-Cover Texts on the front cover, and
- Back-Cover Texts on the back cover. Both covers must also clearly
- and legibly identify you as the publisher of these copies. The
- front cover must present the full title with all words of the title
- equally prominent and visible. You may add other material on the
- covers in addition. Copying with changes limited to the covers, as
- long as they preserve the title of the Document and satisfy these
- conditions, can be treated as verbatim copying in other respects.
-
- If the required texts for either cover are too voluminous to fit
- legibly, you should put the first ones listed (as many as fit
- reasonably) on the actual cover, and continue the rest onto
- adjacent pages.
-
- If you publish or distribute Opaque copies of the Document
- numbering more than 100, you must either include a machine-readable
- Transparent copy along with each Opaque copy, or state in or with
- each Opaque copy a computer-network location from which the general
- network-using public has access to download using public-standard
- network protocols a complete Transparent copy of the Document, free
- of added material. If you use the latter option, you must take
- reasonably prudent steps, when you begin distribution of Opaque
- copies in quantity, to ensure that this Transparent copy will
- remain thus accessible at the stated location until at least one
- year after the last time you distribute an Opaque copy (directly or
- through your agents or retailers) of that edition to the public.
-
- It is requested, but not required, that you contact the authors of
- the Document well before redistributing any large number of copies,
- to give them a chance to provide you with an updated version of the
- Document.
-
- 4. MODIFICATIONS
-
- You may copy and distribute a Modified Version of the Document
- under the conditions of sections 2 and 3 above, provided that you
- release the Modified Version under precisely this License, with the
- Modified Version filling the role of the Document, thus licensing
- distribution and modification of the Modified Version to whoever
- possesses a copy of it. In addition, you must do these things in
- the Modified Version:
-
- A. Use in the Title Page (and on the covers, if any) a title
- distinct from that of the Document, and from those of previous
- versions (which should, if there were any, be listed in the
- History section of the Document). You may use the same title
- as a previous version if the original publisher of that
- version gives permission.
-
- B. List on the Title Page, as authors, one or more persons or
- entities responsible for authorship of the modifications in
- the Modified Version, together with at least five of the
- principal authors of the Document (all of its principal
- authors, if it has fewer than five), unless they release you
- from this requirement.
-
- C. State on the Title page the name of the publisher of the
- Modified Version, as the publisher.
-
- D. Preserve all the copyright notices of the Document.
-
- E. Add an appropriate copyright notice for your modifications
- adjacent to the other copyright notices.
-
- F. Include, immediately after the copyright notices, a license
- notice giving the public permission to use the Modified
- Version under the terms of this License, in the form shown in
- the Addendum below.
-
- G. Preserve in that license notice the full lists of Invariant
- Sections and required Cover Texts given in the Document's
- license notice.
-
- H. Include an unaltered copy of this License.
-
- I. Preserve the section Entitled "History", Preserve its Title,
- and add to it an item stating at least the title, year, new
- authors, and publisher of the Modified Version as given on the
- Title Page. If there is no section Entitled "History" in the
- Document, create one stating the title, year, authors, and
- publisher of the Document as given on its Title Page, then add
- an item describing the Modified Version as stated in the
- previous sentence.
-
- J. Preserve the network location, if any, given in the Document
- for public access to a Transparent copy of the Document, and
- likewise the network locations given in the Document for
- previous versions it was based on. These may be placed in the
- "History" section. You may omit a network location for a work
- that was published at least four years before the Document
- itself, or if the original publisher of the version it refers
- to gives permission.
-
- K. For any section Entitled "Acknowledgements" or "Dedications",
- Preserve the Title of the section, and preserve in the section
- all the substance and tone of each of the contributor
- acknowledgements and/or dedications given therein.
-
- L. Preserve all the Invariant Sections of the Document, unaltered
- in their text and in their titles. Section numbers or the
- equivalent are not considered part of the section titles.
-
- M. Delete any section Entitled "Endorsements". Such a section
- may not be included in the Modified Version.
-
- N. Do not retitle any existing section to be Entitled
- "Endorsements" or to conflict in title with any Invariant
- Section.
-
- O. Preserve any Warranty Disclaimers.
-
- If the Modified Version includes new front-matter sections or
- appendices that qualify as Secondary Sections and contain no
- material copied from the Document, you may at your option designate
- some or all of these sections as invariant. To do this, add their
- titles to the list of Invariant Sections in the Modified Version's
- license notice. These titles must be distinct from any other
- section titles.
-
- You may add a section Entitled "Endorsements", provided it contains
- nothing but endorsements of your Modified Version by various
- parties--for example, statements of peer review or that the text
- has been approved by an organization as the authoritative
- definition of a standard.
-
- You may add a passage of up to five words as a Front-Cover Text,
- and a passage of up to 25 words as a Back-Cover Text, to the end of
- the list of Cover Texts in the Modified Version. Only one passage
- of Front-Cover Text and one of Back-Cover Text may be added by (or
- through arrangements made by) any one entity. If the Document
- already includes a cover text for the same cover, previously added
- by you or by arrangement made by the same entity you are acting on
- behalf of, you may not add another; but you may replace the old
- one, on explicit permission from the previous publisher that added
- the old one.
-
- The author(s) and publisher(s) of the Document do not by this
- License give permission to use their names for publicity for or to
- assert or imply endorsement of any Modified Version.
-
- 5. COMBINING DOCUMENTS
-
- You may combine the Document with other documents released under
- this License, under the terms defined in section 4 above for
- modified versions, provided that you include in the combination all
- of the Invariant Sections of all of the original documents,
- unmodified, and list them all as Invariant Sections of your
- combined work in its license notice, and that you preserve all
- their Warranty Disclaimers.
-
- The combined work need only contain one copy of this License, and
- multiple identical Invariant Sections may be replaced with a single
- copy. If there are multiple Invariant Sections with the same name
- but different contents, make the title of each such section unique
- by adding at the end of it, in parentheses, the name of the
- original author or publisher of that section if known, or else a
- unique number. Make the same adjustment to the section titles in
- the list of Invariant Sections in the license notice of the
- combined work.
-
- In the combination, you must combine any sections Entitled
- "History" in the various original documents, forming one section
- Entitled "History"; likewise combine any sections Entitled
- "Acknowledgements", and any sections Entitled "Dedications". You
- must delete all sections Entitled "Endorsements."
-
- 6. COLLECTIONS OF DOCUMENTS
-
- You may make a collection consisting of the Document and other
- documents released under this License, and replace the individual
- copies of this License in the various documents with a single copy
- that is included in the collection, provided that you follow the
- rules of this License for verbatim copying of each of the documents
- in all other respects.
-
- You may extract a single document from such a collection, and
- distribute it individually under this License, provided you insert
- a copy of this License into the extracted document, and follow this
- License in all other respects regarding verbatim copying of that
- document.
-
- 7. AGGREGATION WITH INDEPENDENT WORKS
-
- A compilation of the Document or its derivatives with other
- separate and independent documents or works, in or on a volume of a
- storage or distribution medium, is called an "aggregate" if the
- copyright resulting from the compilation is not used to limit the
- legal rights of the compilation's users beyond what the individual
- works permit. When the Document is included in an aggregate, this
- License does not apply to the other works in the aggregate which
- are not themselves derivative works of the Document.
-
- If the Cover Text requirement of section 3 is applicable to these
- copies of the Document, then if the Document is less than one half
- of the entire aggregate, the Document's Cover Texts may be placed
- on covers that bracket the Document within the aggregate, or the
- electronic equivalent of covers if the Document is in electronic
- form. Otherwise they must appear on printed covers that bracket
- the whole aggregate.
-
- 8. TRANSLATION
-
- Translation is considered a kind of modification, so you may
- distribute translations of the Document under the terms of section
- 4. Replacing Invariant Sections with translations requires special
- permission from their copyright holders, but you may include
- translations of some or all Invariant Sections in addition to the
- original versions of these Invariant Sections. You may include a
- translation of this License, and all the license notices in the
- Document, and any Warranty Disclaimers, provided that you also
- include the original English version of this License and the
- original versions of those notices and disclaimers. In case of a
- disagreement between the translation and the original version of
- this License or a notice or disclaimer, the original version will
- prevail.
-
- If a section in the Document is Entitled "Acknowledgements",
- "Dedications", or "History", the requirement (section 4) to
- Preserve its Title (section 1) will typically require changing the
- actual title.
-
- 9. TERMINATION
-
- You may not copy, modify, sublicense, or distribute the Document
- except as expressly provided under this License. Any attempt
- otherwise to copy, modify, sublicense, or distribute it is void,
- and will automatically terminate your rights under this License.
-
- However, if you cease all violation of this License, then your
- license from a particular copyright holder is reinstated (a)
- provisionally, unless and until the copyright holder explicitly and
- finally terminates your license, and (b) permanently, if the
- copyright holder fails to notify you of the violation by some
- reasonable means prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
- reinstated permanently if the copyright holder notifies you of the
- violation by some reasonable means, this is the first time you have
- received notice of violation of this License (for any work) from
- that copyright holder, and you cure the violation prior to 30 days
- after your receipt of the notice.
-
- Termination of your rights under this section does not terminate
- the licenses of parties who have received copies or rights from you
- under this License. If your rights have been terminated and not
- permanently reinstated, receipt of a copy of some or all of the
- same material does not give you any rights to use it.
-
- 10. FUTURE REVISIONS OF THIS LICENSE
-
- The Free Software Foundation may publish new, revised versions of
- the GNU Free Documentation License from time to time. Such new
- versions will be similar in spirit to the present version, but may
- differ in detail to address new problems or concerns. See
- <http://www.gnu.org/copyleft/>.
-
- Each version of the License is given a distinguishing version
- number. If the Document specifies that a particular numbered
- version of this License "or any later version" applies to it, you
- have the option of following the terms and conditions either of
- that specified version or of any later version that has been
- published (not as a draft) by the Free Software Foundation. If the
- Document does not specify a version number of this License, you may
- choose any version ever published (not as a draft) by the Free
- Software Foundation. If the Document specifies that a proxy can
- decide which future versions of this License can be used, that
- proxy's public statement of acceptance of a version permanently
- authorizes you to choose that version for the Document.
-
- 11. RELICENSING
-
- "Massive Multiauthor Collaboration Site" (or "MMC Site") means any
- World Wide Web server that publishes copyrightable works and also
- provides prominent facilities for anybody to edit those works. A
- public wiki that anybody can edit is an example of such a server.
- A "Massive Multiauthor Collaboration" (or "MMC") contained in the
- site means any set of copyrightable works thus published on the MMC
- site.
-
- "CC-BY-SA" means the Creative Commons Attribution-Share Alike 3.0
- license published by Creative Commons Corporation, a not-for-profit
- corporation with a principal place of business in San Francisco,
- California, as well as future copyleft versions of that license
- published by that same organization.
-
- "Incorporate" means to publish or republish a Document, in whole or
- in part, as part of another Document.
-
- An MMC is "eligible for relicensing" if it is licensed under this
- License, and if all works that were first published under this
- License somewhere other than this MMC, and subsequently
- incorporated in whole or in part into the MMC, (1) had no cover
- texts or invariant sections, and (2) were thus incorporated prior
- to November 1, 2008.
-
- The operator of an MMC Site may republish an MMC contained in the
- site under CC-BY-SA on the same site at any time before August 1,
- 2009, provided the MMC is eligible for relicensing.
-
-ADDENDUM: How to use this License for your documents
-====================================================
-
-To use this License in a document you have written, include a copy of
-the License in the document and put the following copyright and license
-notices just after the title page:
-
- Copyright (C) YEAR YOUR NAME.
- Permission is granted to copy, distribute and/or modify this document
- under the terms of the GNU Free Documentation License, Version 1.3
- or any later version published by the Free Software Foundation;
- with no Invariant Sections, no Front-Cover Texts, and no Back-Cover
- Texts. A copy of the license is included in the section entitled ``GNU
- Free Documentation License''.
-
- If you have Invariant Sections, Front-Cover Texts and Back-Cover
-Texts, replace the "with...Texts." line with this:
-
- with the Invariant Sections being LIST THEIR TITLES, with
- the Front-Cover Texts being LIST, and with the Back-Cover Texts
- being LIST.
-
- If you have Invariant Sections without Cover Texts, or some other
-combination of the three, merge those two alternatives to suit the
-situation.
-
- If your document contains nontrivial examples of program code, we
-recommend releasing these examples in parallel under your choice of free
-software license, such as the GNU General Public License, to permit
-their use in free software.
-
-
-File: libgomp.info, Node: Funding, Next: Library Index, Prev: GNU Free Documentation License, Up: Top
-
-Funding Free Software
-*********************
-
-If you want to have more free software a few years from now, it makes
-sense for you to help encourage people to contribute funds for its
-development. The most effective approach known is to encourage
-commercial redistributors to donate.
-
- Users of free software systems can boost the pace of development by
-encouraging for-a-fee distributors to donate part of their selling price
-to free software developers--the Free Software Foundation, and others.
-
- The way to convince distributors to do this is to demand it and
-expect it from them. So when you compare distributors, judge them
-partly by how much they give to free software development. Show
-distributors they must compete to be the one who gives the most.
-
- To make this approach work, you must insist on numbers that you can
-compare, such as, "We will donate ten dollars to the Frobnitz project
-for each disk sold." Don't be satisfied with a vague promise, such as
-"A portion of the profits are donated," since it doesn't give a basis
-for comparison.
-
- Even a precise fraction "of the profits from this disk" is not very
-meaningful, since creative accounting and unrelated business decisions
-can greatly alter what fraction of the sales price counts as profit. If
-the price you pay is $50, ten percent of the profit is probably less
-than a dollar; it might be a few cents, or nothing at all.
-
- Some redistributors do development work themselves. This is useful
-too; but to keep everyone honest, you need to inquire how much they do,
-and what kind. Some kinds of development make much more long-term
-difference than others. For example, maintaining a separate version of
-a program contributes very little; maintaining the standard version of a
-program for the whole community contributes much. Easy new ports
-contribute little, since someone else would surely do them; difficult
-ports such as adding a new CPU to the GNU Compiler Collection contribute
-more; major new features or packages contribute the most.
-
- By establishing the idea that supporting further development is "the
-proper thing to do" when distributing free software for a fee, we can
-assure a steady flow of resources into making more free software.
-
- Copyright (C) 1994 Free Software Foundation, Inc.
- Verbatim copying and redistribution of this section is permitted
- without royalty; alteration is not permitted.
-
-
-File: libgomp.info, Node: Library Index, Prev: Funding, Up: Top
-
-Library Index
-*************
-
-
-* Menu:
-
-* Environment Variable: OMP_CANCELLATION. (line 6)
-* Environment Variable <1>: OMP_DISPLAY_ENV. (line 6)
-* Environment Variable <2>: OMP_DEFAULT_DEVICE. (line 6)
-* Environment Variable <3>: OMP_DYNAMIC. (line 6)
-* Environment Variable <4>: OMP_MAX_ACTIVE_LEVELS. (line 6)
-* Environment Variable <5>: OMP_NESTED. (line 6)
-* Environment Variable <6>: OMP_NUM_THREADS. (line 6)
-* Environment Variable <7>: OMP_PLACES. (line 6)
-* Environment Variable <8>: OMP_PROC_BIND. (line 6)
-* Environment Variable <9>: OMP_SCHEDULE. (line 6)
-* Environment Variable <10>: OMP_STACKSIZE. (line 6)
-* Environment Variable <11>: OMP_THREAD_LIMIT. (line 6)
-* Environment Variable <12>: OMP_WAIT_POLICY. (line 6)
-* Environment Variable <13>: GOMP_CPU_AFFINITY. (line 6)
-* Environment Variable <14>: GOMP_STACKSIZE. (line 6)
-* Environment Variable <15>: GOMP_SPINCOUNT. (line 6)
-* FDL, GNU Free Documentation License: GNU Free Documentation License.
- (line 6)
-* Implementation specific setting: OMP_NESTED. (line 6)
-* Implementation specific setting <1>: OMP_NUM_THREADS. (line 6)
-* Implementation specific setting <2>: OMP_SCHEDULE. (line 6)
-* Implementation specific setting <3>: GOMP_STACKSIZE. (line 6)
-* Implementation specific setting <4>: GOMP_SPINCOUNT. (line 6)
-* Introduction: Top. (line 6)
-
-
-
-Tag Table:
-Node: Top1992
-Node: Enabling OpenMP3186
-Node: Runtime Library Routines3973
-Node: omp_get_active_level6987
-Node: omp_get_ancestor_thread_num7687
-Node: omp_get_cancellation8617
-Node: omp_get_default_device9431
-Node: omp_get_dynamic10107
-Node: omp_get_level10982
-Node: omp_get_max_active_levels11602
-Node: omp_get_max_threads12301
-Node: omp_get_nested13058
-Node: omp_get_num_devices13970
-Node: omp_get_num_procs14491
-Node: omp_get_num_teams15030
-Node: omp_get_num_threads15546
-Node: omp_get_proc_bind16635
-Node: omp_get_schedule17556
-Node: omp_get_team_num18502
-Node: omp_get_team_size19001
-Node: omp_get_thread_limit19961
-Node: omp_get_thread_num20580
-Node: omp_in_parallel21451
-Node: omp_in_final22100
-Node: omp_is_initial_device22774
-Node: omp_set_default_device23467
-Node: omp_set_dynamic24258
-Node: omp_set_max_active_levels25144
-Node: omp_set_nested25921
-Node: omp_set_num_threads26813
-Node: omp_set_schedule27681
-Node: omp_init_lock28732
-Node: omp_set_lock29385
-Node: omp_test_lock30240
-Node: omp_unset_lock31216
-Node: omp_destroy_lock32147
-Node: omp_init_nest_lock32824
-Node: omp_set_nest_lock33559
-Node: omp_test_nest_lock34474
-Node: omp_unset_nest_lock35501
-Node: omp_destroy_nest_lock36516
-Node: omp_get_wtick37267
-Node: omp_get_wtime37859
-Node: Environment Variables38635
-Node: OMP_CANCELLATION39977
-Node: OMP_DISPLAY_ENV40510
-Node: OMP_DEFAULT_DEVICE41213
-Node: OMP_DYNAMIC41993
-Node: OMP_MAX_ACTIVE_LEVELS42589
-Node: OMP_NESTED43228
-Node: OMP_NUM_THREADS43833
-Node: OMP_PLACES44518
-Node: OMP_PROC_BIND46695
-Node: OMP_SCHEDULE47884
-Node: OMP_STACKSIZE48577
-Node: OMP_THREAD_LIMIT49407
-Node: OMP_WAIT_POLICY50008
-Node: GOMP_CPU_AFFINITY50700
-Node: GOMP_STACKSIZE52437
-Node: GOMP_SPINCOUNT53273
-Node: The libgomp ABI54445
-Node: Implementing MASTER construct55244
-Node: Implementing CRITICAL construct55658
-Node: Implementing ATOMIC construct56397
-Node: Implementing FLUSH construct56878
-Node: Implementing BARRIER construct57149
-Node: Implementing THREADPRIVATE construct57418
-Node: Implementing PRIVATE clause58071
-Node: Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses58652
-Node: Implementing REDUCTION clause59976
-Node: Implementing PARALLEL construct60533
-Node: Implementing FOR construct61790
-Node: Implementing ORDERED construct63788
-Node: Implementing SECTIONS construct64094
-Node: Implementing SINGLE construct64860
-Node: Reporting Bugs65522
-Node: Copying65832
-Node: GNU Free Documentation License103378
-Node: Funding128500
-Node: Library Index131026
-
-End Tag Table
diff --git a/gcc-4.9/libgomp/libgomp.texi b/gcc-4.9/libgomp/libgomp.texi
index 8461c5b0c..254be57b8 100644
--- a/gcc-4.9/libgomp/libgomp.texi
+++ b/gcc-4.9/libgomp/libgomp.texi
@@ -130,10 +130,10 @@ The runtime routines described here are defined by Section 3 of the OpenMP
specification in version 4.0. The routines are structured in following
three parts:
+@menu
Control threads, processors and the parallel environment. They have C
linkage, and do not throw exceptions.
-@menu
* omp_get_active_level:: Number of active parallel regions
* omp_get_ancestor_thread_num:: Ancestor thread ID
* omp_get_cancellation:: Whether cancellation support is enabled
@@ -162,11 +162,9 @@ linkage, and do not throw exceptions.
* omp_set_nested:: Enable/disable nested parallel regions
* omp_set_num_threads:: Set upper team size limit
* omp_set_schedule:: Set the runtime scheduling method
-@end menu
Initialize, set, test, unset and destroy simple and nested locks.
-@menu
* omp_init_lock:: Initialize simple lock
* omp_set_lock:: Wait for and set simple lock
* omp_test_lock:: Test and set simple lock if available
@@ -177,11 +175,9 @@ Initialize, set, test, unset and destroy simple and nested locks.
* omp_test_nest_lock:: Test and set nested lock if available
* omp_unset_nest_lock:: Unset nested lock
* omp_destroy_nest_lock:: Destroy nested lock
-@end menu
Portable, thread-based, wall clock timer.
-@menu
* omp_get_wtick:: Get timer precision.
* omp_get_wtime:: Elapsed wall clock time.
@end menu
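
As a quick illustration of the three groups the menu above lists (environment control, simple and nested locks, and the wall-clock timer), here is a minimal C sketch that exercises one routine from each group. It is not part of this patch, and the file name and build command in the comment are assumptions about a typical gcc -fopenmp setup.

     /* Hypothetical example, not part of this patch: one routine from
        each of the three groups listed in the menu above (environment
        queries, simple locks, wall-clock timing).  All routines have C
        linkage and do not throw exceptions.
        Build (assumed):  gcc -fopenmp locks-demo.c -o locks-demo  */
     #include <omp.h>
     #include <stdio.h>

     int
     main (void)
     {
       omp_lock_t lock;
       int counter = 0;
       double start;

       omp_init_lock (&lock);
       start = omp_get_wtime ();

       #pragma omp parallel
       {
         /* Serialize the shared update with a simple lock.  */
         omp_set_lock (&lock);
         counter++;
         omp_unset_lock (&lock);
       }

       printf ("threads: %d  elapsed: %g s  tick: %g s\n",
               counter, omp_get_wtime () - start, omp_get_wtick ());
       omp_destroy_lock (&lock);
       return 0;
     }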
@@ -1448,6 +1444,33 @@ level. If undefined one thread per CPU is used.
+@node OMP_PROC_BIND
+@section @env{OMP_PROC_BIND} -- Whether threads may be moved between CPUs
+@cindex Environment Variable
+@table @asis
+@item @emph{Description}:
+Specifies whether threads may be moved between processors. If set to
+@code{TRUE}, OpenMP threads should not be moved; if set to @code{FALSE}
+they may be moved. Alternatively, a comma separated list with the
+values @code{MASTER}, @code{CLOSE} and @code{SPREAD} can be used to specify
+the thread affinity policy for the corresponding nesting level. With
+@code{MASTER} the worker threads are in the same place partition as the
+master thread. With @code{CLOSE} those are kept close to the master thread
+in contiguous place partitions. And with @code{SPREAD} a sparse distribution
+across the place partitions is used.
+
+When undefined, @env{OMP_PROC_BIND} defaults to @code{TRUE} when
+@env{OMP_PLACES} or @env{GOMP_CPU_AFFINITY} is set and @code{FALSE} otherwise.
+
+@item @emph{See also}:
+@ref{OMP_PLACES}, @ref{GOMP_CPU_AFFINITY}, @ref{omp_get_proc_bind}
+
+@item @emph{Reference}:
+@uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.4
+@end table
+
+
+
@node OMP_PLACES
@section @env{OMP_PLACES} -- Specifies on which CPUs the threads should be placed
@cindex Environment Variable
@@ -1490,29 +1513,22 @@ between CPUs following no placement policy.
-@node OMP_PROC_BIND
-@section @env{OMP_PROC_BIND} -- Whether threads may be moved between CPUs
+@node OMP_STACKSIZE
+@section @env{OMP_STACKSIZE} -- Set default thread stack size
@cindex Environment Variable
@table @asis
@item @emph{Description}:
-Specifies whether threads may be moved between processors. If set to
-@code{TRUE}, OpenMP threads should not be moved; if set to @code{FALSE}
-they may be moved. Alternatively, a comma separated list with the
-values @code{MASTER}, @code{CLOSE} and @code{SPREAD} can be used to specify
-the thread affinity policy for the corresponding nesting level. With
-@code{MASTER} the worker threads are in the same place partition as the
-master thread. With @code{CLOSE} those are kept close to the master thread
-in contiguous place partitions. And with @code{SPREAD} a sparse distribution
-across the place partitions is used.
-
-When undefined, @env{OMP_PROC_BIND} defaults to @code{TRUE} when
-@env{OMP_PLACES} or @env{GOMP_CPU_AFFINITY} is set and @code{FALSE} otherwise.
-
-@item @emph{See also}:
-@ref{OMP_PLACES}, @ref{GOMP_CPU_AFFINITY}, @ref{omp_get_proc_bind}
+Set the default thread stack size in kilobytes, unless the number
+is suffixed by @code{B}, @code{K}, @code{M} or @code{G}, in which
+case the size is, respectively, in bytes, kilobytes, megabytes
+or gigabytes. This is different from @code{pthread_attr_setstacksize}
+which gets the number of bytes as an argument. If the stack size cannot
+be set due to system constraints, an error is reported and the initial
+stack size is left unchanged. If undefined, the stack size is system
+dependent.
-@item @emph{Reference}:
-@uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.4
+@item @emph{Reference}:
+@uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.7
@end table
@@ -1538,26 +1554,6 @@ dynamic scheduling and a chunk size of 1 is used.
-@node OMP_STACKSIZE
-@section @env{OMP_STACKSIZE} -- Set default thread stack size
-@cindex Environment Variable
-@table @asis
-@item @emph{Description}:
-Set the default thread stack size in kilobytes, unless the number
-is suffixed by @code{B}, @code{K}, @code{M} or @code{G}, in which
-case the size is, respectively, in bytes, kilobytes, megabytes
-or gigabytes. This is different from @code{pthread_attr_setstacksize}
-which gets the number of bytes as an argument. If the stack size cannot
-be set due to system constraints, an error is reported and the initial
-stack size is left unchanged. If undefined, the stack size is system
-dependent.
-
-@item @emph{Reference}:
-@uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.7
-@end table
-
-
-
@node OMP_THREAD_LIMIT
@section @env{OMP_THREAD_LIMIT} -- Set the maximum number of threads
@cindex Environment Variable
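
The relocated OMP_PROC_BIND section above describes the thread-affinity policies TRUE, FALSE, MASTER, CLOSE and SPREAD. The following minimal C sketch, not part of this patch, shows how a program can observe the policy chosen through the environment; the file name and run command are illustrative assumptions.

     /* Hypothetical example, not part of this patch: report the binding
        policy selected via OMP_PROC_BIND.  The enum values match the
        omp_proc_bind_* parameters in the omp_lib.h.in hunk below
        (false = 0, true = 1, master = 2, close = 3, spread = 4).
        Build (assumed):  gcc -fopenmp bind-demo.c -o bind-demo
        Run (assumed):    OMP_PROC_BIND=SPREAD ./bind-demo  */
     #include <omp.h>
     #include <stdio.h>

     int
     main (void)
     {
       omp_proc_bind_t policy = omp_get_proc_bind ();
       printf ("proc-bind policy: %d\n", (int) policy);

       #pragma omp parallel
       #pragma omp single
       printf ("team size: %d\n", omp_get_num_threads ());

       return 0;
     }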
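Likewise for the relocated OMP_STACKSIZE section: its value defaults to kilobytes unless suffixed with B, K, M or G, whereas pthread_attr_setstacksize takes bytes. A hedged sketch of the pthreads equivalent of OMP_STACKSIZE=8M, not part of this patch:

     /* Hypothetical sketch, not part of this patch: the pthreads
        equivalent of OMP_STACKSIZE=8M.  OMP_STACKSIZE defaults to
        kilobytes (or takes a B/K/M/G suffix), while
        pthread_attr_setstacksize expects the size in bytes.
        Build (assumed):  gcc -pthread stack-demo.c -o stack-demo  */
     #include <pthread.h>
     #include <stdio.h>

     int
     main (void)
     {
       pthread_attr_t attr;
       size_t size = 0;

       pthread_attr_init (&attr);
       /* 8M in OMP_STACKSIZE terms is 8 * 1024 * 1024 bytes here.  */
       pthread_attr_setstacksize (&attr, 8UL * 1024 * 1024);
       pthread_attr_getstacksize (&attr, &size);
       printf ("requested per-thread stack: %zu bytes\n", size);
       pthread_attr_destroy (&attr);
       return 0;
     }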
diff --git a/gcc-4.9/libgomp/omp_lib.f90.in b/gcc-4.9/libgomp/omp_lib.f90.in
index dda297a1d..757053c9f 100644
--- a/gcc-4.9/libgomp/omp_lib.f90.in
+++ b/gcc-4.9/libgomp/omp_lib.f90.in
@@ -42,7 +42,7 @@
module omp_lib
use omp_lib_kinds
implicit none
- integer, parameter :: openmp_version = 201107
+ integer, parameter :: openmp_version = 201307
interface
subroutine omp_init_lock (svar)
diff --git a/gcc-4.9/libgomp/omp_lib.h.in b/gcc-4.9/libgomp/omp_lib.h.in
index 7725396ac..691adb865 100644
--- a/gcc-4.9/libgomp/omp_lib.h.in
+++ b/gcc-4.9/libgomp/omp_lib.h.in
@@ -45,7 +45,7 @@
parameter (omp_proc_bind_master = 2)
parameter (omp_proc_bind_close = 3)
parameter (omp_proc_bind_spread = 4)
- parameter (openmp_version = 201107)
+ parameter (openmp_version = 201307)
external omp_init_lock, omp_init_nest_lock
external omp_destroy_lock, omp_destroy_nest_lock
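
The openmp_version bump from 201107 to 201307 matches the OpenMP 4.0 date; with -fopenmp the C and C++ front ends report the same value through the _OPENMP macro. A small, hypothetical check, not part of this patch:

     /* Hypothetical check, not part of this patch: with -fopenmp the
        C/C++ front ends define _OPENMP to the same 201307 value that
        the Fortran openmp_version parameter now carries above.
        Build (assumed):  gcc -fopenmp version-check.c  */
     #include <stdio.h>

     int
     main (void)
     {
     #ifdef _OPENMP
       printf ("_OPENMP = %d (%s)\n", _OPENMP,
               _OPENMP >= 201307 ? "OpenMP 4.0 or later" : "pre-4.0");
     #else
       puts ("compiled without -fopenmp");
     #endif
       return 0;
     }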
diff --git a/gcc-4.9/libgomp/testsuite/config/default.exp b/gcc-4.9/libgomp/testsuite/config/default.exp
index 197327cd3..3a7f9de72 100644
--- a/gcc-4.9/libgomp/testsuite/config/default.exp
+++ b/gcc-4.9/libgomp/testsuite/config/default.exp
@@ -15,3 +15,4 @@
# <http://www.gnu.org/licenses/>.
load_lib "standard.exp"
+load_lib "gompconfig.exp"
diff --git a/gcc-4.9/libgomp/testsuite/gompconfig.exp.in b/gcc-4.9/libgomp/testsuite/gompconfig.exp.in
new file mode 100644
index 000000000..5a5b7ccf6
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/gompconfig.exp.in
@@ -0,0 +1,2 @@
+global GCC_UNDER_TEST
+set GCC_UNDER_TEST "@CC@"
diff --git a/gcc-4.9/libgomp/testsuite/lib/libgomp.exp b/gcc-4.9/libgomp/testsuite/lib/libgomp.exp
index c965147e4..71775d342 100644
--- a/gcc-4.9/libgomp/testsuite/lib/libgomp.exp
+++ b/gcc-4.9/libgomp/testsuite/lib/libgomp.exp
@@ -114,10 +114,9 @@ proc libgomp_init { args } {
append always_ld_library_path ":${gccdir}/pthread"
}
append always_ld_library_path ":${gccdir}"
- set compiler [lindex $GCC_UNDER_TEST 0]
- if { [is_remote host] == 0 && [which $compiler] != 0 } {
- foreach i "[exec $compiler --print-multi-lib]" {
+ if { [is_remote host] == 0 } {
+ foreach i "[eval "exec $GCC_UNDER_TEST --print-multi-lib"]" {
set mldir ""
regexp -- "\[a-z0-9=_/\.-\]*;" $i mldir
set mldir [string trimright $mldir "\;@"]
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c++/for-10.C b/gcc-4.9/libgomp/testsuite/libgomp.c++/for-10.C
index fb1a3e952..c67096ac9 100644
--- a/gcc-4.9/libgomp/testsuite/libgomp.c++/for-10.C
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c++/for-10.C
@@ -19,11 +19,14 @@ extern "C" void abort ();
#undef F
#undef G
+#undef SC
+#define SC static
#define F for simd
#define G f_simd
#include "../libgomp.c/for-1.h"
#undef F
#undef G
+#undef SC
int
main ()
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c++/simd-9.C b/gcc-4.9/libgomp/testsuite/libgomp.c++/simd-9.C
new file mode 100644
index 000000000..3c567b31c
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c++/simd-9.C
@@ -0,0 +1,52 @@
+// { dg-do run }
+// { dg-options "-O2" }
+// { dg-additional-options "-msse2" { target sse2_runtime } }
+// { dg-additional-options "-mavx" { target avx_runtime } }
+
+extern "C" void abort ();
+int a[1024] __attribute__((aligned (32))) = { 1 };
+#pragma omp declare reduction (foo:int:omp_out += omp_in) \
+ initializer (omp_priv = 0)
+
+__attribute__((noinline, noclone)) void
+foo (int &u, int &v)
+{
+ int i;
+ #pragma omp simd aligned(a : 32) reduction(foo:u) reduction(+:v)
+ for (i = 0; i < 1024; i++)
+ {
+ int x = a[i];
+ u += x;
+ v += x;
+ }
+}
+
+__attribute__((noinline, noclone)) void
+bar (int &u, int &v)
+{
+ int i;
+ #pragma omp simd aligned(a : 32) reduction(foo:u) reduction(+:v) \
+ safelen(1)
+ for (i = 0; i < 1024; i++)
+ {
+ int x = a[i];
+ u += x;
+ v += x;
+ }
+}
+
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 1024; i++)
+ a[i] = (i & 31) + (i / 128);
+ int u = 0, v = 0;
+ foo (u, v);
+ if (u != 19456 || v != 19456)
+ abort ();
+ u = 0; v = 0;
+ bar (u, v);
+ if (u != 19456 || v != 19456)
+ abort ();
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c++/simd10.C b/gcc-4.9/libgomp/testsuite/libgomp.c++/simd10.C
new file mode 100644
index 000000000..390e65ffe
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c++/simd10.C
@@ -0,0 +1,6 @@
+// { dg-do run }
+// { dg-options "-O2" }
+// { dg-additional-options "-msse2" { target sse2_runtime } }
+// { dg-additional-options "-mavx" { target avx_runtime } }
+
+#include "../libgomp.c/simd-14.c"
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c++/simd11.C b/gcc-4.9/libgomp/testsuite/libgomp.c++/simd11.C
new file mode 100644
index 000000000..b96686856
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c++/simd11.C
@@ -0,0 +1,6 @@
+// { dg-do run }
+// { dg-options "-O2" }
+// { dg-additional-options "-msse2" { target sse2_runtime } }
+// { dg-additional-options "-mavx" { target avx_runtime } }
+
+#include "../libgomp.c/simd-15.c"
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c++/simd12.C b/gcc-4.9/libgomp/testsuite/libgomp.c++/simd12.C
new file mode 100644
index 000000000..ecfc912ae
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c++/simd12.C
@@ -0,0 +1,6 @@
+// { dg-do run }
+// { dg-options "-O2" }
+// { dg-additional-options "-msse2" { target sse2_runtime } }
+// { dg-additional-options "-mavx" { target avx_runtime } }
+
+#include "../libgomp.c/simd-16.c"
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c++/simd13.C b/gcc-4.9/libgomp/testsuite/libgomp.c++/simd13.C
new file mode 100644
index 000000000..f74962096
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c++/simd13.C
@@ -0,0 +1,6 @@
+// { dg-do run }
+// { dg-options "-O2" }
+// { dg-additional-options "-msse2" { target sse2_runtime } }
+// { dg-additional-options "-mavx" { target avx_runtime } }
+
+#include "../libgomp.c/simd-17.c"
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/for-2.c b/gcc-4.9/libgomp/testsuite/libgomp.c/for-2.c
index f5a01ab05..ae8100874 100644
--- a/gcc-4.9/libgomp/testsuite/libgomp.c/for-2.c
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/for-2.c
@@ -21,11 +21,14 @@ extern void abort (void);
#undef F
#undef G
+#undef SC
+#define SC static
#define F for simd
#define G f_simd
#include "for-1.h"
#undef F
#undef G
+#undef SC
int
main ()
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/for-2.h b/gcc-4.9/libgomp/testsuite/libgomp.c/for-2.h
index 57c385ec8..920d23b52 100644
--- a/gcc-4.9/libgomp/testsuite/libgomp.c/for-2.h
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/for-2.h
@@ -8,6 +8,9 @@ noreturn (void)
for (;;);
}
#endif
+#ifndef SC
+#define SC
+#endif
__attribute__((noinline, noclone)) void
N(f0) (void)
@@ -57,7 +60,7 @@ __attribute__((noinline, noclone)) void
N(f5) (int n11, int n12, int n21, int n22, int n31, int n32,
int s1, int s2, int s3)
{
- int v1, v2, v3;
+ SC int v1, v2, v3;
#pragma omp F S collapse(3)
for (v1 = n11; v1 < n12; v1 += s1)
for (v2 = n21; v2 < n22; v2 += s2)
@@ -69,8 +72,8 @@ __attribute__((noinline, noclone)) void
N(f6) (int n11, int n12, int n21, int n22, long long n31, long long n32,
int s1, int s2, long long int s3)
{
- int v1, v2;
- long long v3;
+ SC int v1, v2;
+ SC long long v3;
#pragma omp F S collapse(3)
for (v1 = n11; v1 > n12; v1 += s1)
for (v2 = n21; v2 > n22; v2 += s2)
@@ -81,8 +84,8 @@ N(f6) (int n11, int n12, int n21, int n22, long long n31, long long n32,
__attribute__((noinline, noclone)) void
N(f7) (void)
{
- unsigned int v1, v3;
- unsigned long long v2;
+ SC unsigned int v1, v3;
+ SC unsigned long long v2;
#pragma omp F S collapse(3)
for (v1 = 0; v1 < 20; v1 += 2)
for (v2 = __LONG_LONG_MAX__ + 16ULL;
@@ -94,7 +97,7 @@ N(f7) (void)
__attribute__((noinline, noclone)) void
N(f8) (void)
{
- long long v1, v2, v3;
+ SC long long v1, v2, v3;
#pragma omp F S collapse(3)
for (v1 = 0; v1 < 20; v1 += 2)
for (v2 = 30; v2 < 20; v2++)
@@ -118,7 +121,7 @@ N(f9) (void)
__attribute__((noinline, noclone)) void
N(f10) (void)
{
- int i;
+ SC int i;
#pragma omp F S collapse(3)
for (i = 0; i < 10; i++)
for (int j = 10; j < 8; j++)
@@ -146,7 +149,7 @@ N(f11) (int n)
__attribute__((noinline, noclone)) void
N(f12) (int n)
{
- int i;
+ SC int i;
#pragma omp F S collapse(3)
for (i = 0; i < 10; i++)
for (int j = n; j < 8; j++)
@@ -170,7 +173,7 @@ N(f13) (void)
__attribute__((noinline, noclone)) void
N(f14) (void)
{
- float *i;
+ SC float *i;
#pragma omp F S collapse(3)
for (i = &b[0][0][0]; i < &b[0][0][10]; i++)
for (float *j = &b[0][15][0]; j > &b[0][0][0]; j -= 10)
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/loop-16.c b/gcc-4.9/libgomp/testsuite/libgomp.c/loop-16.c
new file mode 100644
index 000000000..3ef2f6489
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/loop-16.c
@@ -0,0 +1,27 @@
+/* { dg-do run } */
+
+extern void abort (void);
+
+volatile int count;
+static int test (void)
+{
+ return ++count > 0;
+}
+
+int i;
+
+int
+main ()
+{
+ #pragma omp for lastprivate (i)
+ for (i = 0; i < 10; ++i)
+ {
+ int *p = &i;
+ if (test ())
+ continue;
+ abort ();
+ }
+ if (i != count)
+ abort ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/simd-10.c b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-10.c
new file mode 100644
index 000000000..70cd9f015
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-10.c
@@ -0,0 +1,26 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+/* { dg-additional-options "-msse2" { target sse2_runtime } } */
+/* { dg-additional-options "-mavx" { target avx_runtime } } */
+
+int s = 0, i, u;
+
+void
+foo ()
+{
+ #pragma omp for simd schedule(static, 32) reduction(+:s) lastprivate(u)
+ for (i = 0; i < 128; i++)
+ {
+ s++;
+ u = i;
+ }
+ if (i != 128 || s != 128 || u != 127)
+ __builtin_abort ();
+}
+
+int
+main ()
+{
+ foo ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/simd-11.c b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-11.c
new file mode 100644
index 000000000..b09f0dde8
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-11.c
@@ -0,0 +1,27 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+/* { dg-additional-options "-msse2" { target sse2_runtime } } */
+/* { dg-additional-options "-mavx" { target avx_runtime } } */
+
+int s = 0, i, j, u;
+
+void
+foo ()
+{
+ #pragma omp for simd schedule(static, 32) reduction(+:s) lastprivate(u) collapse(2)
+ for (i = 0; i < 16; i++)
+ for (j = 0; j < 16; j++)
+ {
+ s++;
+ u = i + j;
+ }
+ if (i != 16 || j != 16 || s != 256 || u != 30)
+ __builtin_abort ();
+}
+
+int
+main ()
+{
+ foo ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/simd-12.c b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-12.c
new file mode 100644
index 000000000..6685111a0
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-12.c
@@ -0,0 +1,19 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+/* { dg-additional-options "-msse2" { target sse2_runtime } } */
+/* { dg-additional-options "-mavx" { target avx_runtime } } */
+
+int
+main ()
+{
+ int k = 0, i, s = 0;
+ #pragma omp parallel
+ #pragma omp for simd linear(k : 3) reduction(+: s) schedule (static, 16)
+ for (i = 0; i < 128; i++)
+ {
+ k = k + 3;
+ s = s + k;
+ }
+ if (s != 128 * 129 / 2 * 3) __builtin_abort ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/simd-13.c b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-13.c
new file mode 100644
index 000000000..7c817b793
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-13.c
@@ -0,0 +1,18 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+/* { dg-additional-options "-msse2" { target sse2_runtime } } */
+/* { dg-additional-options "-mavx" { target avx_runtime } } */
+
+int
+main ()
+{
+ int k = 0, i, s = 0;
+ #pragma omp parallel for simd linear(k : 3) reduction(+: s) schedule (static, 16)
+ for (i = 0; i < 128; i++)
+ {
+ k = k + 3;
+ s = s + k;
+ }
+ if (s != 128 * 129 / 2 * 3) __builtin_abort ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/simd-14.c b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-14.c
new file mode 100644
index 000000000..50e8d5e90
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-14.c
@@ -0,0 +1,123 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+/* { dg-additional-options "-msse2" { target sse2_runtime } } */
+/* { dg-additional-options "-mavx" { target avx_runtime } } */
+
+int
+main ()
+{
+ int i, j, b, c = 0;
+ i = 4; j = 4; b = 7;
+ #pragma omp simd linear(b:2) reduction(+:c)
+ for (i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ b = b + 2;
+ }
+ if (c || i != 64 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp simd linear(b:3) reduction(+:c)
+ for (i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ b = b + 3;
+ }
+ if (c || i != 64 || b != 7 + 16 * 3)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp simd linear(i) linear(b:2) reduction(+:c)
+ for (i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ b = b + 2;
+ }
+ if (c || i != 64 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp simd linear(i:4) linear(b:3) reduction(+:c)
+ for (i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ b = b + 3;
+ }
+ if (c || i != 64 || b != 7 + 16 * 3)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp simd collapse (2) linear(b:2) reduction(+:c)
+ for (i = 0; i < 8; i++)
+ for (j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ b = b + 2;
+ }
+ if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp simd collapse (2) lastprivate (i, j) linear(b:2) reduction(+:c)
+ for (i = 0; i < 8; i++)
+ for (j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ b = b + 2;
+ }
+ if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)
+ for (i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ b = b + 2;
+ }
+ if (c || i != 64 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)
+ for (i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ b = b + 3;
+ }
+ if (c || i != 64 || b != 7 + 16 * 3)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(i) linear(b:2) reduction(+:c)
+ for (i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ b = b + 2;
+ }
+ if (c || i != 64 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(i:4) linear(b:3) reduction(+:c)
+ for (i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ b = b + 3;
+ }
+ if (c || i != 64 || b != 7 + 16 * 3)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd lastprivate (i, j) collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
+ for (i = 0; i < 8; i++)
+ for (j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ b = b + 2;
+ }
+ if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
+ for (i = 0; i < 8; i++)
+ for (j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ b = b + 2;
+ }
+ if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/simd-15.c b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-15.c
new file mode 100644
index 000000000..e474b81fa
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-15.c
@@ -0,0 +1,129 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+/* { dg-additional-options "-msse2" { target sse2_runtime } } */
+/* { dg-additional-options "-mavx" { target avx_runtime } } */
+
+static inline void
+foo (int *b, int *i, int *j, int x)
+{
+ *b = *b + x + (*i - *i) + (*j - *j);
+}
+
+int
+main ()
+{
+ int i, j, b, c = 0;
+ i = 4; j = 4; b = 7;
+ #pragma omp simd linear(b:2) reduction(+:c)
+ for (i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ foo (&b, &i, &j, 2);
+ }
+ if (c || i != 64 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp simd linear(b:3) reduction(+:c)
+ for (i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ foo (&b, &i, &j, 3);
+ }
+ if (c || i != 64 || b != 7 + 16 * 3)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp simd linear(i) linear(b:2) reduction(+:c)
+ for (i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ foo (&b, &i, &j, 2);
+ }
+ if (c || i != 64 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp simd linear(i:4) linear(b:3) reduction(+:c)
+ for (i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ foo (&b, &i, &j, 3);
+ }
+ if (c || i != 64 || b != 7 + 16 * 3)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp simd collapse (2) linear(b:2) reduction(+:c)
+ for (i = 0; i < 8; i++)
+ for (j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ foo (&b, &i, &j, 2);
+ }
+ if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp simd collapse (2) lastprivate (i, j) linear(b:2) reduction(+:c)
+ for (i = 0; i < 8; i++)
+ for (j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ foo (&b, &i, &j, 2);
+ }
+ if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)
+ for (i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ foo (&b, &i, &j, 2);
+ }
+ if (c || i != 64 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)
+ for (i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ foo (&b, &i, &j, 3);
+ }
+ if (c || i != 64 || b != 7 + 16 * 3)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(i) linear(b:2) reduction(+:c)
+ for (i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ foo (&b, &i, &j, 2);
+ }
+ if (c || i != 64 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(i:4) linear(b:3) reduction(+:c)
+ for (i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ foo (&b, &i, &j, 3);
+ }
+ if (c || i != 64 || b != 7 + 16 * 3)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd lastprivate (i, j) collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
+ for (i = 0; i < 8; i++)
+ for (j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ foo (&b, &i, &j, 2);
+ }
+ if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ i = 4; j = 4; b = 7;
+ #pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
+ for (i = 0; i < 8; i++)
+ for (j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ foo (&b, &i, &j, 2);
+ }
+ if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/simd-16.c b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-16.c
new file mode 100644
index 000000000..c8c29c744
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-16.c
@@ -0,0 +1,67 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -std=c99" } */
+/* { dg-additional-options "-msse2" { target sse2_runtime } } */
+/* { dg-additional-options "-mavx" { target avx_runtime } } */
+
+int
+main ()
+{
+ int b, c = 0;
+ b = 7;
+ #pragma omp simd linear(b:2) reduction(+:c)
+ for (int i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ b = b + 2;
+ }
+ if (c || b != 7 + 64 * 2)
+ __builtin_abort ();
+ b = 7;
+ #pragma omp simd linear(b:3) reduction(+:c)
+ for (int i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ b = b + 3;
+ }
+ if (c || b != 7 + 16 * 3)
+ __builtin_abort ();
+ b = 7;
+ #pragma omp simd collapse (2) linear(b:2) reduction(+:c)
+ for (int i = 0; i < 8; i++)
+ for (int j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ b = b + 2;
+ }
+ if (c || b != 7 + 64 * 2)
+ __builtin_abort ();
+ b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)
+ for (int i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ b = b + 2;
+ }
+ if (c || b != 7 + 64 * 2)
+ __builtin_abort ();
+ b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)
+ for (int i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ b = b + 3;
+ }
+ if (c || b != 7 + 16 * 3)
+ __builtin_abort ();
+ b = 7;
+ #pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
+ for (int i = 0; i < 8; i++)
+ for (int j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ b = b + 2;
+ }
+ if (c || b != 7 + 64 * 2)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/simd-17.c b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-17.c
new file mode 100644
index 000000000..136e6e64a
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-17.c
@@ -0,0 +1,73 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -std=c99" } */
+/* { dg-additional-options "-msse2" { target sse2_runtime } } */
+/* { dg-additional-options "-mavx" { target avx_runtime } } */
+
+static inline void
+foo (int *b, int *i, int *j, int x)
+{
+ *b = *b + x + (*i - *i) + (*j - *j);
+}
+
+int
+main ()
+{
+ int b, c = 0;
+ b = 7;
+ #pragma omp simd linear(b:2) reduction(+:c)
+ for (int i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ foo (&b, &i, &i, 2);
+ }
+ if (c || b != 7 + 64 * 2)
+ __builtin_abort ();
+ b = 7;
+ #pragma omp simd linear(b:3) reduction(+:c)
+ for (int i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ foo (&b, &i, &i, 3);
+ }
+ if (c || b != 7 + 16 * 3)
+ __builtin_abort ();
+ b = 7;
+ #pragma omp simd collapse (2) linear(b:2) reduction(+:c)
+ for (int i = 0; i < 8; i++)
+ for (int j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ foo (&b, &i, &j, 2);
+ }
+ if (c || b != 7 + 64 * 2)
+ __builtin_abort ();
+ b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)
+ for (int i = 0; i < 64; i++)
+ {
+ c = c + (b != 7 + 2 * i);
+ foo (&b, &i, &i, 2);
+ }
+ if (c || b != 7 + 64 * 2)
+ __builtin_abort ();
+ b = 7;
+ #pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)
+ for (int i = 0; i < 64; i += 4)
+ {
+ c = c + (b != 7 + i / 4 * 3);
+ foo (&b, &i, &i, 3);
+ }
+ if (c || b != 7 + 16 * 3)
+ __builtin_abort ();
+ b = 7;
+ #pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
+ for (int i = 0; i < 8; i++)
+ for (int j = 0; j < 8; j++)
+ {
+ c = c + (b != 7 + 2 * j + 2 * 8 * i);
+ foo (&b, &i, &j, 2);
+ }
+ if (c || b != 7 + 64 * 2)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/simd-7.c b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-7.c
new file mode 100644
index 000000000..ab04fee82
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-7.c
@@ -0,0 +1,96 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+/* { dg-additional-options "-msse2" { target sse2_runtime } } */
+/* { dg-additional-options "-mavx" { target avx_runtime } } */
+
+extern void abort ();
+int a[1024] __attribute__((aligned (32))) = { 1 };
+int b[1024] __attribute__((aligned (32))) = { 1 };
+int k, m;
+struct U { int u; };
+struct V { int v; };
+
+__attribute__((noinline, noclone)) int
+foo (int *p)
+{
+ int i, s = 0;
+ struct U u;
+ struct V v;
+ #pragma omp simd aligned(a, p : 32) linear(k: m + 1) \
+ linear(i) reduction(+:s) lastprivate(u, v)
+ for (i = 0; i < 1024; i++)
+ {
+ int *q = &i;
+ a[i] *= p[i];
+ u.u = p[i] + k;
+ k += m + 1;
+ v.v = p[i] + k;
+ s += p[i] + k;
+ }
+ if (u.u != 36 + 4 + 3 * 1023 || v.v != 36 + 4 + 3 * 1024 || i != 1024)
+ abort ();
+ return s;
+}
+
+__attribute__((noinline, noclone)) int
+bar (int *p)
+{
+ int i, s = 0;
+ struct U u;
+ struct V v;
+ #pragma omp simd aligned(a, p : 32) linear(k: m + 1) \
+ reduction(+:s) lastprivate(u, v)
+ for (i = 0; i < 1024; i++)
+ {
+ int *q = &i;
+ a[i] *= p[i];
+ u.u = p[i] + k;
+ k += m + 1;
+ v.v = p[i] + k;
+ s += p[i] + k;
+ }
+ if (u.u != 36 + 4 + 3 * 1023 || v.v != 36 + 4 + 3 * 1024 || i != 1024)
+ abort ();
+ return s;
+}
+
+int
+main ()
+{
+#if __SIZEOF_INT__ >= 4
+ int i;
+ k = 4;
+ m = 2;
+ for (i = 0; i < 1024; i++)
+ {
+ a[i] = i - 512;
+ b[i] = (i - 51) % 39;
+ }
+ int s = foo (b);
+ for (i = 0; i < 1024; i++)
+ {
+ if (b[i] != (i - 51) % 39
+ || a[i] != (i - 512) * b[i])
+ abort ();
+ }
+ if (k != 4 + 3 * 1024 || s != 1596127)
+ abort ();
+ k = 4;
+ m = 2;
+ for (i = 0; i < 1024; i++)
+ {
+ a[i] = i - 512;
+ b[i] = (i - 51) % 39;
+ }
+ s = bar (b);
+ for (i = 0; i < 1024; i++)
+ {
+ if (b[i] != (i - 51) % 39
+ || a[i] != (i - 512) * b[i])
+ abort ();
+ }
+ if (k != 4 + 3 * 1024 || s != 1596127)
+ abort ();
+#endif
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/simd-8.c b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-8.c
new file mode 100644
index 000000000..13f40d583
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-8.c
@@ -0,0 +1,44 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+/* { dg-additional-options "-msse2" { target sse2_runtime } } */
+/* { dg-additional-options "-mavx" { target avx_runtime } } */
+
+extern void abort ();
+int a[32][32] __attribute__((aligned (32))) = { { 1 } };
+struct S { int s; };
+#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s)
+#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s)
+#pragma omp declare reduction (foo:int:omp_out += omp_in)
+
+__attribute__((noinline, noclone)) int
+foo (void)
+{
+ int i, j, u = 0;
+ struct S s, t;
+ s.s = 0; t.s = 0;
+ #pragma omp simd aligned(a : 32) reduction(+:s) reduction(foo:t, u) collapse(2)
+ for (i = 0; i < 32; i++)
+ for (j = 0; j < 32; j++)
+ {
+ int x = a[i][j];
+ s.s += x;
+ t.s += x;
+ u += x;
+ }
+ if (t.s != s.s || u != s.s)
+ abort ();
+ return s.s;
+}
+
+int
+main ()
+{
+ int i, j;
+ for (i = 0; i < 32; i++)
+ for (j = 0; j < 32; j++)
+ a[i][j] = j + (i / 4);
+ int s = foo ();
+ if (s != 19456)
+ abort ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/simd-9.c b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-9.c
new file mode 100644
index 000000000..b64dd2522
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/simd-9.c
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+/* { dg-additional-options "-msse2" { target sse2_runtime } } */
+/* { dg-additional-options "-mavx" { target avx_runtime } } */
+
+extern void abort ();
+int a[32][32] __attribute__((aligned (32))) = { { 1 } };
+struct S { int s; };
+#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s)
+#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s)
+#pragma omp declare reduction (foo:int:omp_out += omp_in)
+
+__attribute__((noinline, noclone)) int
+foo (void)
+{
+ int i, j, u = 0;
+ struct S s, t;
+ s.s = 0; t.s = 0;
+ #pragma omp simd aligned(a : 32) lastprivate (i, j) reduction(+:s) reduction(foo:t, u) collapse(2)
+ for (i = 0; i < 32; i++)
+ for (j = 0; j < 32; j++)
+ {
+ int *q = &i;
+ int *r = &j;
+ int x = a[i][j];
+ s.s += x;
+ t.s += x;
+ u += x;
+ }
+ if (t.s != s.s || u != s.s || i != 32 || j != 32)
+ abort ();
+ return s.s;
+}
+
+__attribute__((noinline, noclone)) int
+bar (void)
+{
+ int i, j, u = 0;
+ struct S s, t;
+ s.s = 0; t.s = 0;
+ #pragma omp simd aligned(a:32)reduction(+:s)reduction(foo:t,u)collapse(2)
+ for (i = 0; i < 32; i++)
+ for (j = 0; j < 32; j++)
+ {
+ int *q = &i;
+ int *r = &j;
+ int x = a[i][j];
+ s.s += x;
+ t.s += x;
+ u += x;
+ }
+ if (t.s != s.s || u != s.s || i != 32 || j != 32)
+ abort ();
+ return s.s;
+}
+
+int
+main ()
+{
+ int i, j;
+ for (i = 0; i < 32; i++)
+ for (j = 0; j < 32; j++)
+ a[i][j] = j + (i / 4);
+ int s = foo ();
+ if (s != 19456)
+ abort ();
+ if (bar () != 19456)
+ abort ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.c/target-8.c b/gcc-4.9/libgomp/testsuite/libgomp.c/target-8.c
new file mode 100644
index 000000000..350845753
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.c/target-8.c
@@ -0,0 +1,26 @@
+/* { dg-do run } */
+/* { dg-options "-fopenmp" } */
+
+void
+foo (int *p)
+{
+ int i;
+ #pragma omp parallel
+ #pragma omp single
+ #pragma omp target teams distribute parallel for map(p[0:24])
+ for (i = 0; i < 24; i++)
+ p[i] = p[i] + 1;
+}
+
+int
+main ()
+{
+ int p[24], i;
+ for (i = 0; i < 24; i++)
+ p[i] = i;
+ foo (p);
+ for (i = 0; i < 24; i++)
+ if (p[i] != i + 1)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/aligned1.f03 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/aligned1.f03
new file mode 100644
index 000000000..67a9ab404
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/aligned1.f03
@@ -0,0 +1,133 @@
+! { dg-do run }
+! { dg-options "-fopenmp -fcray-pointer" }
+
+ use iso_c_binding, only : c_ptr, c_ptrdiff_t, c_loc
+ interface
+ subroutine foo (x, y, z, w)
+ use iso_c_binding, only : c_ptr
+ real, pointer :: x(:), y(:), w(:)
+ type(c_ptr) :: z
+ end subroutine
+ subroutine bar (x, y, z, w)
+ use iso_c_binding, only : c_ptr
+ real, pointer :: x(:), y(:), w(:)
+ type(c_ptr) :: z
+ end subroutine
+ subroutine baz (x, c)
+ real, pointer :: x(:)
+ real, allocatable :: c(:)
+ end subroutine
+ end interface
+ type dt
+ real, allocatable :: a(:)
+ end type
+ type (dt) :: b(64)
+ real, target :: a(4096+63)
+ real, pointer :: p(:), q(:), r(:), s(:)
+ real, allocatable :: c(:)
+ integer(c_ptrdiff_t) :: o
+ integer :: i
+ o = 64 - mod (loc (a), 64)
+ if (o == 64) o = 0
+ o = o / sizeof(0.0)
+ p => a(o + 1:o + 1024)
+ q => a(o + 1025:o + 2048)
+ r => a(o + 2049:o + 3072)
+ s => a(o + 3073:o + 4096)
+ do i = 1, 1024
+ p(i) = i
+ q(i) = i
+ r(i) = i
+ s(i) = i
+ end do
+ call foo (p, q, c_loc (r(1)), s)
+ do i = 1, 1024
+ if (p(i) /= i * i + 3 * i + 2) call abort
+ p(i) = i
+ end do
+ call bar (p, q, c_loc (r(1)), s)
+ do i = 1, 1024
+ if (p(i) /= i * i + 3 * i + 2) call abort
+ end do
+ ! Attempt to create 64-byte aligned allocatable
+ do i = 1, 64
+ allocate (c(1023 + i))
+ if (iand (loc (c(1)), 63) == 0) exit
+ deallocate (c)
+ allocate (b(i)%a(1023 + i))
+ allocate (c(1023 + i))
+ if (iand (loc (c(1)), 63) == 0) exit
+ deallocate (c)
+ end do
+ if (allocated (c)) then
+ do i = 1, 1024
+ c(i) = 2 * i
+ end do
+ call baz (p, c)
+ do i = 1, 1024
+ if (p(i) /= i * i + 5 * i + 2) call abort
+ end do
+ end if
+end
+subroutine foo (x, y, z, w)
+ use iso_c_binding, only : c_ptr, c_f_pointer
+ real, pointer :: x(:), y(:), w(:), p(:)
+ type(c_ptr) :: z
+ integer :: i
+ real :: pt(1024)
+ pointer (ip, pt)
+ ip = loc (w)
+!$omp simd aligned (x, y : 64)
+ do i = 1, 1024
+ x(i) = x(i) * y(i) + 2.0
+ end do
+!$omp simd aligned (x, z : 64) private (p)
+ do i = 1, 1024
+ call c_f_pointer (z, p, shape=[1024])
+ x(i) = x(i) + p(i)
+ end do
+!$omp simd aligned (x, ip : 64)
+ do i = 1, 1024
+ x(i) = x(i) + 2 * pt(i)
+ end do
+!$omp end simd
+end subroutine
+subroutine bar (x, y, z, w)
+ use iso_c_binding, only : c_ptr, c_f_pointer
+ real, pointer :: x(:), y(:), w(:), a(:), b(:)
+ type(c_ptr) :: z, c
+ integer :: i
+ real :: pt(1024)
+ pointer (ip, pt)
+ ip = loc (w)
+ a => x
+ b => y
+ c = z
+!$omp simd aligned (a, b : 64)
+ do i = 1, 1024
+ a(i) = a(i) * b(i) + 2.0
+ end do
+!$omp simd aligned (a, c : 64)
+ do i = 1, 1024
+ block
+ real, pointer :: p(:)
+ call c_f_pointer (c, p, shape=[1024])
+ a(i) = a(i) + p(i)
+ end block
+ end do
+!$omp simd aligned (a, ip : 64)
+ do i = 1, 1024
+ a(i) = a(i) + 2 * pt(i)
+ end do
+!$omp end simd
+end subroutine
+subroutine baz (x, c)
+ real, pointer :: x(:)
+ real, allocatable :: c(:)
+ integer :: i
+!$omp simd aligned (x, c : 64)
+ do i = 1, 1024
+ x(i) = x(i) + c(i)
+ end do
+!$omp end simd
+end subroutine baz
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-1.f90
new file mode 100644
index 000000000..2a2a12ec8
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-1.f90
@@ -0,0 +1,328 @@
+! { dg-do run }
+! Don't cycle by default through all options, just test -O0 and -O2,
+! as this is quite a large test.
+! { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O0" "-O2" } }
+
+module m
+ type dl
+ integer :: a, b
+ integer, allocatable :: c(:,:)
+ integer :: d, e
+ integer, allocatable :: f
+ end type
+ type dt
+ integer :: g
+ type (dl), allocatable :: h(:)
+ integer :: i
+ type (dl) :: j(2, 2)
+ type (dl), allocatable :: k
+ end type
+contains
+ subroutine ver_dl (obj, val, c, cl1, cu1, cl2, cu2, f)
+ type (dl), intent (in) :: obj
+ integer, intent (in) :: val, cl1, cu1, cl2, cu2
+ logical, intent (in) :: c, f
+ if ((c .neqv. allocated (obj%c)) .or. (f .neqv. allocated (obj%f))) call abort
+ if (c) then
+ if (lbound (obj%c, 1) /= cl1 .or. ubound (obj%c, 1) /= cu1) call abort
+ if (lbound (obj%c, 2) /= cl2 .or. ubound (obj%c, 2) /= cu2) call abort
+ end if
+ if (val /= 0) then
+ if (obj%a /= val .or. obj%b /= val) call abort
+ if (obj%d /= val .or. obj%e /= val) call abort
+ if (c) then
+ if (any (obj%c /= val)) call abort
+ end if
+ if (f) then
+ if (obj%f /= val) call abort
+ end if
+ end if
+ end subroutine ver_dl
+ subroutine ver_dt (obj, val, h, hl, hu, k, c, cl1, cu1, cl2, cu2, f)
+ type (dt), intent (in) :: obj
+ integer, intent (in) :: val, hl, hu, cl1, cu1, cl2, cu2
+ logical, intent (in) :: h, k, c, f
+ integer :: i, j
+ if ((h .neqv. allocated (obj%h)) .or. (k .neqv. allocated (obj%k))) call abort
+ if (h) then
+ if (lbound (obj%h, 1) /= hl .or. ubound (obj%h, 1) /= hu) call abort
+ do i = hl, hu
+ call ver_dl (obj%h(i), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end if
+ do i = 1, 2
+ do j = 1, 2
+ call ver_dl (obj%j(i, j), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end do
+ if (k) call ver_dl (obj%k, val, c, cl1, cu1, cl2, cu2, f)
+ if (val /= 0) then
+ if (obj%g /= val .or. obj%i /= val) call abort
+ end if
+ end subroutine ver_dt
+ subroutine alloc_dl (obj, val, c, cl1, cu1, cl2, cu2, f)
+ type (dl), intent (inout) :: obj
+ integer, intent (in) :: val, cl1, cu1, cl2, cu2
+ logical, intent (in) :: c, f
+ if (val /= 0) then
+ obj%a = val
+ obj%b = val
+ obj%d = val
+ obj%e = val
+ end if
+ if (allocated (obj%c)) deallocate (obj%c)
+ if (c) then
+ allocate (obj%c(cl1:cu1, cl2:cu2))
+ if (val /= 0) obj%c = val
+ end if
+ if (f) then
+ if (.not.allocated (obj%f)) allocate (obj%f)
+ if (val /= 0) obj%f = val
+ else
+ if (allocated (obj%f)) deallocate (obj%f)
+ end if
+ end subroutine alloc_dl
+ subroutine alloc_dt (obj, val, h, hl, hu, k, c, cl1, cu1, cl2, cu2, f)
+ type (dt), intent (inout) :: obj
+ integer, intent (in) :: val, hl, hu, cl1, cu1, cl2, cu2
+ logical, intent (in) :: h, k, c, f
+ integer :: i, j
+ if (val /= 0) then
+ obj%g = val
+ obj%i = val
+ end if
+ if (allocated (obj%h)) deallocate (obj%h)
+ if (h) then
+ allocate (obj%h(hl:hu))
+ do i = hl, hu
+ call alloc_dl (obj%h(i), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end if
+ do i = 1, 2
+ do j = 1, 2
+ call alloc_dl (obj%j(i, j), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end do
+ if (k) then
+ if (.not.allocated (obj%k)) allocate (obj%k)
+ call alloc_dl (obj%k, val, c, cl1, cu1, cl2, cu2, f)
+ else
+ if (allocated (obj%k)) deallocate (obj%k)
+ end if
+ end subroutine alloc_dt
+end module m
+ use m
+ type (dt) :: y
+ call foo (y)
+contains
+ subroutine foo (y)
+ use m
+ type (dt) :: x, y, z(-3:-3,2:3)
+ logical, parameter :: F = .false.
+ logical, parameter :: T = .true.
+ logical :: l
+ call ver_dt (x, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+!$omp parallel private (x, y, z)
+ call ver_dt (x, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y, 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (y, 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,3), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+!$omp end parallel
+ call ver_dt (x, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y, 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+!$omp parallel private (x, y, z)
+ call ver_dt (x, 0, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y, 0, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ deallocate (x%h, x%k)
+ deallocate (y%h)
+ allocate (y%k)
+ call ver_dt (z(-3,2), 0, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 0, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ deallocate (z(-3,2)%h, z(-3,2)%k)
+ deallocate (z(-3,3)%h)
+ allocate (z(-3,3)%k)
+!$omp end parallel
+ call alloc_dt (x, 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (y, 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (z(-3,2), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (z(-3,3), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+!$omp parallel firstprivate (x, y, z)
+ call ver_dt (x, 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y, 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (y, 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+ call ver_dt (y, 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,2), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (z(-3,3), 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,3), 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+!$omp end parallel
+ call ver_dt (x, 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (x, 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y, 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (y, 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,2), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (z(-3,2), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (z(-3,3), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+!$omp parallel firstprivate (x, y, z)
+ call ver_dt (x, 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y, 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y, 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+ call ver_dt (y, 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+ call ver_dt (z(-3,2), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+ call ver_dt (z(-3,3), 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+!$omp end parallel
+ call ver_dt (x, 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y, 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y, 18, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ call ver_dt (z(-3,2), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 18, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ l = F
+!$omp parallel sections lastprivate (x, y, z) firstprivate (l)
+!$omp section
+ if (l) then
+ call ver_dt (x, 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y, 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ else
+ call ver_dt (x, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y, 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ end if
+ l = T
+ call alloc_dt (x, 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (x, 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call alloc_dt (y, 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call ver_dt (y, 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call alloc_dt (z(-3,2), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (z(-3,2), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call alloc_dt (z(-3,3), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call ver_dt (z(-3,3), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+!$omp section
+ if (l) then
+ call ver_dt (x, 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (y, 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call ver_dt (z(-3,2), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (z(-3,3), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ else
+ call ver_dt (x, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y, 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ end if
+ l = T
+ call alloc_dt (x, 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (x, 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call alloc_dt (y, 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (y, 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call alloc_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call alloc_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+!$omp section
+!$omp end parallel sections
+ call ver_dt (x, 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y, 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+!$omp parallel sections lastprivate (x, y, z) firstprivate (l)
+!$omp section
+ if (l) then
+ call ver_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ else
+ call ver_dt (x, 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y, 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ end if
+ l = T
+ call alloc_dt (x, 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (x, 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (y, 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (y, 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call alloc_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+!$omp section
+ if (l) then
+ call ver_dt (x, 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (y, 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ else
+ call ver_dt (x, 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y, 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ end if
+ l = T
+ call alloc_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call alloc_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call alloc_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call alloc_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+!$omp section
+!$omp end parallel sections
+ call ver_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+!$omp parallel private (x, y, z)
+ call ver_dt (x, 0, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y, 0, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 0, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 0, T, 0, 1, T, T, 2, 2, 2, 2, F)
+!$omp single
+ call alloc_dt (x, 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (y, 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call alloc_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+!$omp end single copyprivate (x, y, z)
+ call ver_dt (x, 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (y, 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+!$omp end parallel
+ call ver_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ end subroutine foo
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-2.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-2.f90
new file mode 100644
index 000000000..490ed24cf
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-2.f90
@@ -0,0 +1,367 @@
+! { dg-do run }
+! Don't cycle by default through all options, just test -O0 and -O2,
+! as this is quite a large test.
+! { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O0" "-O2" } }
+
+module m
+ type dl
+ integer :: a, b
+ integer, allocatable :: c(:,:)
+ integer :: d, e
+ integer, allocatable :: f
+ end type
+ type dt
+ integer :: g
+ type (dl), allocatable :: h(:)
+ integer :: i
+ type (dl) :: j(2, 2)
+ type (dl), allocatable :: k
+ end type
+contains
+ subroutine ver_dl (obj, val, c, cl1, cu1, cl2, cu2, f)
+ type (dl), intent (in) :: obj
+ integer, intent (in) :: val, cl1, cu1, cl2, cu2
+ logical, intent (in) :: c, f
+ if ((c .neqv. allocated (obj%c)) .or. (f .neqv. allocated (obj%f))) call abort
+ if (c) then
+ if (lbound (obj%c, 1) /= cl1 .or. ubound (obj%c, 1) /= cu1) call abort
+ if (lbound (obj%c, 2) /= cl2 .or. ubound (obj%c, 2) /= cu2) call abort
+ end if
+ if (val /= 0) then
+ if (obj%a /= val .or. obj%b /= val) call abort
+ if (obj%d /= val .or. obj%e /= val) call abort
+ if (c) then
+ if (any (obj%c /= val)) call abort
+ end if
+ if (f) then
+ if (obj%f /= val) call abort
+ end if
+ end if
+ end subroutine ver_dl
+ subroutine ver_dt (obj, val, h, hl, hu, k, c, cl1, cu1, cl2, cu2, f)
+ type (dt), intent (in) :: obj
+ integer, intent (in) :: val, hl, hu, cl1, cu1, cl2, cu2
+ logical, intent (in) :: h, k, c, f
+ integer :: i, j
+ if ((h .neqv. allocated (obj%h)) .or. (k .neqv. allocated (obj%k))) call abort
+ if (h) then
+ if (lbound (obj%h, 1) /= hl .or. ubound (obj%h, 1) /= hu) call abort
+ do i = hl, hu
+ call ver_dl (obj%h(i), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end if
+ do i = 1, 2
+ do j = 1, 2
+ call ver_dl (obj%j(i, j), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end do
+ if (k) call ver_dl (obj%k, val, c, cl1, cu1, cl2, cu2, f)
+ if (val /= 0) then
+ if (obj%g /= val .or. obj%i /= val) call abort
+ end if
+ end subroutine ver_dt
+ subroutine alloc_dl (obj, val, c, cl1, cu1, cl2, cu2, f)
+ type (dl), intent (inout) :: obj
+ integer, intent (in) :: val, cl1, cu1, cl2, cu2
+ logical, intent (in) :: c, f
+ if (val /= 0) then
+ obj%a = val
+ obj%b = val
+ obj%d = val
+ obj%e = val
+ end if
+ if (allocated (obj%c)) deallocate (obj%c)
+ if (c) then
+ allocate (obj%c(cl1:cu1, cl2:cu2))
+ if (val /= 0) obj%c = val
+ end if
+ if (f) then
+ if (.not.allocated (obj%f)) allocate (obj%f)
+ if (val /= 0) obj%f = val
+ else
+ if (allocated (obj%f)) deallocate (obj%f)
+ end if
+ end subroutine alloc_dl
+ subroutine alloc_dt (obj, val, h, hl, hu, k, c, cl1, cu1, cl2, cu2, f)
+ type (dt), intent (inout) :: obj
+ integer, intent (in) :: val, hl, hu, cl1, cu1, cl2, cu2
+ logical, intent (in) :: h, k, c, f
+ integer :: i, j
+ if (val /= 0) then
+ obj%g = val
+ obj%i = val
+ end if
+ if (allocated (obj%h)) deallocate (obj%h)
+ if (h) then
+ allocate (obj%h(hl:hu))
+ do i = hl, hu
+ call alloc_dl (obj%h(i), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end if
+ do i = 1, 2
+ do j = 1, 2
+ call alloc_dl (obj%j(i, j), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end do
+ if (k) then
+ if (.not.allocated (obj%k)) allocate (obj%k)
+ call alloc_dl (obj%k, val, c, cl1, cu1, cl2, cu2, f)
+ else
+ if (allocated (obj%k)) deallocate (obj%k)
+ end if
+ end subroutine alloc_dt
+end module m
+ use m
+ type (dt), allocatable :: y
+ call foo (y)
+contains
+ subroutine foo (y)
+ use m
+ type (dt), allocatable :: x, y, z(:,:)
+ logical, parameter :: F = .false.
+ logical, parameter :: T = .true.
+ logical :: l
+!$omp parallel private (x, y, z)
+ if (allocated (x) .or. allocated (y) .or. allocated (z)) call abort
+!$omp end parallel
+!$omp parallel firstprivate (x, y, z)
+ if (allocated (x) .or. allocated (y) .or. allocated (z)) call abort
+!$omp end parallel
+ l = F
+!$omp parallel sections lastprivate (x, y, z) firstprivate (l)
+!$omp section
+ if (.not. l) then
+ if (allocated (x) .or. allocated (y) .or. allocated (z)) call abort
+ end if
+!$omp section
+ if (.not. l) then
+ if (allocated (x) .or. allocated (y) .or. allocated (z)) call abort
+ end if
+ allocate (x, y, z(-3:-3,2:3))
+ call alloc_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call alloc_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call alloc_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call alloc_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+!$omp section
+!$omp end parallel sections
+ if (.not.allocated (x) .or. .not.allocated (y)) call abort
+ if (.not.allocated (z)) call abort
+ if (lbound (z, 1) /= -3 .or. ubound (z, 1) /= -3) call abort
+ if (lbound (z, 2) /= 2 .or. ubound (z, 2) /= 3) call abort
+ call ver_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call alloc_dt (x, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (x, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+!$omp parallel private (x, y, z)
+ call ver_dt (x, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y, 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (y, 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,3), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+!$omp end parallel
+ call ver_dt (x, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y, 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+!$omp parallel private (x, y, z)
+ call ver_dt (x, 0, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y, 0, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ deallocate (x%h, x%k)
+ deallocate (y%h)
+ allocate (y%k)
+ call ver_dt (z(-3,2), 0, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 0, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ deallocate (z(-3,2)%h, z(-3,2)%k)
+ deallocate (z(-3,3)%h)
+ allocate (z(-3,3)%k)
+!$omp end parallel
+ call alloc_dt (x, 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (y, 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (z(-3,2), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (z(-3,3), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+!$omp parallel firstprivate (x, y, z)
+ call ver_dt (x, 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y, 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (y, 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+ call ver_dt (y, 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,2), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (z(-3,3), 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,3), 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+!$omp end parallel
+ call ver_dt (x, 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (x, 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y, 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (y, 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,2), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (z(-3,2), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (z(-3,3), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+!$omp parallel firstprivate (x, y, z)
+ call ver_dt (x, 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (x, 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y, 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y, 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+ call ver_dt (y, 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+ call ver_dt (z(-3,2), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+ call ver_dt (z(-3,3), 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+!$omp end parallel
+ call ver_dt (x, 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y, 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y, 18, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ call ver_dt (z(-3,2), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 18, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ l = F
+!$omp parallel sections lastprivate (x, y, z) firstprivate (l)
+!$omp section
+ if (l) then
+ call ver_dt (x, 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y, 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ else
+ call ver_dt (x, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y, 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ end if
+ l = T
+ call alloc_dt (x, 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (x, 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call alloc_dt (y, 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call ver_dt (y, 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call alloc_dt (z(-3,2), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (z(-3,2), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call alloc_dt (z(-3,3), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call ver_dt (z(-3,3), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+!$omp section
+ if (l) then
+ call ver_dt (x, 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (y, 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call ver_dt (z(-3,2), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (z(-3,3), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ else
+ call ver_dt (x, 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y, 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ end if
+ l = T
+ call alloc_dt (x, 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (x, 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call alloc_dt (y, 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (y, 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call alloc_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call alloc_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+!$omp section
+!$omp end parallel sections
+ call ver_dt (x, 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y, 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+!$omp parallel sections lastprivate (x, y, z) firstprivate (l)
+!$omp section
+ if (l) then
+ call ver_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ else
+ call ver_dt (x, 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y, 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ end if
+ l = T
+ call alloc_dt (x, 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (x, 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (y, 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (y, 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call alloc_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+!$omp section
+ if (l) then
+ call ver_dt (x, 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (y, 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ else
+ call ver_dt (x, 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y, 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ end if
+ l = T
+ call alloc_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call alloc_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call alloc_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call alloc_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+!$omp section
+!$omp end parallel sections
+ call ver_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+!$omp parallel private (x, y, z)
+ call ver_dt (x, 0, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y, 0, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 0, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 0, T, 0, 1, T, T, 2, 2, 2, 2, F)
+!$omp single
+ call alloc_dt (x, 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (y, 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call alloc_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+!$omp end single copyprivate (x, y, z)
+ call ver_dt (x, 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (y, 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+!$omp end parallel
+ call ver_dt (x, 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y, 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ end subroutine foo
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-3.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-3.f90
new file mode 100644
index 000000000..20f13144a
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/alloc-comp-3.f90
@@ -0,0 +1,372 @@
+! { dg-do run }
+! Don't cycle by default through all options, just test -O0 and -O2,
+! as this is quite a large test.
+! { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O0" "-O2" } }
+
+module m
+ type dl
+ integer :: a, b
+ integer, allocatable :: c(:,:)
+ integer :: d, e
+ integer, allocatable :: f
+ end type
+ type dt
+ integer :: g
+ type (dl), allocatable :: h(:)
+ integer :: i
+ type (dl) :: j(2, 2)
+ type (dl), allocatable :: k
+ end type
+contains
+ subroutine ver_dl (obj, val, c, cl1, cu1, cl2, cu2, f)
+ type (dl), intent (in) :: obj
+ integer, intent (in) :: val, cl1, cu1, cl2, cu2
+ logical, intent (in) :: c, f
+ if ((c .neqv. allocated (obj%c)) .or. (f .neqv. allocated (obj%f))) call abort
+ if (c) then
+ if (lbound (obj%c, 1) /= cl1 .or. ubound (obj%c, 1) /= cu1) call abort
+ if (lbound (obj%c, 2) /= cl2 .or. ubound (obj%c, 2) /= cu2) call abort
+ end if
+ if (val /= 0) then
+ if (obj%a /= val .or. obj%b /= val) call abort
+ if (obj%d /= val .or. obj%e /= val) call abort
+ if (c) then
+ if (any (obj%c /= val)) call abort
+ end if
+ if (f) then
+ if (obj%f /= val) call abort
+ end if
+ end if
+ end subroutine ver_dl
+ subroutine ver_dt (obj, val, h, hl, hu, k, c, cl1, cu1, cl2, cu2, f)
+ type (dt), intent (in) :: obj
+ integer, intent (in) :: val, hl, hu, cl1, cu1, cl2, cu2
+ logical, intent (in) :: h, k, c, f
+ integer :: i, j
+ if ((h .neqv. allocated (obj%h)) .or. (k .neqv. allocated (obj%k))) call abort
+ if (h) then
+ if (lbound (obj%h, 1) /= hl .or. ubound (obj%h, 1) /= hu) call abort
+ do i = hl, hu
+ call ver_dl (obj%h(i), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end if
+ do i = 1, 2
+ do j = 1, 2
+ call ver_dl (obj%j(i, j), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end do
+ if (k) call ver_dl (obj%k, val, c, cl1, cu1, cl2, cu2, f)
+ if (val /= 0) then
+ if (obj%g /= val .or. obj%i /= val) call abort
+ end if
+ end subroutine ver_dt
+ subroutine alloc_dl (obj, val, c, cl1, cu1, cl2, cu2, f)
+ type (dl), intent (inout) :: obj
+ integer, intent (in) :: val, cl1, cu1, cl2, cu2
+ logical, intent (in) :: c, f
+ if (val /= 0) then
+ obj%a = val
+ obj%b = val
+ obj%d = val
+ obj%e = val
+ end if
+ if (allocated (obj%c)) deallocate (obj%c)
+ if (c) then
+ allocate (obj%c(cl1:cu1, cl2:cu2))
+ if (val /= 0) obj%c = val
+ end if
+ if (f) then
+ if (.not.allocated (obj%f)) allocate (obj%f)
+ if (val /= 0) obj%f = val
+ else
+ if (allocated (obj%f)) deallocate (obj%f)
+ end if
+ end subroutine alloc_dl
+ subroutine alloc_dt (obj, val, h, hl, hu, k, c, cl1, cu1, cl2, cu2, f)
+ type (dt), intent (inout) :: obj
+ integer, intent (in) :: val, hl, hu, cl1, cu1, cl2, cu2
+ logical, intent (in) :: h, k, c, f
+ integer :: i, j
+ if (val /= 0) then
+ obj%g = val
+ obj%i = val
+ end if
+ if (allocated (obj%h)) deallocate (obj%h)
+ if (h) then
+ allocate (obj%h(hl:hu))
+ do i = hl, hu
+ call alloc_dl (obj%h(i), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end if
+ do i = 1, 2
+ do j = 1, 2
+ call alloc_dl (obj%j(i, j), val, c, cl1, cu1, cl2, cu2, f)
+ end do
+ end do
+ if (k) then
+ if (.not.allocated (obj%k)) allocate (obj%k)
+ call alloc_dl (obj%k, val, c, cl1, cu1, cl2, cu2, f)
+ else
+ if (allocated (obj%k)) deallocate (obj%k)
+ end if
+ end subroutine alloc_dt
+end module m
+ use m
+ type (dt), allocatable :: z(:,:)
+ type (dt) :: y(2:3)
+ call foo (y, z, 4)
+contains
+ subroutine foo (y, z, n)
+ use m
+ integer :: n
+ type (dt) :: x(2:n), y(3:)
+ type (dt), allocatable :: z(:,:)
+ logical, parameter :: F = .false.
+ logical, parameter :: T = .true.
+ logical :: l
+ if (lbound (x, 1) /= 2 .or. ubound (x, 1) /= 4) call abort
+ if (lbound (y, 1) /= 3 .or. ubound (y, 1) /= 4) call abort
+ call ver_dt (x(2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (x(n), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y(3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y(4), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+!$omp parallel private (z)
+ if (allocated (z)) call abort
+!$omp end parallel
+!$omp parallel firstprivate (z)
+ if (allocated (z)) call abort
+!$omp end parallel
+ l = F
+!$omp parallel sections lastprivate (z) firstprivate (l)
+!$omp section
+ if (.not. l) then
+ if (allocated (z)) call abort
+ end if
+!$omp section
+ if (.not. l) then
+ if (allocated (z)) call abort
+ end if
+ allocate (z(-3:-3,2:3))
+ call alloc_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call alloc_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+!$omp section
+!$omp end parallel sections
+ if (.not.allocated (z)) call abort
+ if (lbound (z, 1) /= -3 .or. ubound (z, 1) /= -3) call abort
+ if (lbound (z, 2) /= 2 .or. ubound (z, 2) /= 3) call abort
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (x(n - 1), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y(4), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+!$omp parallel private (x, y, z)
+ call ver_dt (x(n - 1), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (x(n - 1), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (x(n - 1), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y(4), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y(4), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (y(4), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,3), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+!$omp end parallel
+ call ver_dt (x(n - 1), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (x(n - 1), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y(4), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y(4), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 14, T, 3, 4, F, T, 1, 1, 2, 4, T)
+!$omp parallel private (x, y, z)
+ call ver_dt (x(n - 1), 0, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y(4), 0, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ deallocate (x(n - 1)%h, x(n - 1)%k)
+ deallocate (y(4)%h)
+ allocate (y(4)%k)
+ call ver_dt (z(-3,2), 0, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 0, T, 3, 4, F, T, 1, 1, 2, 4, T)
+ deallocate (z(-3,2)%h, z(-3,2)%k)
+ deallocate (z(-3,3)%h)
+ allocate (z(-3,3)%k)
+!$omp end parallel
+ call alloc_dt (x(n - 1), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (y(4), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (z(-3,2), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (z(-3,3), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+!$omp parallel firstprivate (x, y, z)
+ if (lbound (x, 1) /= 2 .or. ubound (x, 1) /= 4) call abort
+ if (lbound (y, 1) /= 3 .or. ubound (y, 1) /= 4) call abort
+ call ver_dt (x(n - 1), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (x(n - 1), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (x(n - 1), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y(4), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (y(4), 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+ call ver_dt (y(4), 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,2), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (z(-3,3), 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+ call ver_dt (z(-3,3), 4, T, 3, 4, T, T, 1, 1, 2, 4, T)
+!$omp end parallel
+ call ver_dt (x(n - 1), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (x(n - 1), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y(4), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (y(4), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,2), 5, T, 1, 2, F, T, 2, 3, -2, -2, F)
+ call alloc_dt (z(-3,2), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 15, F, 0, 0, T, T, 2, 2, 2, 2, T)
+ call alloc_dt (z(-3,3), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+!$omp parallel firstprivate (x, y, z)
+ call ver_dt (x(n - 1), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (x(n - 1), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (x(n - 1), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (y(4), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y(4), 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+ call ver_dt (y(4), 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+ call ver_dt (z(-3,2), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,2), 4, T, -3, -1, T, T, -1, -1, 2, 3, T)
+ call ver_dt (z(-3,3), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+ call ver_dt (z(-3,3), 17, T, 1, 2, F, T, 2, 2, 3, 3, F)
+!$omp end parallel
+ call ver_dt (x(n - 1), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y(4), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (y(4), 18, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ call ver_dt (z(-3,2), 4, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 16, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call alloc_dt (z(-3,3), 18, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ l = F
+!$omp parallel sections lastprivate (x, y, z) firstprivate (l)
+!$omp section
+ if (l) then
+ call ver_dt (x(n - 1), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y(4), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ else
+ call ver_dt (x(n - 1), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y(4), 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ end if
+ l = T
+ call alloc_dt (x(n - 1), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (x(n - 1), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call alloc_dt (y(4), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call ver_dt (y(4), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call alloc_dt (z(-3,2), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (z(-3,2), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call alloc_dt (z(-3,3), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call ver_dt (z(-3,3), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+!$omp section
+ if (l) then
+ call ver_dt (x(n - 1), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (y(4), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ call ver_dt (z(-3,2), 7, T, 1, 1, T, T, 1, 2, 3, 3, T)
+ call ver_dt (z(-3,3), 20, T, 0, 0, F, T, 2, 2, 3, 4, F)
+ else
+ call ver_dt (x(n - 1), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y(4), 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ call ver_dt (z(-3,2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (z(-3,3), 0, T, 0, 1, T, T, 0, 1, 0, 1, T)
+ end if
+ l = T
+ call alloc_dt (x(n - 1), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (x(n - 1), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call alloc_dt (y(4), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (y(4), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call alloc_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call alloc_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+!$omp section
+!$omp end parallel sections
+ call ver_dt (x(n - 1), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y(4), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 9, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 21, F, 0, 0, T, T, 1, 2, 3, 4, T)
+!$omp parallel sections lastprivate (x, y, z) firstprivate (l)
+!$omp section
+ if (l) then
+ call ver_dt (x(n - 1), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y(4), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ else
+ call ver_dt (x(n - 1), 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y(4), 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ end if
+ l = T
+ call alloc_dt (x(n - 1), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (x(n - 1), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (y(4), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (y(4), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call alloc_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+!$omp section
+ if (l) then
+ call ver_dt (x(n - 1), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (y(4), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ else
+ call ver_dt (x(n - 1), 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (y(4), 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ call ver_dt (z(-3,2), 0, T, 1, 1, F, F, 0, 0, 0, 0, T)
+ call ver_dt (z(-3,3), 0, F, 0, 0, T, T, 1, 2, 3, 4, T)
+ end if
+ l = T
+ call alloc_dt (x(n - 1), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (x(n - 1), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call alloc_dt (y(4), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (y(4), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call alloc_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call alloc_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+!$omp section
+!$omp end parallel sections
+ call ver_dt (x(n - 1), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y(4), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+!$omp parallel private (x, y, z)
+ call ver_dt (x(n - 1), 0, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y(4), 0, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 0, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 0, T, 0, 1, T, T, 2, 2, 2, 2, F)
+!$omp single
+ call alloc_dt (x(n - 1), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (y(4), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call alloc_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call alloc_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+!$omp end single copyprivate (x, y, z)
+ call ver_dt (x(n - 1), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (y(4), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+ call ver_dt (z(-3,2), 3, F, 0, 0, T, T, 0, 1, 0, 1, F)
+ call ver_dt (z(-3,3), 22, T, 5, 5, F, T, 2, 3, 2, 2, T)
+!$omp end parallel
+ call ver_dt (x(n - 1), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (y(4), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (z(-3,2), 5, F, 0, 0, T, T, -1, -1, -1, -1, T)
+ call ver_dt (z(-3,3), 23, T, 0, 1, T, T, 2, 2, 2, 2, F)
+ call ver_dt (x(2), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (x(n), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ call ver_dt (y(3), 0, F, 0, 0, F, F, 0, 0, 0, 0, F)
+ end subroutine foo
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable10.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable10.f90
new file mode 100644
index 000000000..54eed617b
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable10.f90
@@ -0,0 +1,112 @@
+! { dg-do run }
+
+ integer, allocatable :: a, b(:), c(:,:)
+ integer :: i
+!$omp declare reduction (foo : integer : omp_out = omp_out + omp_in) &
+!$omp & initializer (omp_priv = 0)
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+ allocate (a, b(6:9), c(3, 8:9))
+ a = 0
+ b = 0
+ c = 0
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+!$omp parallel do reduction (+:a, b, c)
+ do i = 1, 10
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ a = a + i
+ b = b + 2 * i
+ c = c + 3 * i
+ end do
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ if (a /= 55 .or. any (b /= 110) .or. any (c /= 165)) call abort
+ a = 0
+ b = 0
+ c = 0
+!$omp parallel do reduction (foo : a, b, c)
+ do i = 1, 10
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ a = a + i
+ b = b + 2 * i
+ c = c + 3 * i
+ end do
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ if (a /= 55 .or. any (b /= 110) .or. any (c /= 165)) call abort
+ a = 0
+ b = 0
+ c = 0
+!$omp simd reduction (+:a, b, c)
+ do i = 1, 10
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ a = a + i
+ b = b + 2 * i
+ c = c + 3 * i
+ end do
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ if (a /= 55 .or. any (b /= 110) .or. any (c /= 165)) call abort
+ a = 0
+ b = 0
+ c = 0
+!$omp simd reduction (foo : a, b, c)
+ do i = 1, 10
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ a = a + i
+ b = b + 2 * i
+ c = c + 3 * i
+ end do
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ if (a /= 55 .or. any (b /= 110) .or. any (c /= 165)) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable11.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable11.f90
new file mode 100644
index 000000000..479f6041b
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable11.f90
@@ -0,0 +1,72 @@
+! { dg-do run }
+! { dg-require-effective-target tls_runtime }
+
+ use omp_lib
+ integer, allocatable, save :: a, b(:), c(:,:)
+ integer :: p
+!$omp threadprivate (a, b, c)
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+
+ call omp_set_dynamic (.false.)
+ call omp_set_num_threads (4)
+
+!$omp parallel num_threads (4)
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+!$omp end parallel
+
+ allocate (a, b(6:9), c(3, 8:9))
+ a = 4
+ b = 5
+ c = 6
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+
+!$omp parallel num_threads (4) copyin (a, b, c) private (p)
+ p = omp_get_thread_num ()
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ if (a /= 4 .or. any (b /= 5) .or. any (c /= 6)) call abort
+ deallocate (a, b, c)
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+ allocate (a, b(p:9), c(3, p:7))
+ a = p
+ b = p
+ c = p
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= (10 - p)) call abort
+ if (lbound (b, 1) /= p .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= (3 * (8 - p))) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= (8 - p)) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= p .or. ubound (c, 2) /= 7) call abort
+ if (a /= p .or. any (b /= p) .or. any (c /= p)) call abort
+!$omp end parallel
+
+!$omp parallel num_threads (4) copyin (a, b, c)
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 10) call abort
+ if (lbound (b, 1) /= 0 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 24) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 8) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 0 .or. ubound (c, 2) /= 7) call abort
+ if (a /= 0 .or. any (b /= 0) .or. any (c /= 0)) call abort
+!$omp end parallel
+
+ deallocate (a, b, c)
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+
+!$omp parallel num_threads (4) copyin (a, b, c)
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+!$omp end parallel
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable12.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable12.f90
new file mode 100644
index 000000000..533ab7cd8
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable12.f90
@@ -0,0 +1,74 @@
+! { dg-do run }
+
+ integer, allocatable :: a, b(:), c(:,:)
+ logical :: l
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+
+!$omp parallel private (a, b, c, l)
+ l = .false.
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+
+!$omp single
+ allocate (a, b(6:9), c(3, 8:9))
+ a = 4
+ b = 5
+ c = 6
+!$omp end single copyprivate (a, b, c)
+
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ if (a /= 4 .or. any (b /= 5) .or. any (c /= 6)) call abort
+
+!$omp single
+ deallocate (a, b, c)
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+ allocate (a, b(0:4), c(3, 2:7))
+ a = 1
+ b = 2
+ c = 3
+!$omp end single copyprivate (a, b, c)
+
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 5) call abort
+ if (lbound (b, 1) /= 0 .or. ubound (b, 1) /= 4) call abort
+ if (.not.allocated (c) .or. size (c) /= 18) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 6) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 2 .or. ubound (c, 2) /= 7) call abort
+ if (a /= 1 .or. any (b /= 2) .or. any (c /= 3)) call abort
+
+!$omp single
+ l = .true.
+ deallocate (a, b, c)
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+ allocate (a, b(2:6), c(3:5, 3:8))
+ a = 7
+ b = 8
+ c = 9
+!$omp end single copyprivate (a, b, c)
+
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 5) call abort
+ if (l) then
+ if (lbound (b, 1) /= 2 .or. ubound (b, 1) /= 6) call abort
+ else
+ if (lbound (b, 1) /= 0 .or. ubound (b, 1) /= 4) call abort
+ end if
+ if (.not.allocated (c) .or. size (c) /= 18) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 6) call abort
+ if (l) then
+ if (lbound (c, 1) /= 3 .or. ubound (c, 1) /= 5) call abort
+ if (lbound (c, 2) /= 3 .or. ubound (c, 2) /= 8) call abort
+ else
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 2 .or. ubound (c, 2) /= 7) call abort
+ end if
+ if (a /= 7 .or. any (b /= 8) .or. any (c /= 9)) call abort
+
+!$omp end parallel
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable9.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable9.f90
new file mode 100644
index 000000000..80bf5d389
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/allocatable9.f90
@@ -0,0 +1,156 @@
+! { dg-do run }
+
+ integer, allocatable :: a, b(:), c(:,:)
+ logical :: l
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+!$omp parallel private (a, b, c)
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+ allocate (a, b(-7:-1), c(2:3, 3:5))
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 7) call abort
+ if (lbound (b, 1) /= -7 .or. ubound (b, 1) /= -1) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 2 .or. size (c, 2) /= 3) call abort
+ if (lbound (c, 1) /= 2 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 3 .or. ubound (c, 2) /= 5) call abort
+ a = 4
+ b = 3
+ c = 2
+!$omp end parallel
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+!$omp parallel firstprivate (a, b, c)
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+ allocate (a, b(-7:-1), c(2:3, 3:5))
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 7) call abort
+ if (lbound (b, 1) /= -7 .or. ubound (b, 1) /= -1) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 2 .or. size (c, 2) /= 3) call abort
+ if (lbound (c, 1) /= 2 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 3 .or. ubound (c, 2) /= 5) call abort
+ a = 4
+ b = 3
+ c = 2
+!$omp end parallel
+ if (allocated (a) .or. allocated (b) .or. allocated (c)) call abort
+ allocate (a, b(6:9), c(3, 8:9))
+ a = 2
+ b = 4
+ c = 5
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+!$omp parallel firstprivate (a, b, c)
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ if (a /= 2 .or. any (b .ne. 4) .or. any (c .ne. 5)) call abort
+ deallocate (a)
+ if (allocated (a)) call abort
+ allocate (a)
+ a = 8
+ b = (/ 1, 2, 3 /)
+ c = reshape ((/ 1, 2, 3, 4, 5, 6, 7, 8 /), (/ 2, 4 /))
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 3) call abort
+ if (lbound (b, 1) /= 1 .or. ubound (b, 1) /= 3) call abort
+ if (.not.allocated (c) .or. size (c) /= 8) call abort
+ if (size (c, 1) /= 2 .or. size (c, 2) /= 4) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 2) call abort
+ if (lbound (c, 2) /= 1 .or. ubound (c, 2) /= 4) call abort
+ if (a /= 8 .or. b(2) /= 2 .or. c(1, 2) /= 3) call abort
+!$omp end parallel
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ if (a /= 2 .or. any (b .ne. 4) .or. any (c .ne. 5)) call abort
+ l = .false.
+!$omp parallel sections lastprivate (a, b, c) firstprivate (l)
+!$omp section
+ if (.not.allocated (a)) call abort
+ if (l) then
+ if (.not.allocated (b) .or. size (b) /= 6) call abort
+ if (lbound (b, 1) /= 1 .or. ubound (b, 1) /= 6) call abort
+ if (.not.allocated (c) .or. size (c) /= 8) call abort
+ if (size (c, 1) /= 4 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 4) call abort
+ if (lbound (c, 2) /= 1 .or. ubound (c, 2) /= 2) call abort
+ if (a /= 12 .or. b(2) /= 8 .or. c(1, 2) /= 5) call abort
+ else
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ end if
+ l = .true.
+ deallocate (a)
+ if (allocated (a)) call abort
+ allocate (a)
+ a = 8
+ b = (/ 1, 2, 3 /)
+ c = reshape ((/ 1, 2, 3, 4, 5, 6, 7, 8 /), (/ 2, 4 /))
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 3) call abort
+ if (lbound (b, 1) /= 1 .or. ubound (b, 1) /= 3) call abort
+ if (.not.allocated (c) .or. size (c) /= 8) call abort
+ if (size (c, 1) /= 2 .or. size (c, 2) /= 4) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 2) call abort
+ if (lbound (c, 2) /= 1 .or. ubound (c, 2) /= 4) call abort
+ if (a /= 8 .or. b(2) /= 2 .or. c(1, 2) /= 3) call abort
+!$omp section
+ if (.not.allocated (a)) call abort
+ if (l) then
+ if (.not.allocated (b) .or. size (b) /= 3) call abort
+ if (lbound (b, 1) /= 1 .or. ubound (b, 1) /= 3) call abort
+ if (.not.allocated (c) .or. size (c) /= 8) call abort
+ if (size (c, 1) /= 2 .or. size (c, 2) /= 4) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 2) call abort
+ if (lbound (c, 2) /= 1 .or. ubound (c, 2) /= 4) call abort
+ if (a /= 8 .or. b(2) /= 2 .or. c(1, 2) /= 3) call abort
+ else
+ if (.not.allocated (b) .or. size (b) /= 4) call abort
+ if (lbound (b, 1) /= 6 .or. ubound (b, 1) /= 9) call abort
+ if (.not.allocated (c) .or. size (c) /= 6) call abort
+ if (size (c, 1) /= 3 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 3) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 9) call abort
+ end if
+ l = .true.
+ deallocate (a)
+ if (allocated (a)) call abort
+ allocate (a)
+ a = 12
+ b = (/ 9, 8, 7, 6, 5, 4 /)
+ c = reshape ((/ 1, 2, 3, 4, 5, 6, 7, 8 /), (/ 4, 2 /))
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 6) call abort
+ if (lbound (b, 1) /= 1 .or. ubound (b, 1) /= 6) call abort
+ if (.not.allocated (c) .or. size (c) /= 8) call abort
+ if (size (c, 1) /= 4 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 4) call abort
+ if (lbound (c, 2) /= 1 .or. ubound (c, 2) /= 2) call abort
+ if (a /= 12 .or. b(2) /= 8 .or. c(1, 2) /= 5) call abort
+!$omp end parallel sections
+ if (.not.allocated (a)) call abort
+ if (.not.allocated (b) .or. size (b) /= 6) call abort
+ if (lbound (b, 1) /= 1 .or. ubound (b, 1) /= 6) call abort
+ if (.not.allocated (c) .or. size (c) /= 8) call abort
+ if (size (c, 1) /= 4 .or. size (c, 2) /= 2) call abort
+ if (lbound (c, 1) /= 1 .or. ubound (c, 1) /= 4) call abort
+ if (lbound (c, 2) /= 1 .or. ubound (c, 2) /= 2) call abort
+ if (a /= 12 .or. b(2) /= 8 .or. c(1, 2) /= 5) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/associate1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/associate1.f90
new file mode 100644
index 000000000..e40995515
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/associate1.f90
@@ -0,0 +1,23 @@
+! { dg-do run }
+
+program associate1
+ integer :: v, i, j
+ real :: a(3, 3)
+ v = 15
+ a = 4.5
+ a(2,1) = 3.5
+ i = 2
+ j = 1
+ associate(u => v, b => a(i, j))
+!$omp parallel private(v, a) default(none)
+ v = -1
+ a = 2.5
+ if (v /= -1 .or. u /= 15) call abort
+ if (a(2,1) /= 2.5 .or. b /= 3.5) call abort
+ associate(u => v, b => a(2, 1))
+ if (u /= -1 .or. b /= 2.5) call abort
+ end associate
+ if (u /= 15 .or. b /= 3.5) call abort
+!$omp end parallel
+ end associate
+end program
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/associate2.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/associate2.f90
new file mode 100644
index 000000000..dee8496e1
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/associate2.f90
@@ -0,0 +1,46 @@
+! { dg-do run }
+
+program associate2
+ type dl
+ integer :: i
+ end type
+ type dt
+ integer :: i
+ real :: a(3, 3)
+ type(dl) :: c(3, 3)
+ end type
+ integer :: v(4), i, j, k, l
+ type (dt) :: a(3, 3)
+ v = 15
+ forall (k = 1:3, l = 1:3) a(k, l)%a(:,:) = 4.5
+ a(2,1)%a(1,2) = 3.5
+ i = 2
+ j = 1
+ associate(u => v, b => a(i, j)%a)
+!$omp parallel private(v, a) default(none)
+ v = -1
+ forall (k = 1:3, l = 1:3) a(k, l)%a(:,:) = 2.5
+ if (v(3) /= -1 .or. u(3) /= 15) call abort
+ if (a(2,1)%a(1,2) /= 2.5 .or. b(1,2) /= 3.5) call abort
+ associate(u => v, b => a(2, 1)%a)
+ if (u(3) /= -1 .or. b(1,2) /= 2.5) call abort
+ end associate
+ if (u(3) /= 15 .or. b(1,2) /= 3.5) call abort
+!$omp end parallel
+ end associate
+ forall (k = 1:3, l = 1:3) a(k, l)%c(:,:)%i = 7
+ a(1,2)%c(2,1)%i = 9
+ i = 1
+ j = 2
+ associate(d => a(i, j)%c(2,:)%i)
+!$omp parallel private(a) default(none)
+ forall (k = 1:3, l = 1:3) a(k, l)%c(:,:)%i = 15
+ if (a(1,2)%c(2,1)%i /= 15 .or. d(1) /= 9) call abort
+ if (a(1,2)%c(2,2)%i /= 15 .or. d(2) /= 7) call abort
+ associate(d => a(2,1)%c(2,:)%i)
+ if (d(1) /= 15 .or. d(2) /= 15) call abort
+ end associate
+ if (d(1) /= 9 .or. d(2) /= 7) call abort
+!$omp end parallel
+ end associate
+end program
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-do-1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-do-1.f90
new file mode 100644
index 000000000..61713c4dd
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-do-1.f90
@@ -0,0 +1,14 @@
+! { dg-do run }
+! { dg-set-target-env-var OMP_CANCELLATION "true" }
+
+ use omp_lib
+ integer :: i
+
+ !$omp parallel num_threads(32)
+ !$omp do
+ do i = 0, 999
+ !$omp cancel do
+ if (omp_get_cancellation ()) call abort
+ enddo
+ !$omp endparallel
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-do-2.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-do-2.f90
new file mode 100644
index 000000000..c748800ca
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-do-2.f90
@@ -0,0 +1,90 @@
+! { dg-do run }
+! { dg-options "-fno-inline -fno-ipa-sra -fno-ipa-cp -fno-ipa-cp-clone" }
+! { dg-set-target-env-var OMP_CANCELLATION "true" }
+
+ use omp_lib
+ integer :: i
+ logical :: x(5)
+
+ x(:) = .false.
+ x(1) = .true.
+ x(3) = .true.
+ if (omp_get_cancellation ()) call foo (x)
+contains
+ subroutine foo (x)
+ use omp_lib
+ logical :: x(5)
+ integer :: v, w, i
+
+ v = 0
+ w = 0
+ !$omp parallel num_threads (32) shared (v, w)
+ !$omp do
+ do i = 0, 999
+ !$omp cancel do if (x(1))
+ call abort
+ end do
+ !$omp do
+ do i = 0, 999
+ !$omp cancel do if (x(2))
+ !$omp atomic
+ v = v + 1
+ !$omp endatomic
+ enddo
+ !$omp do
+ do i = 0, 999
+ !$omp cancel do if (x(3))
+ !$omp atomic
+ w = w + 8
+ !$omp end atomic
+ end do
+ !$omp do
+ do i = 0, 999
+ !$omp cancel do if (x(4))
+ !$omp atomic
+ v = v + 2
+ !$omp end atomic
+ end do
+ !$omp end do
+ !$omp end parallel
+ if (v.ne.3000.or.w.ne.0) call abort
+ !$omp parallel num_threads (32) shared (v, w)
+ ! None of these cancel directives should actually cancel anything,
+ ! but the compiler shouldn't know that and thus should use cancellable
+ ! barriers at the end of all the workshares.
+ !$omp cancel parallel if (omp_get_thread_num ().eq.1.and.x(5))
+ !$omp do
+ do i = 0, 999
+ !$omp cancel do if (x(1))
+ call abort
+ end do
+ !$omp cancel parallel if (omp_get_thread_num ().eq.2.and.x(5))
+ !$omp do
+ do i = 0, 999
+ !$omp cancel do if (x(2))
+ !$omp atomic
+ v = v + 1
+ !$omp endatomic
+ enddo
+ !$omp cancel parallel if (omp_get_thread_num ().eq.3.and.x(5))
+ !$omp do
+ do i = 0, 999
+ !$omp cancel do if (x(3))
+ !$omp atomic
+ w = w + 8
+ !$omp end atomic
+ end do
+ !$omp cancel parallel if (omp_get_thread_num ().eq.4.and.x(5))
+ !$omp do
+ do i = 0, 999
+ !$omp cancel do if (x(4))
+ !$omp atomic
+ v = v + 2
+ !$omp end atomic
+ end do
+ !$omp end do
+ !$omp cancel parallel if (omp_get_thread_num ().eq.5.and.x(5))
+ !$omp end parallel
+ if (v.ne.6000.or.w.ne.0) call abort
+ end subroutine
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-parallel-1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-parallel-1.f90
new file mode 100644
index 000000000..7d91ff5c1
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-parallel-1.f90
@@ -0,0 +1,10 @@
+! { dg-do run }
+! { dg-set-target-env-var OMP_CANCELLATION "true" }
+
+ use omp_lib
+
+ !$omp parallel num_threads(32)
+ !$omp cancel parallel
+ if (omp_get_cancellation ()) call abort
+ !$omp end parallel
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-parallel-3.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-parallel-3.f90
new file mode 100644
index 000000000..9d5ba8ffa
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-parallel-3.f90
@@ -0,0 +1,38 @@
+! { dg-do run }
+! { dg-options "-fno-inline -fno-ipa-sra -fno-ipa-cp -fno-ipa-cp-clone" }
+! { dg-set-target-env-var OMP_CANCELLATION "true" }
+
+ use omp_lib
+ integer :: x, i, j
+ common /x/ x
+
+ call omp_set_dynamic (.false.)
+ call omp_set_schedule (omp_sched_static, 1)
+ !$omp parallel num_threads(16) private (i, j)
+ call do_some_work
+ !$omp barrier
+ if (omp_get_thread_num ().eq.1) then
+ call sleep (2)
+ !$omp cancellation point parallel
+ end if
+ do j = 3, 16
+ !$omp do schedule(runtime)
+ do i = 0, j - 1
+ call do_some_work
+ end do
+ !$omp enddo nowait
+ end do
+ if (omp_get_thread_num ().eq.0) then
+ call sleep (1)
+ !$omp cancel parallel
+ end if
+ !$omp end parallel
+contains
+ subroutine do_some_work
+ integer :: x
+ common /x/ x
+ !$omp atomic
+ x = x + 1
+ !$omp end atomic
+ endsubroutine do_some_work
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-sections-1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-sections-1.f90
new file mode 100644
index 000000000..9ba8af846
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-sections-1.f90
@@ -0,0 +1,23 @@
+! { dg-do run }
+! { dg-set-target-env-var OMP_CANCELLATION "true" }
+
+ use omp_lib
+
+ if (omp_get_cancellation ()) then
+ !$omp parallel num_threads(32)
+ !$omp sections
+ !$omp cancel sections
+ call abort
+ !$omp section
+ !$omp cancel sections
+ call abort
+ !$omp section
+ !$omp cancel sections
+ call abort
+ !$omp section
+ !$omp cancel sections
+ call abort
+ !$omp end sections
+ !$omp end parallel
+ end if
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-taskgroup-2.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-taskgroup-2.f90
new file mode 100644
index 000000000..c727a20ae
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/cancel-taskgroup-2.f90
@@ -0,0 +1,28 @@
+! { dg-do run }
+! { dg-set-target-env-var OMP_CANCELLATION "true" }
+
+ use omp_lib
+ integer :: i
+
+ !$omp parallel
+ !$omp taskgroup
+ !$omp task
+ !$omp cancel taskgroup
+ call abort
+ !$omp endtask
+ !$omp endtaskgroup
+ !$omp endparallel
+ !$omp parallel private (i)
+ !$omp barrier
+ !$omp single
+ !$omp taskgroup
+ do i = 0, 49
+ !$omp task
+ !$omp cancellation point taskgroup
+ !$omp cancel taskgroup if (i.gt.5)
+ !$omp end task
+ end do
+ !$omp end taskgroup
+ !$omp endsingle
+ !$omp end parallel
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-1.f90
new file mode 100644
index 000000000..5cd592c09
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-1.f90
@@ -0,0 +1,95 @@
+! { dg-do run { target vect_simd_clones } }
+! { dg-options "-fno-inline" }
+! { dg-additional-options "-msse2" { target sse2_runtime } }
+! { dg-additional-options "-mavx" { target avx_runtime } }
+
+module declare_simd_1_mod
+ contains
+ real function foo (a, b, c)
+ !$omp declare simd (foo) simdlen (4) uniform (a) linear (b : 5) &
+ !$omp & notinbranch
+ double precision, value :: a
+ real, value :: c
+ !$omp declare simd (foo)
+ integer, value :: b
+ foo = a + b * c
+ end function foo
+end module declare_simd_1_mod
+ use declare_simd_1_mod
+ interface
+ function bar (a, b, c)
+ !$omp declare simd (bar)
+ integer, value :: b
+ real, value :: c
+ real :: bar
+ !$omp declare simd (bar) simdlen (4) linear (b : 2)
+ !$omp declare simd (bar) simdlen (16) inbranch
+ double precision, value :: a
+ end function bar
+ end interface
+ integer :: i
+ double precision :: a(128)
+ real :: b(128), d(128)
+ data d /171., 414., 745., 1164., 1671., 2266., 2949., 3720., 4579., &
+ & 5526., 6561., 7684., 8895., 10194., 11581., 13056., 14619., &
+ & 16270., 18009., 19836., 21751., 23754., 25845., 28024., &
+ & 30291., 32646., 35089., 37620., 40239., 42946., 45741., &
+ & 48624., 51595., 54654., 57801., 61036., 64359., 67770., &
+ & 71269., 74856., 78531., 82294., 86145., 90084., 94111., &
+ & 98226., 102429., 106720., 111099., 115566., 120121., 124764., &
+ & 129495., 134314., 139221., 144216., 149299., 154470., 159729., &
+ & 165076., 170511., 176034., 181645., 187344., 193131., 199006., &
+ & 204969., 211020., 217159., 223386., 229701., 236104., 242595., &
+ & 249174., 255841., 262596., 269439., 276370., 283389., 290496., &
+ & 297691., 304974., 312345., 319804., 327351., 334986., 342709., &
+ & 350520., 358419., 366406., 374481., 382644., 390895., 399234., &
+ & 407661., 416176., 424779., 433470., 442249., 451116., 460071., &
+ & 469114., 478245., 487464., 496771., 506166., 515649., 525220., &
+ & 534879., 544626., 554461., 564384., 574395., 584494., 594681., &
+ & 604956., 615319., 625770., 636309., 646936., 657651., 668454., &
+ & 679345., 690324., 701391., 712546., 723789., 735120./
+ !$omp simd
+ do i = 1, 128
+ a(i) = 7.0 * i + 16.0
+ b(i) = 5.0 * i + 12.0
+ end do
+ !$omp simd
+ do i = 1, 128
+ b(i) = foo (a(i), 3, b(i))
+ end do
+ !$omp simd
+ do i = 1, 128
+ b(i) = bar (a(i), 2 * i, b(i))
+ end do
+ if (any (b.ne.d)) call abort
+ !$omp simd
+ do i = 1, 128
+ b(i) = i * 2.0
+ end do
+ !$omp simd
+ do i = 1, 128
+ b(i) = baz (7.0_8, 2, b(i))
+ end do
+ do i = 1, 128
+ if (b(i).ne.(7.0 + 4.0 * i)) call abort
+ end do
+contains
+ function baz (x, y, z)
+ !$omp declare simd (baz) simdlen (8) uniform (x, y)
+ !$omp declare simd (baz)
+ integer, value :: y
+ real, value :: z
+ real :: baz
+ double precision, value :: x
+ baz = x + y * z
+ end function baz
+end
+function bar (a, b, c)
+ integer, value :: b
+ real, value :: c
+ real :: bar
+ double precision, value :: a
+ !$omp declare simd (bar)
+ !$omp declare simd (bar) simdlen (4) linear (b : 2)
+ bar = a + b * c
+end function bar
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-2.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-2.f90
new file mode 100644
index 000000000..30c63f706
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-2.f90
@@ -0,0 +1,25 @@
+! { dg-do run { target vect_simd_clones } }
+! { dg-options "-fno-inline" }
+! { dg-additional-sources declare-simd-3.f90 }
+! { dg-additional-options "-msse2" { target sse2_runtime } }
+! { dg-additional-options "-mavx" { target avx_runtime } }
+
+module declare_simd_2_mod
+ contains
+ real function foo (a, b, c)
+ !$omp declare simd (foo) simdlen (4) uniform (a) linear (b : 5)
+ double precision, value :: a
+ real, value :: c
+ !$omp declare simd (foo)
+ integer, value :: b
+ foo = a + b * c
+ end function foo
+end module declare_simd_2_mod
+
+ interface
+ subroutine bar ()
+ end subroutine bar
+ end interface
+
+ call bar ()
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-3.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-3.f90
new file mode 100644
index 000000000..031625ec4
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/declare-simd-3.f90
@@ -0,0 +1,22 @@
+! Don't compile this anywhere; it is just an auxiliary
+! file compiled together with declare-simd-2.f90
+! to verify inter-CU module handling of omp declare simd.
+! { dg-do compile { target { lp64 && { ! lp64 } } } }
+
+subroutine bar
+ use declare_simd_2_mod
+ real :: b(128)
+ integer :: i
+
+ !$omp simd
+ do i = 1, 128
+ b(i) = i * 2.0
+ end do
+ !$omp simd
+ do i = 1, 128
+ b(i) = foo (7.0_8, 5 * i, b(i))
+ end do
+ do i = 1, 128
+ if (b(i).ne.(7.0 + 10.0 * i * i)) call abort
+ end do
+end subroutine bar
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-1.f90
new file mode 100644
index 000000000..030d3fb6a
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-1.f90
@@ -0,0 +1,203 @@
+! { dg-do run }
+
+ call dep ()
+ call dep2 ()
+ call dep3 ()
+ call firstpriv ()
+ call antidep ()
+ call antidep2 ()
+ call antidep3 ()
+ call outdep ()
+ call concurrent ()
+ call concurrent2 ()
+ call concurrent3 ()
+contains
+ subroutine dep
+ integer :: x
+ x = 1
+ !$omp parallel
+ !$omp single
+ !$omp task shared (x) depend(out: x)
+ x = 2
+ !$omp end task
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp end task
+ !$omp end single
+ !$omp end parallel
+ end subroutine dep
+
+ subroutine dep2
+ integer :: x
+ !$omp parallel
+ !$omp single private (x)
+ x = 1
+ !$omp task shared (x) depend(out: x)
+ x = 2
+ !$omp end task
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp end task
+ !$omp taskwait
+ !$omp end single
+ !$omp end parallel
+ end subroutine dep2
+
+ subroutine dep3
+ integer :: x
+ !$omp parallel private (x)
+ x = 1
+ !$omp single
+ !$omp task shared (x) depend(out: x)
+ x = 2
+ !$omp endtask
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp endtask
+ !$omp endsingle
+ !$omp endparallel
+ end subroutine dep3
+
+ subroutine firstpriv
+ integer :: x
+ !$omp parallel private (x)
+ !$omp single
+ x = 1
+ !$omp task depend(out: x)
+ x = 2
+ !$omp end task
+ !$omp task depend(in: x)
+ if (x.ne.1) call abort
+ !$omp end task
+ !$omp end single
+ !$omp end parallel
+ end subroutine firstpriv
+
+ subroutine antidep
+ integer :: x
+ x = 1
+ !$omp parallel
+ !$omp single
+ !$omp task shared(x) depend(in: x)
+ if (x.ne.1) call abort
+ !$omp end task
+ !$omp task shared(x) depend(out: x)
+ x = 2
+ !$omp end task
+ !$omp end single
+ !$omp end parallel
+ end subroutine antidep
+
+ subroutine antidep2
+ integer :: x
+ !$omp parallel private (x)
+ !$omp single
+ x = 1
+ !$omp taskgroup
+ !$omp task shared(x) depend(in: x)
+ if (x.ne.1) call abort
+ !$omp end task
+ !$omp task shared(x) depend(out: x)
+ x = 2
+ !$omp end task
+ !$omp end taskgroup
+ !$omp end single
+ !$omp end parallel
+ end subroutine antidep2
+
+ subroutine antidep3
+ integer :: x
+ !$omp parallel
+ x = 1
+ !$omp single
+ !$omp task shared(x) depend(in: x)
+ if (x.ne.1) call abort
+ !$omp end task
+ !$omp task shared(x) depend(out: x)
+ x = 2
+ !$omp end task
+ !$omp end single
+ !$omp end parallel
+ end subroutine antidep3
+
+ subroutine outdep
+ integer :: x
+ !$omp parallel private (x)
+ !$omp single
+ x = 0
+ !$omp task shared(x) depend(out: x)
+ x = 1
+ !$omp end task
+ !$omp task shared(x) depend(out: x)
+ x = 2
+ !$omp end task
+ !$omp taskwait
+ if (x.ne.2) call abort
+ !$omp end single
+ !$omp end parallel
+ end subroutine outdep
+
+ subroutine concurrent
+ integer :: x
+ x = 1
+ !$omp parallel
+ !$omp single
+ !$omp task shared (x) depend(out: x)
+ x = 2
+ !$omp end task
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp end task
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp end task
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp end task
+ !$omp end single
+ !$omp end parallel
+ end subroutine concurrent
+
+ subroutine concurrent2
+ integer :: x
+ !$omp parallel private (x)
+ !$omp single
+ x = 1
+ !$omp task shared (x) depend(out: x)
+ x = 2;
+ !$omp end task
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp end task
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp end task
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp end task
+ !$omp taskwait
+ !$omp end single
+ !$omp end parallel
+ end subroutine concurrent2
+
+ subroutine concurrent3
+ integer :: x
+ !$omp parallel private (x)
+ x = 1
+ !$omp single
+ !$omp task shared (x) depend(out: x)
+ x = 2
+ !$omp end task
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp end task
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp end task
+ !$omp task shared (x) depend(in: x)
+ if (x.ne.2) call abort
+ !$omp end task
+ !$omp end single
+ !$omp end parallel
+ end subroutine concurrent3
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-2.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-2.f90
new file mode 100644
index 000000000..0694ce742
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-2.f90
@@ -0,0 +1,34 @@
+! { dg-do run }
+
+ integer :: x(3:6, 7:12), y
+ y = 1
+ !$omp parallel shared (x, y)
+ !$omp single
+ !$omp taskgroup
+ !$omp task depend(in: x(:, :))
+ if (y.ne.1) call abort
+ !$omp end task
+ !$omp task depend(out: x(:, :))
+ y = 2
+ !$omp end task
+ !$omp end taskgroup
+ !$omp taskgroup
+ !$omp task depend(in: x(4, 7))
+ if (y.ne.2) call abort
+ !$omp end task
+ !$omp task depend(out: x(4:4, 7:7))
+ y = 3
+ !$omp end task
+ !$omp end taskgroup
+ !$omp taskgroup
+ !$omp task depend(in: x(4:, 8:))
+ if (y.ne.3) call abort
+ !$omp end task
+ !$omp task depend(out: x(4:6, 8:12))
+ y = 4
+ !$omp end task
+ !$omp end taskgroup
+ !$omp end single
+ !$omp end parallel
+ if (y.ne.4) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-3.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-3.f90
new file mode 100644
index 000000000..11be64106
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/depend-3.f90
@@ -0,0 +1,42 @@
+! { dg-do run }
+
+ integer :: x(2, 3)
+ integer, allocatable :: z(:, :)
+ allocate (z(-2:3, 2:4))
+ call foo (x, z)
+contains
+ subroutine foo (x, z)
+ integer :: x(:, :), y
+ integer, allocatable :: z(:, :)
+ y = 1
+ !$omp parallel shared (x, y, z)
+ !$omp single
+ !$omp taskgroup
+ !$omp task depend(in: x)
+ if (y.ne.1) call abort
+ !$omp end task
+ !$omp task depend(out: x(1:2, 1:3))
+ y = 2
+ !$omp end task
+ !$omp end taskgroup
+ !$omp taskgroup
+ !$omp task depend(in: z)
+ if (y.ne.2) call abort
+ !$omp end task
+ !$omp task depend(out: z(-2:3, 2:4))
+ y = 3
+ !$omp end task
+ !$omp end taskgroup
+ !$omp taskgroup
+ !$omp task depend(in: x)
+ if (y.ne.3) call abort
+ !$omp end task
+ !$omp task depend(out: x(1:, 1:))
+ y = 4
+ !$omp end task
+ !$omp end taskgroup
+ !$omp end single
+ !$omp end parallel
+ if (y.ne.4) call abort
+ end subroutine
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/nestedfn5.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/nestedfn5.f90
new file mode 100644
index 000000000..f67bd47e1
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/nestedfn5.f90
@@ -0,0 +1,96 @@
+! { dg-do run }
+
+ interface
+ subroutine bar (q)
+ integer :: q(19:)
+ end subroutine
+ end interface
+ integer :: q(7:15)
+ q(:) = 5
+ call bar (q)
+end
+subroutine bar (q)
+ use iso_c_binding, only: c_ptr, c_loc, c_int
+ integer :: a, b, c, d(2:3,4:5), q(19:), h, k, m, n, o, p
+ integer(c_int), target :: e(64)
+ type (c_ptr) :: f, g(64)
+ logical :: l
+ a = 1
+ b = 2
+ c = 3
+ d = 4
+ l = .false.
+ f = c_loc (e)
+ call foo
+contains
+ subroutine foo
+ use iso_c_binding, only: c_sizeof
+!$omp simd linear(a:2) linear(b:1)
+ do a = 1, 20, 2
+ b = b + 1
+ end do
+!$omp end simd
+ if (a /= 21 .or. b /= 12) call abort
+!$omp simd aligned(f : c_sizeof (e(1)))
+ do b = 1, 64
+ g(b) = f
+ end do
+!$omp end simd
+!$omp parallel
+!$omp single
+!$omp taskgroup
+!$omp task depend(out : a, d(2:2,4:5))
+ a = a + 1
+ d(2:2,4:5) = d(2:2,4:5) + 1
+!$omp end task
+!$omp task depend(in : a, d(2:2,4:5))
+ if (a /= 22) call abort
+ if (any (d(2:2,4:5) /= 5)) call abort
+!$omp end task
+!$omp end taskgroup
+!$omp end single
+!$omp end parallel
+ b = 10
+!$omp target data map (tofrom: a, d(2:3,4:4), q) map (from: l)
+!$omp target map (tofrom: b, d(2:3,4:4))
+ l = .false.
+ if (a /= 22 .or. any (q /= 5)) l = .true.
+ if (lbound (q, 1) /= 19 .or. ubound (q, 1) /= 27) l = .true.
+ if (d(2,4) /= 5 .or. d(3,4) /= 4) l = .true.
+ l = l .or. (b /= 10)
+ a = 6
+ b = 11
+ q = 8
+ d(2:3,4:4) = 9
+!$omp end target
+!$omp target update from (a, q, d(2:3,4:4), l)
+ if (a /= 6 .or. l .or. b /= 11 .or. any (q /= 8)) call abort
+ if (any (d(2:3,4:4) /= 9) .or. d(2,5) /= 5 .or. d(3,5) /= 4) call abort
+ a = 12
+ b = 13
+ q = 14
+ d = 15
+!$omp target update to (a, q, d(2:3,4:4))
+!$omp target map (tofrom: b, d(2:3,4:4))
+ if (a /= 12 .or. b /= 13 .or. any (q /= 14)) l = .true.
+ l = l .or. any (d(2:3,4:4) /= 15)
+!$omp end target
+ a = 0
+ b = 1
+ c = 100
+ h = 8
+ m = 0
+ n = 64
+ o = 16
+ if (l) call abort
+!$omp target teams distribute parallel do simd if (.not.l) device(a) &
+!$omp & num_teams(b) dist_schedule(static, c) num_threads (h) &
+!$omp & reduction (+: m) safelen (n) schedule(static, o)
+ do p = 1, 64
+ m = m + 1
+ end do
+!$omp end target teams distribute parallel do simd
+ if (m /= 64) call abort
+!$omp end target data
+ end subroutine foo
+end subroutine bar
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/omp_atomic5.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/omp_atomic5.f90
new file mode 100644
index 000000000..8e0641592
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/omp_atomic5.f90
@@ -0,0 +1,59 @@
+! { dg-do run }
+ integer (kind = 4) :: a, a2
+ integer (kind = 2) :: b, b2
+ real :: c
+ double precision :: d, d2, c2
+ integer, dimension (10) :: e
+ e(:) = 5
+ e(7) = 9
+!$omp atomic write seq_cst
+ a = 1
+!$omp atomic seq_cst, write
+ b = 2
+!$omp atomic write, seq_cst
+ c = 3
+!$omp atomic seq_cst write
+ d = 4
+!$omp atomic capture seq_cst
+ a2 = a
+ a = a + 4
+!$omp end atomic
+!$omp atomic capture, seq_cst
+ b = b - 18
+ b2 = b
+!$omp end atomic
+!$omp atomic seq_cst, capture
+ c2 = c
+ c = 2.0 * c
+!$omp end atomic
+!$omp atomic seq_cst capture
+ d = d / 2.0
+ d2 = d
+!$omp end atomic
+ if (a2 .ne. 1 .or. b2 .ne. -16 .or. c2 .ne. 3 .or. d2 .ne. 2) call abort
+!$omp atomic read seq_cst
+ a2 = a
+!$omp atomic seq_cst, read
+ c2 = c
+ if (a2 .ne. 5 .or. b2 .ne. -16 .or. c2 .ne. 6 .or. d2 .ne. 2) call abort
+ a2 = 10
+ if (a2 .ne. 10) call abort
+!$omp atomic capture
+ a2 = a
+ a = e(1) + e(6) + e(7) * 2
+!$omp endatomic
+ if (a2 .ne. 5) call abort
+!$omp atomic read
+ a2 = a
+!$omp end atomic
+ if (a2 .ne. 28) call abort
+!$omp atomic capture seq_cst
+ b2 = b
+ b = e(1) + e(7) + e(5) * 2
+!$omp end atomic
+ if (b2 .ne. -16) call abort
+!$omp atomic seq_cst, read
+ b2 = b
+!$omp end atomic
+ if (b2 .ne. 24) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/openmp_version-1.f b/gcc-4.9/libgomp/testsuite/libgomp.fortran/openmp_version-1.f
index aaa888189..be24adcca 100644
--- a/gcc-4.9/libgomp/testsuite/libgomp.fortran/openmp_version-1.f
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/openmp_version-1.f
@@ -4,6 +4,6 @@
implicit none
include "omp_lib.h"
- if (openmp_version .ne. 201107) call abort;
+ if (openmp_version .ne. 201307) call abort;
end program main
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/openmp_version-2.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/openmp_version-2.f90
index b2d1d261f..62712c7d2 100644
--- a/gcc-4.9/libgomp/testsuite/libgomp.fortran/openmp_version-2.f90
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/openmp_version-2.f90
@@ -4,6 +4,6 @@ program main
use omp_lib
implicit none
- if (openmp_version .ne. 201107) call abort;
+ if (openmp_version .ne. 201307) call abort;
end program main
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/procptr1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/procptr1.f90
new file mode 100644
index 000000000..418773982
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/procptr1.f90
@@ -0,0 +1,42 @@
+! { dg-do run }
+ interface
+ integer function foo ()
+ end function
+ integer function bar ()
+ end function
+ integer function baz ()
+ end function
+ end interface
+ procedure(foo), pointer :: ptr
+ integer :: i
+ ptr => foo
+!$omp parallel shared (ptr)
+ if (ptr () /= 1) call abort
+!$omp end parallel
+ ptr => bar
+!$omp parallel firstprivate (ptr)
+ if (ptr () /= 2) call abort
+!$omp end parallel
+!$omp parallel sections lastprivate (ptr)
+!$omp section
+ ptr => foo
+ if (ptr () /= 1) call abort
+!$omp section
+ ptr => bar
+ if (ptr () /= 2) call abort
+!$omp section
+ ptr => baz
+ if (ptr () /= 3) call abort
+!$omp end parallel sections
+ if (ptr () /= 3) call abort
+ if (.not.associated (ptr, baz)) call abort
+end
+integer function foo ()
+ foo = 1
+end function
+integer function bar ()
+ bar = 2
+end function
+integer function baz ()
+ baz = 3
+end function
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd1.f90
new file mode 100644
index 000000000..b97d27f8d
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd1.f90
@@ -0,0 +1,35 @@
+! { dg-do run }
+! { dg-additional-options "-msse2" { target sse2_runtime } }
+! { dg-additional-options "-mavx" { target avx_runtime } }
+
+ type dt
+ integer :: x = 0
+ end type
+ type (dt) :: t
+ integer :: i, j, k, l, r, s, a(30)
+ integer, target :: q(30)
+ integer, pointer :: p(:)
+ !$omp declare reduction (foo : integer : &
+ !$omp & omp_out = omp_out + omp_in) initializer (omp_priv = 0)
+ !$omp declare reduction (+ : dt : omp_out%x = omp_out%x &
+ !$omp & + omp_in%x)
+ a(:) = 1
+ q(:) = 1
+ p => q
+ r = 0
+ j = 10
+ k = 20
+ s = 0
+ !$omp simd safelen (8) reduction(+:r, t) linear(j, k : 2) &
+ !$omp& private (l) aligned(p : 4) reduction(foo:s)
+ do i = 1, 30
+ l = j + k + a(i) + p(i)
+ r = r + l
+ j = j + 2
+ k = k + 2
+ s = s + l
+ t%x = t%x + l
+ end do
+ if (r.ne.2700.or.j.ne.70.or.k.ne.80.or.s.ne.2700) call abort
+ if (t%x.ne.2700) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd2.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd2.f90
new file mode 100644
index 000000000..9b90bcdd0
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd2.f90
@@ -0,0 +1,101 @@
+! { dg-do run }
+! { dg-additional-options "-msse2" { target sse2_runtime } }
+! { dg-additional-options "-mavx" { target avx_runtime } }
+
+ integer :: a(1024), b(1024), k, m, i, s, t
+ k = 4
+ m = 2
+ t = 1
+ do i = 1, 1024
+ a(i) = i - 513
+ b(i) = modulo (i - 52, 39)
+ if (i.lt.52.and.b(i).ne.0) b(i) = b(i) - 39
+ end do
+ s = foo (b)
+ do i = 1, 1024
+ if (a(i).ne.((i - 513) * b(i))) call abort
+ if (i.lt.52.and.modulo (i - 52, 39).ne.0) then
+ if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort
+ else
+ if (b(i).ne.(modulo (i - 52, 39))) call abort
+ end if
+ a(i) = i - 513
+ end do
+ if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort
+ k = 4
+ m = 2
+ t = 1
+ s = bar (b)
+ do i = 1, 1024
+ if (a(i).ne.((i - 513) * b(i))) call abort
+ if (i.lt.52.and.modulo (i - 52, 39).ne.0) then
+ if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort
+ else
+ if (b(i).ne.(modulo (i - 52, 39))) call abort
+ end if
+ a(i) = i - 513
+ end do
+ if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort
+ k = 4
+ m = 2
+ t = 1
+ s = baz (b)
+ do i = 1, 1024
+ if (a(i).ne.((i - 513) * b(i))) call abort
+ if (i.lt.52.and.modulo (i - 52, 39).ne.0) then
+ if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort
+ else
+ if (b(i).ne.(modulo (i - 52, 39))) call abort
+ end if
+ end do
+ if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort
+contains
+ function foo (p)
+ integer :: p(1024), u, v, i, s, foo
+ s = 0
+ !$omp simd linear(k : m + 1) reduction(+: s) lastprivate(u, v)
+ do i = 1, 1024
+ a(i) = a(i) * p(i)
+ u = p(i) + k
+ k = k + m + 1
+ v = p(i) + k
+ s = s + p(i) + k
+ end do
+ !$omp end simd
+ if (i.ne.1025) call abort
+ if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort
+ foo = s
+ end function foo
+ function bar (p)
+ integer :: p(1024), u, v, i, s, bar
+ s = 0
+ !$omp simd linear(k : m + 1) reduction(+: s) lastprivate(u, v)
+ do i = 1, 1024, t
+ a(i) = a(i) * p(i)
+ u = p(i) + k
+ k = k + m + 1
+ v = p(i) + k
+ s = s + p(i) + k
+ end do
+ !$omp end simd
+ if (i.ne.1025) call abort
+ if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort
+ bar = s
+ end function bar
+ function baz (p)
+ integer :: p(1024), u, v, i, s, baz
+ s = 0
+ !$omp simd linear(k : m + 1) reduction(+: s) lastprivate(u, v) &
+ !$omp & linear(i : t)
+ do i = 1, 1024, t
+ a(i) = a(i) * p(i)
+ u = p(i) + k
+ k = k + m + 1
+ v = p(i) + k
+ s = s + p(i) + k
+ end do
+ if (i.ne.1025) call abort
+ if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort
+ baz = s
+ end function baz
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd3.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd3.f90
new file mode 100644
index 000000000..df9f4cac3
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd3.f90
@@ -0,0 +1,109 @@
+! { dg-do run }
+! { dg-additional-options "-msse2" { target sse2_runtime } }
+! { dg-additional-options "-mavx" { target avx_runtime } }
+
+ integer :: a(1024), b(1024), k, m, i, s, t
+ k = 4
+ m = 2
+ t = 1
+ do i = 1, 1024
+ a(i) = i - 513
+ b(i) = modulo (i - 52, 39)
+ if (i.lt.52.and.b(i).ne.0) b(i) = b(i) - 39
+ end do
+ s = foo (b)
+ do i = 1, 1024
+ if (a(i).ne.((i - 513) * b(i))) call abort
+ if (i.lt.52.and.modulo (i - 52, 39).ne.0) then
+ if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort
+ else
+ if (b(i).ne.(modulo (i - 52, 39))) call abort
+ end if
+ a(i) = i - 513
+ end do
+ if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort
+ k = 4
+ m = 2
+ t = 1
+ s = bar (b)
+ do i = 1, 1024
+ if (a(i).ne.((i - 513) * b(i))) call abort
+ if (i.lt.52.and.modulo (i - 52, 39).ne.0) then
+ if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort
+ else
+ if (b(i).ne.(modulo (i - 52, 39))) call abort
+ end if
+ a(i) = i - 513
+ end do
+ if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort
+ k = 4
+ m = 2
+ t = 1
+ s = baz (b)
+ do i = 1, 1024
+ if (a(i).ne.((i - 513) * b(i))) call abort
+ if (i.lt.52.and.modulo (i - 52, 39).ne.0) then
+ if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort
+ else
+ if (b(i).ne.(modulo (i - 52, 39))) call abort
+ end if
+ end do
+ if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort
+contains
+ function foo (p)
+ integer :: p(1024), u, v, i, s, foo
+ s = 0
+ !$omp parallel
+ !$omp do simd linear(k : m + 1) reduction(+: s) lastprivate(u, v) &
+ !$omp & schedule (static, 32)
+ do i = 1, 1024
+ a(i) = a(i) * p(i)
+ u = p(i) + k
+ k = k + m + 1
+ v = p(i) + k
+ s = s + p(i) + k
+ end do
+ !$omp end do simd
+ !$omp end parallel
+ if (i.ne.1025) call abort
+ if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort
+ foo = s
+ end function foo
+ function bar (p)
+ integer :: p(1024), u, v, i, s, bar
+ s = 0
+ !$omp parallel
+ !$omp do simd linear(k : m + 1) reduction(+: s) lastprivate(u, v) &
+ !$omp & schedule (dynamic, 32)
+ do i = 1, 1024, t
+ a(i) = a(i) * p(i)
+ u = p(i) + k
+ k = k + m + 1
+ v = p(i) + k
+ s = s + p(i) + k
+ end do
+ !$omp end do simd
+ !$omp endparallel
+ if (i.ne.1025) call abort
+ if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort
+ bar = s
+ end function bar
+ function baz (p)
+ integer :: p(1024), u, v, i, s, baz
+ s = 0
+ !$omp parallel
+ !$omp do simd linear(k : m + 1) reduction(+: s) lastprivate(u, v) &
+ !$omp & linear(i : t) schedule (static, 8)
+ do i = 1, 1024, t
+ a(i) = a(i) * p(i)
+ u = p(i) + k
+ k = k + m + 1
+ v = p(i) + k
+ s = s + p(i) + k
+ end do
+ !$omp end parallel
+ if (i.ne.1025) call abort
+ if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort
+ baz = s
+ end function baz
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd4.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd4.f90
new file mode 100644
index 000000000..a5b8ba0ba
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd4.f90
@@ -0,0 +1,103 @@
+! { dg-do run }
+! { dg-additional-options "-msse2" { target sse2_runtime } }
+! { dg-additional-options "-mavx" { target avx_runtime } }
+
+ integer :: a(1024), b(1024), k, m, i, s, t
+ k = 4
+ m = 2
+ t = 1
+ do i = 1, 1024
+ a(i) = i - 513
+ b(i) = modulo (i - 52, 39)
+ if (i.lt.52.and.b(i).ne.0) b(i) = b(i) - 39
+ end do
+ s = foo (b)
+ do i = 1, 1024
+ if (a(i).ne.((i - 513) * b(i))) call abort
+ if (i.lt.52.and.modulo (i - 52, 39).ne.0) then
+ if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort
+ else
+ if (b(i).ne.(modulo (i - 52, 39))) call abort
+ end if
+ a(i) = i - 513
+ end do
+ if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort
+ k = 4
+ m = 2
+ t = 1
+ s = bar (b)
+ do i = 1, 1024
+ if (a(i).ne.((i - 513) * b(i))) call abort
+ if (i.lt.52.and.modulo (i - 52, 39).ne.0) then
+ if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort
+ else
+ if (b(i).ne.(modulo (i - 52, 39))) call abort
+ end if
+ a(i) = i - 513
+ end do
+ if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort
+ k = 4
+ m = 2
+ t = 1
+ s = baz (b)
+ do i = 1, 1024
+ if (a(i).ne.((i - 513) * b(i))) call abort
+ if (i.lt.52.and.modulo (i - 52, 39).ne.0) then
+ if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort
+ else
+ if (b(i).ne.(modulo (i - 52, 39))) call abort
+ end if
+ end do
+ if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort
+contains
+ function foo (p)
+ integer :: p(1024), u, v, i, s, foo
+ s = 0
+ !$omp parallel do simd linear(k : m + 1) reduction(+: s) &
+ !$omp & lastprivate(u, v) schedule (static, 32)
+ do i = 1, 1024
+ a(i) = a(i) * p(i)
+ u = p(i) + k
+ k = k + m + 1
+ v = p(i) + k
+ s = s + p(i) + k
+ end do
+ !$omp end parallel do simd
+ if (i.ne.1025) call abort
+ if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort
+ foo = s
+ end function foo
+ function bar (p)
+ integer :: p(1024), u, v, i, s, bar
+ s = 0
+ !$omp parallel do simd linear(k : m + 1) reduction(+: s) &
+ !$omp & lastprivate(u, v) schedule (dynamic, 32)
+ do i = 1, 1024, t
+ a(i) = a(i) * p(i)
+ u = p(i) + k
+ k = k + m + 1
+ v = p(i) + k
+ s = s + p(i) + k
+ end do
+ !$omp endparalleldosimd
+ if (i.ne.1025) call abort
+ if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort
+ bar = s
+ end function bar
+ function baz (p)
+ integer :: p(1024), u, v, i, s, baz
+ s = 0
+ !$omp parallel do simd linear(k : m + 1) reduction(+: s) &
+ !$omp & lastprivate(u, v) linear(i : t) schedule (static, 8)
+ do i = 1, 1024, t
+ a(i) = a(i) * p(i)
+ u = p(i) + k
+ k = k + m + 1
+ v = p(i) + k
+ s = s + p(i) + k
+ end do
+ if (i.ne.1025) call abort
+ if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort
+ baz = s
+ end function baz
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd5.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd5.f90
new file mode 100644
index 000000000..7a5efecac
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd5.f90
@@ -0,0 +1,124 @@
+! { dg-do run }
+! { dg-additional-options "-msse2" { target sse2_runtime } }
+! { dg-additional-options "-mavx" { target avx_runtime } }
+
+ integer :: i, j, b, c
+ c = 0
+ i = 4
+ j = 4
+ b = 7
+!$omp simd linear(b:2) reduction(+:c)
+ do i = 0, 63
+ c = c + b - (7 + 2 * i)
+ b = b + 2
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp simd linear(b:3) reduction(+:c)
+ do i = 0, 63, 4
+ c = c + b - (7 + i / 4 * 3)
+ b = b + 3
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 16 * 3) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp simd linear(i) linear(b:2) reduction(+:c)
+ do i = 0, 63
+ c = c + b - (7 + 2 * i)
+ b = b + 2
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp simd linear(i:4) linear(b:3) reduction(+:c)
+ do i = 0, 63, 4
+ c = c + b - (7 + i / 4 * 3)
+ b = b + 3
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 16 * 3) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp simd collapse(2) linear(b:2) reduction(+:c)
+ do i = 0, 7
+ do j = 0, 7
+ c = c + b - (7 + 2 * j + 2 * 8 * i)
+ b = b + 2
+ end do
+ end do
+ if (c /= 0 .or. i /= 8 .or. j /= 8 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp simd collapse(2) linear(b:2) reduction(+:c) lastprivate (i, j)
+ do i = 0, 7
+ do j = 0, 7
+ c = c + b - (7 + 2 * j + 2 * 8 * i)
+ b = b + 2
+ end do
+ end do
+ if (c /= 0 .or. i /= 8 .or. j /= 8 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) linear(b:2) reduction(+:c)
+ do i = 0, 63
+ c = c + b - (7 + 2 * i)
+ b = b + 2
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) linear(b:3) reduction(+:c)
+ do i = 0, 63, 4
+ c = c + b - (7 + i / 4 * 3)
+ b = b + 3
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 16 * 3) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) linear(i) linear(b:2) reduction(+:c)
+ do i = 0, 63
+ c = c + b - (7 + 2 * i)
+ b = b + 2
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) linear(i:4) linear(b:3) reduction(+:c)
+ do i = 0, 63, 4
+ c = c + b - (7 + i / 4 * 3)
+ b = b + 3
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 16 * 3) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) collapse(2) linear(b:2) reduction(+:c)
+ do i = 0, 7
+ do j = 0, 7
+ c = c + b - (7 + 2 * j + 2 * 8 * i)
+ b = b + 2
+ end do
+ end do
+ if (c /= 0 .or. i /= 8 .or. j /= 8 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) collapse(2) linear(b:2) &
+!$omp & reduction(+:c) lastprivate (i, j)
+ do i = 0, 7
+ do j = 0, 7
+ c = c + b - (7 + 2 * j + 2 * 8 * i)
+ b = b + 2
+ end do
+ end do
+ if (c /= 0 .or. i /= 8 .or. j /= 8 .or. b /= 7 + 64 * 2) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd6.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd6.f90
new file mode 100644
index 000000000..881a8fb8d
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd6.f90
@@ -0,0 +1,135 @@
+! { dg-do run }
+! { dg-additional-options "-msse2" { target sse2_runtime } }
+! { dg-additional-options "-mavx" { target avx_runtime } }
+
+ interface
+ subroutine foo (b, i, j, x)
+ integer, intent (inout) :: b
+ integer, intent (in) :: i, j, x
+ end subroutine
+ end interface
+ integer :: i, j, b, c
+ c = 0
+ i = 4
+ j = 4
+ b = 7
+!$omp simd linear(b:2) reduction(+:c)
+ do i = 0, 63
+ c = c + b - (7 + 2 * i)
+ call foo (b, i, j, 2)
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp simd linear(b:3) reduction(+:c)
+ do i = 0, 63, 4
+ c = c + b - (7 + i / 4 * 3)
+ call foo (b, i, j, 3)
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 16 * 3) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp simd linear(i) linear(b:2) reduction(+:c)
+ do i = 0, 63
+ c = c + b - (7 + 2 * i)
+ call foo (b, i, j, 2)
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp simd linear(i:4) linear(b:3) reduction(+:c)
+ do i = 0, 63, 4
+ c = c + b - (7 + i / 4 * 3)
+ call foo (b, i, j, 3)
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 16 * 3) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp simd collapse(2) linear(b:2) reduction(+:c)
+ do i = 0, 7
+ do j = 0, 7
+ c = c + b - (7 + 2 * j + 2 * 8 * i)
+ call foo (b, i, j, 2)
+ end do
+ end do
+ if (c /= 0 .or. i /= 8 .or. j /= 8 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp simd collapse(2) linear(b:2) reduction(+:c) lastprivate (i, j)
+ do i = 0, 7
+ do j = 0, 7
+ c = c + b - (7 + 2 * j + 2 * 8 * i)
+ call foo (b, i, j, 2)
+ end do
+ end do
+ if (c /= 0 .or. i /= 8 .or. j /= 8 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) linear(b:2) reduction(+:c)
+ do i = 0, 63
+ c = c + b - (7 + 2 * i)
+ call foo (b, i, j, 2)
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) linear(b:3) reduction(+:c)
+ do i = 0, 63, 4
+ c = c + b - (7 + i / 4 * 3)
+ call foo (b, i, j, 3)
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 16 * 3) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) linear(i) linear(b:2) reduction(+:c)
+ do i = 0, 63
+ c = c + b - (7 + 2 * i)
+ call foo (b, i, j, 2)
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) linear(i:4) linear(b:3) reduction(+:c)
+ do i = 0, 63, 4
+ c = c + b - (7 + i / 4 * 3)
+ call foo (b, i, j, 3)
+ end do
+ if (c /= 0 .or. i /= 64 .or. b /= 7 + 16 * 3) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) collapse(2) linear(b:2) reduction(+:c)
+ do i = 0, 7
+ do j = 0, 7
+ c = c + b - (7 + 2 * j + 2 * 8 * i)
+ call foo (b, i, j, 2)
+ end do
+ end do
+ if (c /= 0 .or. i /= 8 .or. j /= 8 .or. b /= 7 + 64 * 2) call abort
+ i = 4
+ j = 4
+ b = 7
+!$omp parallel do simd schedule (static, 4) collapse(2) linear(b:2) &
+!$omp & reduction(+:c) lastprivate (i, j)
+ do i = 0, 7
+ do j = 0, 7
+ c = c + b - (7 + 2 * j + 2 * 8 * i)
+ call foo (b, i, j, 2)
+ end do
+ end do
+ if (c /= 0 .or. i /= 8 .or. j /= 8 .or. b /= 7 + 64 * 2) call abort
+end
+subroutine foo (b, i, j, x)
+ integer, intent (inout) :: b
+ integer, intent (in) :: i, j, x
+ b = b + (i - i) + (j - j) + x
+end subroutine
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd7.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd7.f90
new file mode 100644
index 000000000..b0473faa9
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/simd7.f90
@@ -0,0 +1,172 @@
+! { dg-do run }
+! { dg-additional-options "-msse2" { target sse2_runtime } }
+! { dg-additional-options "-mavx" { target avx_runtime } }
+
+subroutine foo (d, e, f, g, m, n)
+ integer :: i, j, b(2:9), c(3:n), d(:), e(2:n), f(2:,3:), n
+ integer, allocatable :: g(:), h(:), k, m
+ logical :: l
+ l = .false.
+ allocate (h(2:7))
+ i = 4; j = 4; b = 7; c = 8; d = 9; e = 10; f = 11; g = 12; h = 13; k = 14; m = 15
+!$omp simd linear(b)linear(c:2)linear(d:3)linear(e:4)linear(f:5)linear(g:6) &
+!$omp & linear(h:7)linear(k:8)linear(m:9) reduction(.or.:l)
+ do i = 0, 63
+ l = l .or. .not.allocated (g) .or. .not.allocated (h)
+ l = l .or. .not.allocated (k) .or. .not.allocated (m)
+ l = l .or. any (b /= 7 + i) .or. any (c /= 8 + 2 * i)
+ l = l .or. any (d /= 9 + 3 * i) .or. any (e /= 10 + 4 * i)
+ l = l .or. any (f /= 11 + 5 * i) .or. any (g /= 12 + 6 * i)
+ l = l .or. any (h /= 13 + 7 * i) .or. (k /= 14 + 8 * i)
+ l = l .or. (m /= 15 + 9 * i)
+ l = l .or. (lbound (b, 1) /= 2) .or. (ubound (b, 1) /= 9)
+ l = l .or. (lbound (c, 1) /= 3) .or. (ubound (c, 1) /= n)
+ l = l .or. (lbound (d, 1) /= 1) .or. (ubound (d, 1) /= 17)
+ l = l .or. (lbound (e, 1) /= 2) .or. (ubound (e, 1) /= n)
+ l = l .or. (lbound (f, 1) /= 2) .or. (ubound (f, 1) /= 3)
+ l = l .or. (lbound (f, 2) /= 3) .or. (ubound (f, 2) /= 5)
+ l = l .or. (lbound (g, 1) /= 7) .or. (ubound (g, 1) /= 10)
+ l = l .or. (lbound (h, 1) /= 2) .or. (ubound (h, 1) /= 7)
+ b = b + 1; c = c + 2; d = d + 3; e = e + 4; f = f + 5; g = g + 6
+ h = h + 7; k = k + 8; m = m + 9
+ end do
+ if (l .or. i /= 64) call abort
+ if (any (b /= 7 + 64) .or. any (c /= 8 + 2 * 64)) call abort
+ if (any (d /= 9 + 3 * 64) .or. any (e /= 10 + 4 * 64)) call abort
+ if (any (f /= 11 + 5 * 64) .or. any (g /= 12 + 6 * 64)) call abort
+ if (any (h /= 13 + 7 * 64) .or. (k /= 14 + 8 * 64)) call abort
+ if (m /= 15 + 9 * 64) call abort
+ if ((lbound (b, 1) /= 2) .or. (ubound (b, 1) /= 9)) call abort
+ if ((lbound (c, 1) /= 3) .or. (ubound (c, 1) /= n)) call abort
+ if ((lbound (d, 1) /= 1) .or. (ubound (d, 1) /= 17)) call abort
+ if ((lbound (e, 1) /= 2) .or. (ubound (e, 1) /= n)) call abort
+ if ((lbound (f, 1) /= 2) .or. (ubound (f, 1) /= 3)) call abort
+ if ((lbound (f, 2) /= 3) .or. (ubound (f, 2) /= 5)) call abort
+ if ((lbound (g, 1) /= 7) .or. (ubound (g, 1) /= 10)) call abort
+ if ((lbound (h, 1) /= 2) .or. (ubound (h, 1) /= 7)) call abort
+ i = 4; j = 4; b = 7; c = 8; d = 9; e = 10; f = 11; g = 12; h = 13; k = 14; m = 15
+!$omp simd linear(b)linear(c:2)linear(d:3)linear(e:4)linear(f:5)linear(g:6) &
+!$omp & linear(h:7)linear(k:8)linear(m:9) reduction(.or.:l) collapse(2)
+ do i = 0, 7
+ do j = 0, 7
+ l = l .or. .not.allocated (g) .or. .not.allocated (h)
+ l = l .or. .not.allocated (k) .or. .not.allocated (m)
+ l = l .or. any (b /= 7 + (8 * i + j)) .or. any (c /= 8 + 2 * (8 * i + j))
+ l = l .or. any (d /= 9 + 3 * (8 * i + j)) .or. any (e /= 10 + 4 * (8 * i + j))
+ l = l .or. any (f /= 11 + 5 * (8 * i + j)) .or. any (g /= 12 + 6 * (8 * i + j))
+ l = l .or. any (h /= 13 + 7 * (8 * i + j)) .or. (k /= 14 + 8 * (8 * i + j))
+ l = l .or. (m /= 15 + 9 * (8 * i + j))
+ l = l .or. (lbound (b, 1) /= 2) .or. (ubound (b, 1) /= 9)
+ l = l .or. (lbound (c, 1) /= 3) .or. (ubound (c, 1) /= n)
+ l = l .or. (lbound (d, 1) /= 1) .or. (ubound (d, 1) /= 17)
+ l = l .or. (lbound (e, 1) /= 2) .or. (ubound (e, 1) /= n)
+ l = l .or. (lbound (f, 1) /= 2) .or. (ubound (f, 1) /= 3)
+ l = l .or. (lbound (f, 2) /= 3) .or. (ubound (f, 2) /= 5)
+ l = l .or. (lbound (g, 1) /= 7) .or. (ubound (g, 1) /= 10)
+ l = l .or. (lbound (h, 1) /= 2) .or. (ubound (h, 1) /= 7)
+ b = b + 1; c = c + 2; d = d + 3; e = e + 4; f = f + 5; g = g + 6
+ h = h + 7; k = k + 8; m = m + 9
+ end do
+ end do
+ if (l .or. i /= 8 .or. j /= 8) call abort
+ if (any (b /= 7 + 64) .or. any (c /= 8 + 2 * 64)) call abort
+ if (any (d /= 9 + 3 * 64) .or. any (e /= 10 + 4 * 64)) call abort
+ if (any (f /= 11 + 5 * 64) .or. any (g /= 12 + 6 * 64)) call abort
+ if (any (h /= 13 + 7 * 64) .or. (k /= 14 + 8 * 64)) call abort
+ if (m /= 15 + 9 * 64) call abort
+ if ((lbound (b, 1) /= 2) .or. (ubound (b, 1) /= 9)) call abort
+ if ((lbound (c, 1) /= 3) .or. (ubound (c, 1) /= n)) call abort
+ if ((lbound (d, 1) /= 1) .or. (ubound (d, 1) /= 17)) call abort
+ if ((lbound (e, 1) /= 2) .or. (ubound (e, 1) /= n)) call abort
+ if ((lbound (f, 1) /= 2) .or. (ubound (f, 1) /= 3)) call abort
+ if ((lbound (f, 2) /= 3) .or. (ubound (f, 2) /= 5)) call abort
+ if ((lbound (g, 1) /= 7) .or. (ubound (g, 1) /= 10)) call abort
+ if ((lbound (h, 1) /= 2) .or. (ubound (h, 1) /= 7)) call abort
+ i = 4; j = 4; b = 7; c = 8; d = 9; e = 10; f = 11; g = 12; h = 13; k = 14; m = 15
+!$omp parallel do simd linear(b)linear(c:2)linear(d:3)linear(e:4)linear(f:5) &
+!$omp & linear(g:6)linear(h:7)linear(k:8)linear(m:9) reduction(.or.:l)
+ do i = 0, 63
+ l = l .or. .not.allocated (g) .or. .not.allocated (h)
+ l = l .or. .not.allocated (k) .or. .not.allocated (m)
+ l = l .or. any (b /= 7 + i) .or. any (c /= 8 + 2 * i)
+ l = l .or. any (d /= 9 + 3 * i) .or. any (e /= 10 + 4 * i)
+ l = l .or. any (f /= 11 + 5 * i) .or. any (g /= 12 + 6 * i)
+ l = l .or. any (h /= 13 + 7 * i) .or. (k /= 14 + 8 * i)
+ l = l .or. (m /= 15 + 9 * i)
+ l = l .or. (lbound (b, 1) /= 2) .or. (ubound (b, 1) /= 9)
+ l = l .or. (lbound (c, 1) /= 3) .or. (ubound (c, 1) /= n)
+ l = l .or. (lbound (d, 1) /= 1) .or. (ubound (d, 1) /= 17)
+ l = l .or. (lbound (e, 1) /= 2) .or. (ubound (e, 1) /= n)
+ l = l .or. (lbound (f, 1) /= 2) .or. (ubound (f, 1) /= 3)
+ l = l .or. (lbound (f, 2) /= 3) .or. (ubound (f, 2) /= 5)
+ l = l .or. (lbound (g, 1) /= 7) .or. (ubound (g, 1) /= 10)
+ l = l .or. (lbound (h, 1) /= 2) .or. (ubound (h, 1) /= 7)
+ b = b + 1; c = c + 2; d = d + 3; e = e + 4; f = f + 5; g = g + 6
+ h = h + 7; k = k + 8; m = m + 9
+ end do
+ if (l .or. i /= 64) call abort
+ if (any (b /= 7 + 64) .or. any (c /= 8 + 2 * 64)) call abort
+ if (any (d /= 9 + 3 * 64) .or. any (e /= 10 + 4 * 64)) call abort
+ if (any (f /= 11 + 5 * 64) .or. any (g /= 12 + 6 * 64)) call abort
+ if (any (h /= 13 + 7 * 64) .or. (k /= 14 + 8 * 64)) call abort
+ if (m /= 15 + 9 * 64) call abort
+ if ((lbound (b, 1) /= 2) .or. (ubound (b, 1) /= 9)) call abort
+ if ((lbound (c, 1) /= 3) .or. (ubound (c, 1) /= n)) call abort
+ if ((lbound (d, 1) /= 1) .or. (ubound (d, 1) /= 17)) call abort
+ if ((lbound (e, 1) /= 2) .or. (ubound (e, 1) /= n)) call abort
+ if ((lbound (f, 1) /= 2) .or. (ubound (f, 1) /= 3)) call abort
+ if ((lbound (f, 2) /= 3) .or. (ubound (f, 2) /= 5)) call abort
+ if ((lbound (g, 1) /= 7) .or. (ubound (g, 1) /= 10)) call abort
+ if ((lbound (h, 1) /= 2) .or. (ubound (h, 1) /= 7)) call abort
+ i = 4; j = 4; b = 7; c = 8; d = 9; e = 10; f = 11; g = 12; h = 13; k = 14; m = 15
+!$omp parallel do simd linear(b)linear(c:2)linear(d:3)linear(e:4)linear(f:5) &
+!$omp & linear(g:6)linear(h:7)linear(k:8)linear(m:9) reduction(.or.:l) collapse(2)
+ do i = 0, 7
+ do j = 0, 7
+ l = l .or. .not.allocated (g) .or. .not.allocated (h)
+ l = l .or. .not.allocated (k) .or. .not.allocated (m)
+ l = l .or. any (b /= 7 + (8 * i + j)) .or. any (c /= 8 + 2 * (8 * i + j))
+ l = l .or. any (d /= 9 + 3 * (8 * i + j)) .or. any (e /= 10 + 4 * (8 * i + j))
+ l = l .or. any (f /= 11 + 5 * (8 * i + j)) .or. any (g /= 12 + 6 * (8 * i + j))
+ l = l .or. any (h /= 13 + 7 * (8 * i + j)) .or. (k /= 14 + 8 * (8 * i + j))
+ l = l .or. (m /= 15 + 9 * (8 * i + j))
+ l = l .or. (lbound (b, 1) /= 2) .or. (ubound (b, 1) /= 9)
+ l = l .or. (lbound (c, 1) /= 3) .or. (ubound (c, 1) /= n)
+ l = l .or. (lbound (d, 1) /= 1) .or. (ubound (d, 1) /= 17)
+ l = l .or. (lbound (e, 1) /= 2) .or. (ubound (e, 1) /= n)
+ l = l .or. (lbound (f, 1) /= 2) .or. (ubound (f, 1) /= 3)
+ l = l .or. (lbound (f, 2) /= 3) .or. (ubound (f, 2) /= 5)
+ l = l .or. (lbound (g, 1) /= 7) .or. (ubound (g, 1) /= 10)
+ l = l .or. (lbound (h, 1) /= 2) .or. (ubound (h, 1) /= 7)
+ b = b + 1; c = c + 2; d = d + 3; e = e + 4; f = f + 5; g = g + 6
+ h = h + 7; k = k + 8; m = m + 9
+ end do
+ end do
+ if (l .or. i /= 8 .or. j /= 8) call abort
+ if (any (b /= 7 + 64) .or. any (c /= 8 + 2 * 64)) call abort
+ if (any (d /= 9 + 3 * 64) .or. any (e /= 10 + 4 * 64)) call abort
+ if (any (f /= 11 + 5 * 64) .or. any (g /= 12 + 6 * 64)) call abort
+ if (any (h /= 13 + 7 * 64) .or. (k /= 14 + 8 * 64)) call abort
+ if (m /= 15 + 9 * 64) call abort
+ if ((lbound (b, 1) /= 2) .or. (ubound (b, 1) /= 9)) call abort
+ if ((lbound (c, 1) /= 3) .or. (ubound (c, 1) /= n)) call abort
+ if ((lbound (d, 1) /= 1) .or. (ubound (d, 1) /= 17)) call abort
+ if ((lbound (e, 1) /= 2) .or. (ubound (e, 1) /= n)) call abort
+ if ((lbound (f, 1) /= 2) .or. (ubound (f, 1) /= 3)) call abort
+ if ((lbound (f, 2) /= 3) .or. (ubound (f, 2) /= 5)) call abort
+ if ((lbound (g, 1) /= 7) .or. (ubound (g, 1) /= 10)) call abort
+ if ((lbound (h, 1) /= 2) .or. (ubound (h, 1) /= 7)) call abort
+end subroutine
+
+ interface
+ subroutine foo (d, e, f, g, m, n)
+ integer :: d(:), e(2:n), f(2:,3:), n
+ integer, allocatable :: g(:), m
+ end subroutine
+ end interface
+ integer, parameter :: n = 8
+ integer :: d(2:18), e(3:n+1), f(5:6,7:9)
+ integer, allocatable :: g(:), m
+ allocate (g(7:10))
+ call foo (d, e, f, g, m, n)
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/target1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target1.f90
new file mode 100644
index 000000000..c70daace4
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target1.f90
@@ -0,0 +1,58 @@
+! { dg-do run }
+
+module target1
+contains
+ subroutine foo (p, v, w, n)
+ double precision, pointer :: p(:), v(:), w(:)
+ double precision :: q(n)
+ integer :: i, n
+ !$omp target if (n > 256) map (to: v(1:n), w(:n)) map (from: p(1:n), q)
+ !$omp parallel do simd
+ do i = 1, n
+ p(i) = v(i) * w(i)
+ q(i) = p(i)
+ end do
+ !$omp end target
+ if (any (p /= q)) call abort
+ do i = 1, n
+ if (p(i) /= i * iand (i, 63)) call abort
+ end do
+ !$omp target data if (n > 256) map (to: v(1:n), w) map (from: p, q)
+ !$omp target if (n > 256)
+ do i = 1, n
+ p(i) = 1.0
+ q(i) = 2.0
+ end do
+ !$omp end target
+ !$omp target if (n > 256)
+ do i = 1, n
+ p(i) = p(i) + v(i) * w(i)
+ q(i) = q(i) + v(i) * w(i)
+ end do
+ !$omp end target
+ !$omp target if (n > 256)
+ !$omp teams distribute parallel do simd linear(i:1)
+ do i = 1, n
+ p(i) = p(i) + 2.0
+ q(i) = q(i) + 3.0
+ end do
+ !$omp end target
+ !$omp end target data
+ if (any (p + 2.0 /= q)) call abort
+ end subroutine
+end module target1
+ use target1, only : foo
+ integer :: n, i
+ double precision, pointer :: p(:), v(:), w(:)
+ n = 10000
+ allocate (p(n), v(n), w(n))
+ do i = 1, n
+ v(i) = i
+ w(i) = iand (i, 63)
+ end do
+ call foo (p, v, w, n)
+ do i = 1, n
+ if (p(i) /= i * iand (i, 63) + 3) call abort
+ end do
+ deallocate (p, v, w)
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/target2.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target2.f90
new file mode 100644
index 000000000..42f704f2b
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target2.f90
@@ -0,0 +1,96 @@
+! { dg-do run }
+! { dg-options "-fopenmp -ffree-line-length-160" }
+
+module target2
+contains
+ subroutine foo (a, b, c, d, e, f, g, n, q)
+ integer :: n, q
+ integer :: a, b(3:n), c(5:), d(2:*), e(:,:)
+ integer, pointer :: f, g(:)
+ integer :: h, i(3:n)
+ integer, pointer :: j, k(:)
+ logical :: r
+ allocate (j, k(4:n))
+ h = 14
+ i = 15
+ j = 16
+ k = 17
+ !$omp target map (to: a, b, c, d(2:n+1), e, f, g, h, i, j, k, n) map (from: r)
+ r = a /= 7
+ r = r .or. (any (b /= 8)) .or. (lbound (b, 1) /= 3) .or. (ubound (b, 1) /= n)
+ r = r .or. (any (c /= 9)) .or. (lbound (c, 1) /= 5) .or. (ubound (c, 1) /= n + 4)
+ r = r .or. (any (d(2:n+1) /= 10)) .or. (lbound (d, 1) /= 2)
+ r = r .or. (any (e /= 11)) .or. (lbound (e, 1) /= 1) .or. (ubound (e, 1) /= 2)
+ r = r .or. (lbound (e, 2) /= 1) .or. (ubound (e, 2) /= 2)
+ r = r .or. (f /= 12)
+ r = r .or. (any (g /= 13)) .or. (lbound (g, 1) /= 3) .or. (ubound (g, 1) /= n)
+ r = r .or. (h /= 14)
+ r = r .or. (any (i /= 15)) .or. (lbound (i, 1) /= 3) .or. (ubound (i, 1) /= n)
+ r = r .or. (j /= 16)
+ r = r .or. (any (k /= 17)) .or. (lbound (k, 1) /= 4) .or. (ubound (k, 1) /= n)
+ !$omp end target
+ if (r) call abort
+ !$omp target map (to: b(3:n), c(5:n+4), d(2:n+1), e(1:,:2), g(3:n), i(3:n), k(4:n), n) map (from: r)
+ r = (any (b /= 8)) .or. (lbound (b, 1) /= 3) .or. (ubound (b, 1) /= n)
+ r = r .or. (any (c /= 9)) .or. (lbound (c, 1) /= 5) .or. (ubound (c, 1) /= n + 4)
+ r = r .or. (any (d(2:n+1) /= 10)) .or. (lbound (d, 1) /= 2)
+ r = r .or. (any (e /= 11)) .or. (lbound (e, 1) /= 1) .or. (ubound (e, 1) /= 2)
+ r = r .or. (lbound (e, 2) /= 1) .or. (ubound (e, 2) /= 2)
+ r = r .or. (any (g /= 13)) .or. (lbound (g, 1) /= 3) .or. (ubound (g, 1) /= n)
+ r = r .or. (any (i /= 15)) .or. (lbound (i, 1) /= 3) .or. (ubound (i, 1) /= n)
+ r = r .or. (any (k /= 17)) .or. (lbound (k, 1) /= 4) .or. (ubound (k, 1) /= n)
+ !$omp end target
+ if (r) call abort
+ !$omp target map (to: b(5:n-2), c(7:n), d(4:n-2), e(1:,2:), g(5:n-3), i(6:n-4), k(5:n-5), n) map (from: r)
+ r = (any (b(5:n-2) /= 8)) .or. (lbound (b, 1) /= 3) .or. (ubound (b, 1) /= n)
+ r = r .or. (any (c(7:n) /= 9)) .or. (lbound (c, 1) /= 5) .or. (ubound (c, 1) /= n + 4)
+ r = r .or. (any (d(4:n-2) /= 10)) .or. (lbound (d, 1) /= 2)
+ r = r .or. (any (e(1:,2:) /= 11)) .or. (lbound (e, 1) /= 1) .or. (ubound (e, 1) /= 2)
+ r = r .or. (lbound (e, 2) /= 1) .or. (ubound (e, 2) /= 2)
+ r = r .or. (any (g(5:n-3) /= 13)) .or. (lbound (g, 1) /= 3) .or. (ubound (g, 1) /= n)
+ r = r .or. (any (i(6:n-4) /= 15)) .or. (lbound (i, 1) /= 3) .or. (ubound (i, 1) /= n)
+ r = r .or. (any (k(5:n-5) /= 17)) .or. (lbound (k, 1) /= 4) .or. (ubound (k, 1) /= n)
+ !$omp end target
+ !$omp target map (to: b(q+5:n-2+q), c(q+7:q+n), d(q+4:q+n-2), e(1:q+2,2:q+2), g(5+q:n-3+q), &
+ !$omp & i(6+q:n-4+q), k(5+q:n-5+q), n) map (from: r)
+ r = (any (b(5:n-2) /= 8)) .or. (lbound (b, 1) /= 3) .or. (ubound (b, 1) /= n)
+ r = r .or. (any (c(7:n) /= 9)) .or. (lbound (c, 1) /= 5) .or. (ubound (c, 1) /= n + 4)
+ r = r .or. (any (d(4:n-2) /= 10)) .or. (lbound (d, 1) /= 2)
+ r = r .or. (any (e(1:,2:) /= 11)) .or. (lbound (e, 1) /= 1) .or. (ubound (e, 1) /= 2)
+ r = r .or. (lbound (e, 2) /= 1) .or. (ubound (e, 2) /= 2)
+ r = r .or. (any (g(5:n-3) /= 13)) .or. (lbound (g, 1) /= 3) .or. (ubound (g, 1) /= n)
+ r = r .or. (any (i(6:n-4) /= 15)) .or. (lbound (i, 1) /= 3) .or. (ubound (i, 1) /= n)
+ r = r .or. (any (k(5:n-5) /= 17)) .or. (lbound (k, 1) /= 4) .or. (ubound (k, 1) /= n)
+ !$omp end target
+ if (r) call abort
+ !$omp target map (to: d(2:n+1), n)
+ r = a /= 7
+ r = r .or. (any (b /= 8)) .or. (lbound (b, 1) /= 3) .or. (ubound (b, 1) /= n)
+ r = r .or. (any (c /= 9)) .or. (lbound (c, 1) /= 5) .or. (ubound (c, 1) /= n + 4)
+ r = r .or. (any (d(2:n+1) /= 10)) .or. (lbound (d, 1) /= 2)
+ r = r .or. (any (e /= 11)) .or. (lbound (e, 1) /= 1) .or. (ubound (e, 1) /= 2)
+ r = r .or. (lbound (e, 2) /= 1) .or. (ubound (e, 2) /= 2)
+ r = r .or. (f /= 12)
+ r = r .or. (any (g /= 13)) .or. (lbound (g, 1) /= 3) .or. (ubound (g, 1) /= n)
+ r = r .or. (h /= 14)
+ r = r .or. (any (i /= 15)) .or. (lbound (i, 1) /= 3) .or. (ubound (i, 1) /= n)
+ r = r .or. (j /= 16)
+ r = r .or. (any (k /= 17)) .or. (lbound (k, 1) /= 4) .or. (ubound (k, 1) /= n)
+ !$omp end target
+ if (r) call abort
+ end subroutine foo
+end module target2
+ use target2, only : foo
+ integer, parameter :: n = 15, q = 0
+ integer :: a, b(2:n-1), c(n), d(n), e(3:4, 3:4)
+ integer, pointer :: f, g(:)
+ allocate (f, g(3:n))
+ a = 7
+ b = 8
+ c = 9
+ d = 10
+ e = 11
+ f = 12
+ g = 13
+ call foo (a, b, c, d, e, f, g, n, q)
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/target3.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target3.f90
new file mode 100644
index 000000000..1f197acde
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target3.f90
@@ -0,0 +1,29 @@
+! { dg-do run }
+
+module target3
+contains
+ subroutine foo (f, g)
+ integer :: n
+ integer, pointer :: f, g(:)
+ integer, pointer :: j, k(:)
+ logical :: r
+ nullify (j)
+ k => null ()
+ !$omp target map (tofrom: f, g, j, k) map (from: r)
+ r = associated (f) .or. associated (g)
+ r = r .or. associated (j) .or. associated (k)
+ !$omp end target
+ if (r) call abort
+ !$omp target
+ r = associated (f) .or. associated (g)
+ r = r .or. associated (j) .or. associated (k)
+ !$omp end target
+ if (r) call abort
+ end subroutine foo
+end module target3
+ use target3, only : foo
+ integer, pointer :: f, g(:)
+ f => null ()
+ nullify (g)
+ call foo (f, g)
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/target4.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target4.f90
new file mode 100644
index 000000000..aa2f0a5ac
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target4.f90
@@ -0,0 +1,48 @@
+! { dg-do run }
+
+module target4
+contains
+ subroutine foo (a,m,n)
+ integer :: m,n,i,j
+ double precision :: a(m, n), t
+ !$omp target data map(a) map(to: m, n)
+ do i=1,n
+ t = 0.0d0
+ !$omp target
+ !$omp parallel do reduction(+:t)
+ do j=1,m
+ t = t + a(j,i) * a(j,i)
+ end do
+ !$omp end target
+ t = 2.0d0 * t
+ !$omp target
+ !$omp parallel do
+ do j=1,m
+ a(j,i) = a(j,i) * t
+ end do
+ !$omp end target
+ end do
+ !$omp end target data
+ end subroutine foo
+end module target4
+ use target4, only : foo
+ integer :: i, j
+ double precision :: a(8, 9), res(8, 9)
+ do i = 1, 8
+ do j = 1, 9
+ a(i, j) = i + j
+ end do
+ end do
+ call foo (a, 8, 9)
+ res = reshape ((/ 1136.0d0, 1704.0d0, 2272.0d0, 2840.0d0, 3408.0d0, 3976.0d0, &
+& 4544.0d0, 5112.0d0, 2280.0d0, 3040.0d0, 3800.0d0, 4560.0d0, 5320.0d0, 6080.0d0, &
+& 6840.0d0, 7600.0d0, 3936.0d0, 4920.0d0, 5904.0d0, 6888.0d0, 7872.0d0, 8856.0d0, &
+& 9840.0d0, 10824.0d0, 6200.0d0, 7440.0d0, 8680.0d0, 9920.0d0, 11160.0d0, 12400.0d0, &
+& 13640.0d0, 14880.0d0, 9168.0d0, 10696.0d0, 12224.0d0, 13752.0d0, 15280.0d0, 16808.0d0, &
+& 18336.0d0, 19864.0d0, 12936.0d0, 14784.0d0, 16632.0d0, 18480.0d0, 20328.0d0, 22176.0d0, &
+& 24024.0d0, 25872.0d0, 17600.0d0, 19800.0d0, 22000.0d0, 24200.0d0, 26400.0d0, 28600.0d0, &
+& 30800.0d0, 33000.0d0, 23256.0d0, 25840.0d0, 28424.0d0, 31008.0d0, 33592.0d0, 36176.0d0, &
+& 38760.0d0, 41344.0d0, 30000.0d0, 33000.0d0, 36000.0d0, 39000.0d0, 42000.0d0, 45000.0d0, &
+& 48000.0d0, 51000.0d0 /), (/ 8, 9 /))
+ if (any (a /= res)) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/target5.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target5.f90
new file mode 100644
index 000000000..c46faf226
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target5.f90
@@ -0,0 +1,21 @@
+! { dg-do compile }
+! { dg-options "-fopenmp" }
+
+ integer :: r
+ r = 0
+ call foo (r)
+ if (r /= 11) call abort
+contains
+ subroutine foo (r)
+ integer :: i, r
+ !$omp parallel
+ !$omp single
+ !$omp target teams distribute parallel do reduction (+: r)
+ do i = 1, 10
+ r = r + 1
+ end do
+ r = r + 1
+ !$omp end single
+ !$omp end parallel
+ end subroutine
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/target6.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target6.f90
new file mode 100644
index 000000000..13f5a52ed
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target6.f90
@@ -0,0 +1,50 @@
+! { dg-do run }
+
+module target6
+contains
+ subroutine foo (p, v, w, n)
+ double precision, pointer :: p(:), v(:), w(:)
+ double precision :: q(n)
+ integer :: i, n
+ !$omp target data if (n > 256) map (to: v(1:n), w(:n)) map (from: p(1:n), q)
+ !$omp target if (n > 256)
+ !$omp parallel do simd
+ do i = 1, n
+ p(i) = v(i) * w(i)
+ q(i) = p(i)
+ end do
+ !$omp end target
+ !$omp target update if (n > 256) from (p)
+ do i = 1, n
+ if (p(i) /= i * iand (i, 63)) call abort
+ v(i) = v(i) + 1
+ end do
+ !$omp target update if (n > 256) to (v(1:n))
+ !$omp target if (n > 256)
+ !$omp parallel do simd
+ do i = 1, n
+ p(i) = v(i) * w(i)
+ end do
+ !$omp end target
+ !$omp end target data
+ do i = 1, n
+ if (q(i) /= (v(i) - 1) * w(i)) call abort
+ if (p(i) /= q(i) + w(i)) call abort
+ end do
+ end subroutine
+end module target6
+ use target6, only : foo
+ integer :: n, i
+ double precision, pointer :: p(:), v(:), w(:)
+ n = 10000
+ allocate (p(n), v(n), w(n))
+ do i = 1, n
+ v(i) = i
+ w(i) = iand (i, 63)
+ end do
+ call foo (p, v, w, n)
+ do i = 1, n
+ if (p(i) /= (i + 1) * iand (i, 63)) call abort
+ end do
+ deallocate (p, v, w)
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/target7.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target7.f90
new file mode 100644
index 000000000..0c977c44a
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target7.f90
@@ -0,0 +1,38 @@
+! { dg-do run }
+
+ interface
+ real function foo (x)
+ !$omp declare target
+ real, intent(in) :: x
+ end function foo
+ end interface
+ integer, parameter :: n = 1000
+ integer, parameter :: c = 100
+ integer :: i, j
+ real :: a(n)
+ do i = 1, n
+ a(i) = i
+ end do
+ !$omp parallel
+ !$omp single
+ do i = 1, n, c
+ !$omp task shared(a)
+ !$omp target map(a(i:i+c-1))
+ !$omp parallel do
+ do j = i, i + c - 1
+ a(j) = foo (a(j))
+ end do
+ !$omp end target
+ !$omp end task
+ end do
+ !$omp end single
+ !$omp end parallel
+ do i = 1, n
+ if (a(i) /= i + 1) call abort
+ end do
+end
+real function foo (x)
+ !$omp declare target
+ real, intent(in) :: x
+ foo = x + 1
+end function foo
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/target8.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target8.f90
new file mode 100644
index 000000000..0564e90e0
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/target8.f90
@@ -0,0 +1,33 @@
+! { dg-do run }
+
+ integer, parameter :: n = 1000
+ integer, parameter :: c = 100
+ integer :: i, j
+ real :: a(n)
+ do i = 1, n
+ a(i) = i
+ end do
+ !$omp parallel
+ !$omp single
+ do i = 1, n, c
+ !$omp task shared(a)
+ !$omp target map(a(i:i+c-1))
+ !$omp parallel do
+ do j = i, i + c - 1
+ a(j) = foo (a(j))
+ end do
+ !$omp end target
+ !$omp end task
+ end do
+ !$omp end single
+ !$omp end parallel
+ do i = 1, n
+ if (a(i) /= i + 1) call abort
+ end do
+contains
+ real function foo (x)
+ !$omp declare target
+ real, intent(in) :: x
+ foo = x + 1
+ end function foo
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/taskgroup1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/taskgroup1.f90
new file mode 100644
index 000000000..018d3e83b
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/taskgroup1.f90
@@ -0,0 +1,80 @@
+ integer :: v(16), i
+ do i = 1, 16
+ v(i) = i
+ end do
+
+ !$omp parallel num_threads (4)
+ !$omp single
+ !$omp taskgroup
+ do i = 1, 16, 2
+ !$omp task
+ !$omp task
+ v(i) = v(i) + 1
+ !$omp end task
+ !$omp task
+ v(i + 1) = v(i + 1) + 1
+ !$omp end task
+ !$omp end task
+ end do
+ !$omp end taskgroup
+ do i = 1, 16
+ if (v(i).ne.(i + 1)) call abort
+ end do
+ !$omp taskgroup
+ do i = 1, 16, 2
+ !$omp task
+ !$omp task
+ v(i) = v(i) + 1
+ !$omp endtask
+ !$omp task
+ v(i + 1) = v(i + 1) + 1
+ !$omp endtask
+ !$omp taskwait
+ !$omp endtask
+ end do
+ !$omp endtaskgroup
+ do i = 1, 16
+ if (v(i).ne.(i + 2)) call abort
+ end do
+ !$omp taskgroup
+ do i = 1, 16, 2
+ !$omp task
+ !$omp task
+ v(i) = v(i) + 1
+ !$omp end task
+ v(i + 1) = v(i + 1) + 1
+ !$omp end task
+ end do
+ !$omp taskwait
+ do i = 1, 16, 2
+ !$omp task
+ v(i + 1) = v(i + 1) + 1
+ !$omp end task
+ end do
+ !$omp end taskgroup
+ do i = 1, 16, 2
+ if (v(i).ne.(i + 3)) call abort
+ if (v(i + 1).ne.(i + 5)) call abort
+ end do
+ !$omp taskgroup
+ do i = 1, 16, 2
+ !$omp taskgroup
+ !$omp task
+ v(i) = v(i) + 1
+ !$omp end task
+ !$omp task
+ v(i + 1) = v(i + 1) + 1
+ !$omp end task
+ !$omp end taskgroup
+ if (v(i).ne.(i + 4).or.v(i + 1).ne.(i + 6)) call abort
+ !$omp task
+ v(i) = v(i) + 1
+ !$omp end task
+ end do
+ !$omp end taskgroup
+ do i = 1, 16
+ if (v(i).ne.(i + 5)) call abort
+ end do
+ !$omp end single
+ !$omp end parallel
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr1.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr1.f90
new file mode 100644
index 000000000..5b8044fbe
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr1.f90
@@ -0,0 +1,51 @@
+! { dg-do run }
+
+module udr1
+ type dt
+ integer :: x = 7
+ integer :: y = 9
+ end type
+end module udr1
+ use udr1, only : dt
+!$omp declare reduction (foo : integer : omp_out = omp_out + omp_in)
+ integer :: i, j
+!$omp declare reduction (bar : integer : &
+!$omp & omp_out = omp_out + iand (omp_in, -4)) initializer (omp_priv = 3)
+ type (dt) :: d
+!$omp declare reduction (+ : dt : omp_out%x = omp_out%x &
+!$omp & + iand (omp_in%x, -8))
+!$omp declare reduction (foo : dt : omp_out%x = iand (omp_in%x, -8) &
+!$omp & + omp_out%x) initializer (omp_priv = dt (5, 21))
+ interface operator (+)
+ function notdefined(x, y)
+ use udr1, only : dt
+ type(dt), intent (in) :: x, y
+ type(dt) :: notdefined
+ end function
+ end interface
+ j = 0
+!$omp parallel do reduction (foo : j)
+ do i = 1, 100
+ j = j + i
+ end do
+ if (j .ne. 5050) call abort
+ j = 3
+!$omp parallel do reduction (bar : j)
+ do i = 1, 100
+ j = j + 4 * i
+ end do
+ if (j .ne. (5050 * 4 + 3)) call abort
+!$omp parallel do reduction (+ : d)
+ do i = 1, 100
+ if (d%y .ne. 9) call abort
+ d%x = d%x + 8 * i
+ end do
+ if (d%x .ne. (5050 * 8 + 7) .or. d%y .ne. 9) call abort
+ d = dt (5, 21)
+!$omp parallel do reduction (foo : d)
+ do i = 1, 100
+ if (d%y .ne. 21) call abort
+ d%x = d%x + 8 * i
+ end do
+ if (d%x .ne. (5050 * 8 + 5) .or. d%y .ne. 21) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr10.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr10.f90
new file mode 100644
index 000000000..b64b4f488
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr10.f90
@@ -0,0 +1,32 @@
+! { dg-do run }
+
+module udr10m
+ type dt
+ integer :: x = 0
+ end type
+!$omp declare reduction(.add.:dt:omp_out=omp_out.add.omp_in)
+!$omp declare reduction(+:dt:omp_out=omp_out+omp_in)
+ interface operator(+)
+ module procedure addme
+ end interface
+ interface operator(.add.)
+ module procedure addme
+ end interface
+contains
+ type(dt) function addme (x, y)
+ type (dt), intent (in) :: x, y
+ addme%x = x%x + y%x
+ end function addme
+end module udr10m
+program udr10
+ use udr10m, only : operator(.localadd.) => operator(.add.), &
+& operator(+), dl => dt
+ type(dl) :: j, k
+ integer :: i
+!$omp parallel do reduction(+:j) reduction(.localadd.:k)
+ do i = 1, 100
+ j = j .localadd. dl(i)
+ k = k + dl(i * 2)
+ end do
+ if (j%x /= 5050 .or. k%x /= 10100) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr11.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr11.f90
new file mode 100644
index 000000000..61fb19610
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr11.f90
@@ -0,0 +1,95 @@
+! { dg-do run }
+
+module udr11
+ type dt
+ integer :: x = 0
+ end type
+end module udr11
+ use udr11, only : dt
+!$omp declare reduction(+:dt:omp_out%x=omp_out%x+omp_in%x)
+!$omp declare reduction(-:dt:omp_out%x=omp_out%x+omp_in%x)
+!$omp declare reduction(*:dt:omp_out%x=omp_out%x+omp_in%x)
+!$omp declare reduction(.and.:dt:omp_out%x=omp_out%x+omp_in%x)
+!$omp declare reduction(.or.:dt:omp_out%x=omp_out%x+3*omp_in%x)
+!$omp declare reduction(.eqv.:dt:omp_out%x=omp_out%x+omp_in%x)
+!$omp declare reduction(.neqv.:dt:omp_out%x=omp_out%x+omp_in%x)
+!$omp declare reduction(min:dt:omp_out%x=omp_out%x+omp_in%x)
+!$omp declare reduction(max:dt:omp_out%x=omp_out%x+omp_in%x)
+!$omp declare reduction(iand:dt:omp_out%x=omp_out%x+omp_in%x)
+!$omp declare reduction(ior:dt:omp_out%x=omp_out%x+omp_in%x)
+!$omp declare reduction(ieor:dt:omp_out%x=omp_out%x+omp_in%x)
+ interface operator(.and.)
+ function addme1 (x, y)
+ use udr11, only : dt
+ type (dt), intent (in) :: x, y
+ type(dt) :: addme1
+ end function addme1
+ end interface
+ interface operator(.or.)
+ function addme2 (x, y)
+ use udr11, only : dt
+ type (dt), intent (in) :: x, y
+ type(dt) :: addme2
+ end function addme2
+ end interface
+ interface operator(.eqv.)
+ function addme3 (x, y)
+ use udr11, only : dt
+ type (dt), intent (in) :: x, y
+ type(dt) :: addme3
+ end function addme3
+ end interface
+ interface operator(.neqv.)
+ function addme4 (x, y)
+ use udr11, only : dt
+ type (dt), intent (in) :: x, y
+ type(dt) :: addme4
+ end function addme4
+ end interface
+ interface operator(+)
+ function addme5 (x, y)
+ use udr11, only : dt
+ type (dt), intent (in) :: x, y
+ type(dt) :: addme5
+ end function addme5
+ end interface
+ interface operator(-)
+ function addme6 (x, y)
+ use udr11, only : dt
+ type (dt), intent (in) :: x, y
+ type(dt) :: addme6
+ end function addme6
+ end interface
+ interface operator(*)
+ function addme7 (x, y)
+ use udr11, only : dt
+ type (dt), intent (in) :: x, y
+ type(dt) :: addme7
+ end function addme7
+ end interface
+ type(dt) :: j, k, l, m, n, o, p, q, r, s, t, u
+ integer :: i
+!$omp parallel do reduction(.and.:j) reduction(.or.:k) &
+!$omp & reduction(.eqv.:l) reduction(.neqv.:m) &
+!$omp & reduction(min:n) reduction(max:o) &
+!$omp & reduction(iand:p) reduction(ior:q) reduction (ieor:r) &
+!$omp & reduction(+:s) reduction(-:t) reduction(*:u)
+ do i = 1, 100
+ j%x = j%x + i
+ k%x = k%x + 2 * i
+ l%x = l%x + 3 * i
+ m%x = m%x + i
+ n%x = n%x + 2 * i
+ o%x = o%x + 3 * i
+ p%x = p%x + i
+ q%x = q%x + 2 * i
+ r%x = r%x + 3 * i
+ s%x = s%x + i
+ t%x = t%x + 2 * i
+ u%x = u%x + 3 * i
+ end do
+ if (j%x /= 5050 .or. k%x /= 30300 .or. l%x /= 15150) call abort
+ if (m%x /= 5050 .or. n%x /= 10100 .or. o%x /= 15150) call abort
+ if (p%x /= 5050 .or. q%x /= 10100 .or. r%x /= 15150) call abort
+ if (s%x /= 5050 .or. t%x /= 10100 .or. u%x /= 15150) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr12.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr12.f90
new file mode 100644
index 000000000..601bca6a9
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr12.f90
@@ -0,0 +1,76 @@
+! { dg-do run }
+
+ interface
+ elemental subroutine sub1 (x, y)
+ integer, intent(in) :: y
+ integer, intent(out) :: x
+ end subroutine
+ elemental function fn2 (x)
+ integer, intent(in) :: x
+ integer :: fn2
+ end function
+ end interface
+!$omp declare reduction (foo : integer : omp_out = omp_out + omp_in) initializer (omp_priv = 0)
+!$omp declare reduction (bar : integer : omp_out = fn1 (omp_out, omp_in)) &
+!$omp & initializer (sub1 (omp_priv, omp_orig))
+!$omp declare reduction (baz : integer : sub2 (omp_out, omp_in)) &
+!$omp initializer (omp_priv = fn2 (omp_orig))
+ interface
+ elemental function fn1 (x, y)
+ integer, intent(in) :: x, y
+ integer :: fn1
+ end function
+ elemental subroutine sub2 (x, y)
+ integer, intent(in) :: y
+ integer, intent(inout) :: x
+ end subroutine
+ end interface
+ integer :: a(10), b, r
+ a(:) = 0
+ b = 0
+ r = 0
+!$omp parallel reduction (foo : a, b) reduction (+: r)
+ a = a + 2
+ b = b + 3
+ r = r + 1
+!$omp end parallel
+ if (any (a /= 2 * r) .or. b /= 3 * r) call abort
+ a(:) = 0
+ b = 0
+ r = 0
+!$omp parallel reduction (bar : a, b) reduction (+: r)
+ a = a + 2
+ b = b + 3
+ r = r + 1
+!$omp end parallel
+ if (any (a /= 4 * r) .or. b /= 6 * r) call abort
+ a(:) = 0
+ b = 0
+ r = 0
+!$omp parallel reduction (baz : a, b) reduction (+: r)
+ a = a + 2
+ b = b + 3
+ r = r + 1
+!$omp end parallel
+ if (any (a /= 2 * r) .or. b /= 3 * r) call abort
+end
+elemental function fn1 (x, y)
+ integer, intent(in) :: x, y
+ integer :: fn1
+ fn1 = x + 2 * y
+end function
+elemental subroutine sub1 (x, y)
+ integer, intent(in) :: y
+ integer, intent(out) :: x
+ x = 0
+end subroutine
+elemental function fn2 (x)
+ integer, intent(in) :: x
+ integer :: fn2
+ fn2 = x
+end function
+elemental subroutine sub2 (x, y)
+ integer, intent(inout) :: x
+ integer, intent(in) :: y
+ x = x + y
+end subroutine
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr13.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr13.f90
new file mode 100644
index 000000000..0da1da4bc
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr13.f90
@@ -0,0 +1,106 @@
+! { dg-do run }
+
+ interface
+ subroutine sub1 (x, y)
+ integer, intent(in) :: y(:)
+ integer, intent(out) :: x(:)
+ end subroutine
+ function fn2 (x, m1, m2, n1, n2)
+ integer, intent(in) :: x(:,:), m1, m2, n1, n2
+ integer :: fn2(m1:m2,n1:n2)
+ end function
+ subroutine sub3 (x, y)
+ integer, allocatable, intent(in) :: y(:,:)
+ integer, allocatable, intent(inout) :: x(:,:)
+ end subroutine
+ end interface
+!$omp declare reduction (foo : integer : sub3 (omp_out, omp_in)) &
+!$omp initializer (omp_priv = fn3 (omp_orig))
+!$omp declare reduction (bar : integer : omp_out = fn1 (omp_out, omp_in, &
+!$omp & lbound (omp_out, 1), ubound (omp_out, 1))) &
+!$omp & initializer (sub1 (omp_priv, omp_orig))
+!$omp declare reduction (baz : integer : sub2 (omp_out, omp_in)) &
+!$omp initializer (omp_priv = fn2 (omp_orig, lbound (omp_priv, 1), &
+!$omp ubound (omp_priv, 1), lbound (omp_priv, 2), ubound (omp_priv, 2)))
+ interface
+ function fn1 (x, y, m1, m2)
+ integer, intent(in) :: x(:), y(:), m1, m2
+ integer :: fn1(m1:m2)
+ end function
+ subroutine sub2 (x, y)
+ integer, intent(in) :: y(:,:)
+ integer, intent(inout) :: x(:,:)
+ end subroutine
+ function fn3 (x)
+ integer, allocatable, intent(in) :: x(:,:)
+ integer, allocatable :: fn3(:,:)
+ end function
+ end interface
+ integer :: a(10), b(3:5,7:9), r
+ integer, allocatable :: c(:,:)
+ a(:) = 0
+ r = 0
+!$omp parallel reduction (bar : a) reduction (+: r)
+ if (lbound (a, 1) /= 1 .or. ubound (a, 1) /= 10) call abort
+ a = a + 2
+ r = r + 1
+!$omp end parallel
+ if (any (a /= 4 * r) ) call abort
+ b(:,:) = 0
+ allocate (c (4:6,8:10))
+ c(:,:) = 0
+ r = 0
+!$omp parallel reduction (baz : b, c) reduction (+: r)
+ if (lbound (b, 1) /= 3 .or. ubound (b, 1) /= 5) call abort
+ if (lbound (b, 2) /= 7 .or. ubound (b, 2) /= 9) call abort
+ if (.not. allocated (c)) call abort
+ if (lbound (c, 1) /= 4 .or. ubound (c, 1) /= 6) call abort
+ if (lbound (c, 2) /= 8 .or. ubound (c, 2) /= 10) call abort
+ b = b + 3
+ c = c + 4
+ r = r + 1
+!$omp end parallel
+ if (any (b /= 3 * r) .or. any (c /= 4 * r)) call abort
+ deallocate (c)
+ allocate (c (0:1,7:11))
+ c(:,:) = 0
+ r = 0
+!$omp parallel reduction (foo : c) reduction (+: r)
+ if (.not. allocated (c)) call abort
+ if (lbound (c, 1) /= 0 .or. ubound (c, 1) /= 1) call abort
+ if (lbound (c, 2) /= 7 .or. ubound (c, 2) /= 11) call abort
+ c = c + 5
+ r = r + 1
+!$omp end parallel
+ if (any (c /= 10 * r)) call abort
+end
+function fn1 (x, y, m1, m2)
+ integer, intent(in) :: x(:), y(:), m1, m2
+ integer :: fn1(m1:m2)
+ fn1 = x + 2 * y
+end function
+subroutine sub1 (x, y)
+ integer, intent(in) :: y(:)
+ integer, intent(out) :: x(:)
+ x = 0
+end subroutine
+function fn2 (x, m1, m2, n1, n2)
+ integer, intent(in) :: x(:,:), m1, m2, n1, n2
+ integer :: fn2(m1:m2,n1:n2)
+ fn2 = x
+end function
+subroutine sub2 (x, y)
+ integer, intent(inout) :: x(:,:)
+ integer, intent(in) :: y(:,:)
+ x = x + y
+end subroutine
+function fn3 (x)
+ integer, allocatable, intent(in) :: x(:,:)
+ integer, allocatable :: fn3(:,:)
+ fn3 = x
+end function
+subroutine sub3 (x, y)
+ integer, allocatable, intent(inout) :: x(:,:)
+ integer, allocatable, intent(in) :: y(:,:)
+ x = x + 2 * y
+end subroutine
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr14.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr14.f90
new file mode 100644
index 000000000..d69745855
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr14.f90
@@ -0,0 +1,50 @@
+! { dg-do run }
+
+ type dt
+ integer :: g
+ integer, allocatable :: h(:)
+ end type
+!$omp declare reduction (baz : dt : bar (omp_out, omp_in)) &
+!$omp & initializer (foo (omp_priv, omp_orig))
+ integer :: r
+ type (dt), allocatable :: a(:)
+ allocate (a(7:8))
+ a(:)%g = 0
+ a(7)%h = (/ 0, 0, 0 /)
+ r = 0
+!$omp parallel reduction(+:r) reduction (baz:a)
+ if (.not.allocated (a)) call abort
+ if (lbound (a, 1) /= 7 .or. ubound (a, 1) /= 8) call abort
+ if (.not.allocated (a(7)%h)) call abort
+ if (allocated (a(8)%h)) call abort
+ if (lbound (a(7)%h, 1) /= 1 .or. ubound (a(7)%h, 1) /= 3) call abort
+ a(:)%g = a(:)%g + 2
+ a(7)%h = a(7)%h + 3
+ r = r + 1
+!$omp end parallel
+ if (.not.allocated (a)) call abort
+ if (lbound (a, 1) /= 7 .or. ubound (a, 1) /= 8) call abort
+ if (.not.allocated (a(7)%h)) call abort
+ if (allocated (a(8)%h)) call abort
+ if (lbound (a(7)%h, 1) /= 1 .or. ubound (a(7)%h, 1) /= 3) call abort
+ if (any (a(:)%g /= 2 * r) .or. any (a(7)%h(:) /= 3 * r)) call abort
+contains
+ subroutine foo (x, y)
+ type (dt), allocatable :: x(:), y(:)
+ if (allocated (x) .neqv. allocated (y)) call abort
+ if (lbound (x, 1) /= lbound (y, 1)) call abort
+ if (ubound (x, 1) /= ubound (y, 1)) call abort
+ if (allocated (x(7)%h) .neqv. allocated (y(7)%h)) call abort
+ if (allocated (x(8)%h) .neqv. allocated (y(8)%h)) call abort
+ if (lbound (x(7)%h, 1) /= lbound (y(7)%h, 1)) call abort
+ if (ubound (x(7)%h, 1) /= ubound (y(7)%h, 1)) call abort
+ x(7)%g = 0
+ x(7)%h = 0
+ x(8)%g = 0
+ end subroutine
+ subroutine bar (x, y)
+ type (dt), allocatable :: x(:), y(:)
+ x(:)%g = x(:)%g + y(:)%g
+ x(7)%h(:) = x(7)%h(:) + y(7)%h(:)
+ end subroutine
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr15.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr15.f90
new file mode 100644
index 000000000..2d1169568
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr15.f90
@@ -0,0 +1,64 @@
+! { dg-do run }
+
+module udr15m1
+ integer, parameter :: a = 6
+ integer :: b
+!$omp declare reduction (foo : integer : omp_out = omp_out + omp_in)
+!$omp declare reduction (.add. : integer : &
+!$omp & omp_out = omp_out .add. f3 (omp_in, -4)) &
+!$omp & initializer (s1 (omp_priv, omp_orig))
+ interface operator (.add.)
+ module procedure f1
+ end interface
+contains
+ integer function f1 (x, y)
+ integer, intent (in) :: x, y
+ f1 = x + y
+ end function f1
+ integer function f3 (x, y)
+ integer, intent (in) :: x, y
+ f3 = iand (x, y)
+ end function f3
+ subroutine s1 (x, y)
+ integer, intent (in) :: y
+ integer, intent (out) :: x
+ x = 3
+ end subroutine s1
+end module udr15m1
+module udr15m2
+ use udr15m1, f4 => f1, f5 => f3, s2 => s1, operator (.addtwo.) => operator (.add.)
+ type dt
+ integer :: x
+ end type
+!$omp declare reduction (+ : dt : omp_out = f6 (omp_out + omp_in)) &
+!$omp & initializer (s3 (omp_priv))
+ interface operator (+)
+ module procedure f2
+ end interface
+contains
+ type(dt) function f2 (x, y)
+ type(dt), intent (in) :: x, y
+ f2%x = x%x + y%x
+ end function f2
+ type(dt) function f6 (x)
+ type(dt), intent (in) :: x
+ f6%x = x%x
+ end function f6
+ subroutine s3 (x)
+ type(dt), intent (out) :: x
+ x = dt(0)
+ end subroutine
+end module udr15m2
+ use udr15m2, operator (.addthree.) => operator (.addtwo.), &
+ f7 => f4, f8 => f6, s4 => s3
+ integer :: i, j
+ type(dt) :: d
+ j = 3
+ d%x = 0
+!$omp parallel do reduction (.addthree.: j) reduction (+ : d)
+ do i = 1, 100
+ j = j.addthree.iand (i, -4)
+ d = d + dt(i)
+ end do
+ if (d%x /= 5050 .or. j /= 4903) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr2.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr2.f90
new file mode 100644
index 000000000..861a4b270
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr2.f90
@@ -0,0 +1,51 @@
+! { dg-do run }
+
+module udr2
+ type dt
+ integer :: x = 7
+ integer :: y = 9
+ end type
+end module udr2
+ use udr2, only : dt
+!$omp declare reduction (foo : integer : omp_out = omp_out + omp_in)
+ integer :: i, j(2:4,3:5)
+!$omp declare reduction (bar : integer : &
+!$omp & omp_out = omp_out + iand (omp_in, -4)) initializer (omp_priv = 3)
+ interface operator (+)
+ function notdefined(x, y)
+ use udr2, only : dt
+ type(dt), intent (in) :: x, y
+ type(dt) :: notdefined
+ end function
+ end interface
+ type (dt) :: d(2:4,3:5)
+!$omp declare reduction (+ : dt : omp_out%x = omp_out%x &
+!$omp & + iand (omp_in%x, -8))
+!$omp declare reduction (foo : dt : omp_out%x = iand (omp_in%x, -8) &
+!$omp & + omp_out%x) initializer (omp_priv = dt (5, 21))
+ j = 0
+!$omp parallel do reduction (foo : j)
+ do i = 1, 100
+ j = j + i
+ end do
+ if (any(j .ne. 5050)) call abort
+ j = 3
+!$omp parallel do reduction (bar : j)
+ do i = 1, 100
+ j = j + 4 * i
+ end do
+ if (any(j .ne. (5050 * 4 + 3))) call abort
+!$omp parallel do reduction (+ : d)
+ do i = 1, 100
+ if (any(d%y .ne. 9)) call abort
+ d%x = d%x + 8 * i
+ end do
+ if (any(d%x .ne. (5050 * 8 + 7)) .or. any(d%y .ne. 9)) call abort
+ d = dt (5, 21)
+!$omp parallel do reduction (foo : d)
+ do i = 1, 100
+ if (any(d%y .ne. 21)) call abort
+ d%x = d%x + 8 * i
+ end do
+ if (any(d%x .ne. (5050 * 8 + 5)) .or. any(d%y .ne. 21)) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr3.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr3.f90
new file mode 100644
index 000000000..258b67220
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr3.f90
@@ -0,0 +1,38 @@
+! { dg-do run }
+
+!$omp declare reduction (foo : character(kind=1, len=*) &
+!$omp & : omp_out = trim(omp_out) // omp_in) initializer (omp_priv = '')
+!$omp declare reduction (bar : character(kind=1, len=:) &
+!$omp & : omp_out = trim(omp_in) // omp_out) initializer (omp_priv = '')
+!$omp declare reduction (baz : character(kind=1, len=1) &
+!$omp & : omp_out = char (ichar (omp_out) + ichar (omp_in) &
+!$omp & - ichar ('0'))) initializer (omp_priv = '0')
+!$omp declare reduction (baz : character(kind=1, len=2) &
+!$omp & : omp_out = char (ichar (omp_out(1:1)) + ichar (omp_in(1:1)) &
+!$omp & - ichar ('0')) // char (ichar (omp_out(2:2)) + &
+!$omp & ichar (omp_in(2:2)) - ichar ('0'))) initializer (omp_priv = '00')
+ character(kind=1, len=64) :: c, d
+ character(kind = 1, len=1) :: e
+ character(kind = 1, len=1+1) :: f
+ integer :: i
+ c = ''
+ d = ''
+ e = '0'
+ f = '00'
+!$omp parallel do reduction (foo : c) reduction (bar : d) &
+!$omp & reduction (baz : e, f)
+ do i = 1, 64
+ c = trim(c) // char (ichar ('0') + i)
+ d = char (ichar ('0') + i) // d
+ e = char (ichar (e) + mod (i, 3))
+ f = char (ichar (f(1:1)) + mod (i, 2)) &
+& // char (ichar (f(2:2)) + mod (i, 3))
+ end do
+ do i = 1, 64
+ if (index (c, char (ichar ('0') + i)) .eq. 0) call abort
+ if (index (d, char (ichar ('0') + i)) .eq. 0) call abort
+ end do
+ if (e.ne.char (ichar ('0') + 64)) call abort
+ if (f(1:1).ne.char (ichar ('0') + 32)) call abort
+ if (f(2:2).ne.char (ichar ('0') + 64)) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr4.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr4.f90
new file mode 100644
index 000000000..89365476a
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr4.f90
@@ -0,0 +1,50 @@
+! { dg-do run }
+
+!$omp declare reduction (foo : character(kind=1, len=*) &
+!$omp & : omp_out = fn (omp_out, omp_in)) initializer (omp_priv = '')
+!$omp declare reduction (bar : character(kind=1, len=:) &
+!$omp & : omp_out = fn (omp_in, omp_out)) initializer (omp_priv = '')
+!$omp declare reduction (baz : character(kind=1, len=1) &
+!$omp & : omp_out = char (ichar (omp_out) + ichar (omp_in) &
+!$omp & - ichar ('0'))) initializer (omp_priv = '0')
+!$omp declare reduction (baz : character(kind=1, len=2) &
+!$omp & : omp_out = char (ichar (omp_out(1:1)) + ichar (omp_in(1:1)) &
+!$omp & - ichar ('0')) // char (ichar (omp_out(2:2)) + &
+!$omp & ichar (omp_in(2:2)) - ichar ('0'))) initializer (omp_priv = '00')
+ interface
+ elemental function fn (x, y)
+ character (len=64), intent (in) :: x, y
+ character (len=64) :: fn
+ end function
+ end interface
+ character(kind=1, len=64) :: c(-3:-2,1:1,7:8), d(2:3,-7:-5)
+ character(kind = 1, len=1) :: e(2:4)
+ character(kind = 1, len=1+1) :: f(8:10,9:10)
+ integer :: i, j, k
+ c = ''
+ d = ''
+ e = '0'
+ f = '00'
+!$omp parallel do reduction (foo : c) reduction (bar : d) &
+!$omp & reduction (baz : e, f) private (j, k)
+ do i = 1, 64
+ forall (j = -3:-2, k = 7:8) &
+ c(j,1,k) = trim(c(j,1,k)) // char (ichar ('0') + i)
+ d = char (ichar ('0') + i) // d
+ e = char (ichar (e) + mod (i, 3))
+ f = char (ichar (f(:,:)(1:1)) + mod (i, 2)) &
+& // char (ichar (f(:,:)(2:2)) + mod (i, 3))
+ end do
+ do i = 1, 64
+ if (any (index (c, char (ichar ('0') + i)) .eq. 0)) call abort
+ if (any (index (d, char (ichar ('0') + i)) .eq. 0)) call abort
+ end do
+ if (any (e.ne.char (ichar ('0') + 64))) call abort
+ if (any (f(:,:)(1:1).ne.char (ichar ('0') + 32))) call abort
+ if (any (f(:,:)(2:2).ne.char (ichar ('0') + 64))) call abort
+end
+elemental function fn (x, y)
+ character (len=64), intent (in) :: x, y
+ character (len=64) :: fn
+ fn = trim(x) // y
+end function
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr5.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr5.f90
new file mode 100644
index 000000000..6dae9b9b8
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr5.f90
@@ -0,0 +1,57 @@
+! { dg-do run }
+
+module m
+ interface operator(.add.)
+ module procedure do_add
+ end interface
+ type dt
+ real :: r = 0.0
+ end type
+contains
+ function do_add(x, y)
+ type (dt), intent (in) :: x, y
+ type (dt) :: do_add
+ do_add%r = x%r + y%r
+ end function
+ subroutine dp_add(x, y)
+ double precision :: x, y
+ x = x + y
+ end subroutine
+ subroutine dp_init(x)
+ double precision :: x
+ x = 0.0
+ end subroutine
+end module
+
+program udr5
+ use m, only : operator(.add.), dt, dp_add, dp_init
+ type(dt) :: xdt, one
+ real :: r
+ integer (kind = 4) :: i4
+ integer (kind = 8) :: i8
+ real (kind = 4) :: r4
+ double precision :: dp
+!$omp declare reduction(.add.:dt:omp_out=omp_out.add.omp_in)
+!$omp declare reduction(foo:integer(4),integer(kind=8),real (kind = 4) &
+!$omp & :omp_out = omp_out + omp_in) initializer (omp_priv = 0)
+!$omp declare reduction(foo:double precision:dp_add (omp_out, omp_in)) &
+!$omp & initializer (dp_init (omp_priv))
+
+ one%r = 1.0
+ r = 0.0
+ i4 = 0
+ i8 = 0
+ r4 = 0.0
+ call dp_init (dp)
+!$omp parallel reduction(.add.: xdt) reduction(+: r) &
+!$omp & reduction(foo: i4, i8, r4, dp)
+ xdt = xdt.add.one
+ r = r + 1.0
+ i4 = i4 + 1
+ i8 = i8 + 1
+ r4 = r4 + 1.0
+ call dp_add (dp, 1.0d0)
+!$omp end parallel
+ if (xdt%r .ne. r) call abort
+ if (i4.ne.r.or.i8.ne.r.or.r4.ne.r.or.dp.ne.r) call abort
+end program udr5
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr6.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr6.f90
new file mode 100644
index 000000000..20736fb79
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr6.f90
@@ -0,0 +1,69 @@
+! { dg-do run }
+
+module m
+ interface operator(.add.)
+ module procedure do_add
+ end interface
+ type dt
+ real :: r = 0.0
+ end type
+contains
+ elemental function do_add(x, y)
+ type (dt), intent (in) :: x, y
+ type (dt) :: do_add
+ do_add%r = x%r + y%r
+ end function
+ elemental subroutine dp_add(x, y)
+ double precision, intent (inout) :: x
+ double precision, intent (in) :: y
+ x = x + y
+ end subroutine
+ elemental subroutine dp_init(x)
+ double precision, intent (out) :: x
+ x = 0.0
+ end subroutine
+end module
+
+program udr6
+ use m, only : operator(.add.), dt, dp_add, dp_init
+ type(dt), allocatable :: xdt(:)
+ type(dt) :: one
+ real :: r
+ integer (kind = 4), allocatable, dimension(:) :: i4
+ integer (kind = 8), allocatable, dimension(:,:) :: i8
+ integer :: i
+ real (kind = 4), allocatable :: r4(:,:)
+ double precision, allocatable :: dp(:)
+!$omp declare reduction(.add.:dt:omp_out=omp_out.add.omp_in)
+!$omp declare reduction(foo:integer(4),integer(kind=8),real (kind = 4) &
+!$omp & :omp_out = omp_out + omp_in) initializer (omp_priv = 0)
+!$omp declare reduction(foo:double precision:dp_add (omp_out, omp_in)) &
+!$omp & initializer (dp_init (omp_priv))
+
+ one%r = 1.0
+ allocate (xdt(4), i4 (3), i8(-5:-2,2:3), r4(2:5,1:1), dp(7))
+ r = 0.0
+ i4 = 0
+ i8 = 0
+ r4 = 0.0
+ do i = 1, 7
+ call dp_init (dp(i))
+ end do
+!$omp parallel reduction(.add.: xdt) reduction(+: r) &
+!$omp & reduction(foo: i4, i8, r4, dp) private(i)
+ do i = 1, 4
+ xdt(i) = xdt(i).add.one
+ end do
+ r = r + 1.0
+ i4 = i4 + 1
+ i8 = i8 + 1
+ r4 = r4 + 1.0
+ do i = 1, 7
+ call dp_add (dp(i), 1.0d0)
+ end do
+!$omp end parallel
+ if (any (xdt%r .ne. r)) call abort
+ if (any (i4.ne.r).or.any(i8.ne.r)) call abort
+ if (any(r4.ne.r).or.any(dp.ne.r)) call abort
+ deallocate (xdt, i4, i8, r4, dp)
+end program udr6
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr7.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr7.f90
new file mode 100644
index 000000000..42be00c3a
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr7.f90
@@ -0,0 +1,46 @@
+! { dg-do run }
+
+program udr7
+ implicit none
+ interface
+ elemental subroutine omp_priv (x, y, z)
+ real, intent (in) :: x
+ real, intent (inout) :: y
+ real, intent (in) :: z
+ end subroutine omp_priv
+ elemental real function omp_orig (x)
+ real, intent (in) :: x
+ end function omp_orig
+ end interface
+!$omp declare reduction (omp_priv : real : &
+!$omp & omp_priv (omp_orig (omp_in), omp_out, 1.0)) &
+!$omp & initializer (omp_out (omp_priv, omp_in (omp_orig)))
+ real :: x (2:4, 1:1, -2:0)
+ integer :: i
+ x = 0
+!$omp parallel do reduction (omp_priv : x)
+ do i = 1, 64
+ x = x + i
+ end do
+ if (any (x /= 2080.0)) call abort
+contains
+ elemental subroutine omp_out (x, y)
+ real, intent (out) :: x
+ real, intent (in) :: y
+ x = y - 4.0
+ end subroutine omp_out
+ elemental real function omp_in (x)
+ real, intent (in) :: x
+ omp_in = x + 4.0
+ end function omp_in
+end program udr7
+elemental subroutine omp_priv (x, y, z)
+ real, intent (in) :: x
+ real, intent (inout) :: y
+ real, intent (in) :: z
+ y = y + (x - 4.0) + (z - 1.0)
+end subroutine omp_priv
+elemental real function omp_orig (x)
+ real, intent (in) :: x
+ omp_orig = x + 4.0
+end function omp_orig
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr8.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr8.f90
new file mode 100644
index 000000000..9ef48a5c7
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr8.f90
@@ -0,0 +1,46 @@
+! { dg-do run }
+
+module udr8m1
+ integer, parameter :: a = 6
+ integer :: b
+!$omp declare reduction (foo : integer : omp_out = omp_out + omp_in)
+!$omp declare reduction (.add. : integer : &
+!$omp & omp_out = omp_out .add. iand (omp_in, -4)) &
+!$omp & initializer (omp_priv = 3)
+ interface operator (.add.)
+ module procedure f1
+ end interface
+contains
+ integer function f1 (x, y)
+ integer, intent (in) :: x, y
+ f1 = x + y
+ end function f1
+end module udr8m1
+module udr8m2
+ use udr8m1
+ type dt
+ integer :: x
+ end type
+!$omp declare reduction (+ : dt : omp_out = omp_out + omp_in) &
+!$omp & initializer (omp_priv = dt (0))
+ interface operator (+)
+ module procedure f2
+ end interface
+contains
+ type(dt) function f2 (x, y)
+ type(dt), intent (in) :: x, y
+ f2%x = x%x + y%x
+ end function f2
+end module udr8m2
+ use udr8m2
+ integer :: i, j
+ type(dt) :: d
+ j = 3
+ d%x = 0
+!$omp parallel do reduction (.add.: j) reduction (+ : d)
+ do i = 1, 100
+ j = j.add.iand (i, -4)
+ d = d + dt(i)
+ end do
+ if (d%x /= 5050 .or. j /= 4903) call abort
+end
diff --git a/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr9.f90 b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr9.f90
new file mode 100644
index 000000000..a4fec1337
--- /dev/null
+++ b/gcc-4.9/libgomp/testsuite/libgomp.fortran/udr9.f90
@@ -0,0 +1,65 @@
+! { dg-do run }
+
+module udr9m1
+ integer, parameter :: a = 6
+ integer :: b
+!$omp declare reduction (foo : integer : combiner1 (omp_out, omp_in)) &
+!$omp & initializer (initializer1 (omp_priv, omp_orig))
+!$omp declare reduction (.add. : integer : &
+!$omp & combiner1 (omp_out, omp_in)) &
+!$omp & initializer (initializer1 (omp_priv, omp_orig))
+ interface operator (.add.)
+ module procedure f1
+ end interface
+contains
+ integer function f1 (x, y)
+ integer, intent (in) :: x, y
+ f1 = x + y
+ end function f1
+ elemental subroutine combiner1 (x, y)
+ integer, intent (inout) :: x
+ integer, intent (in) :: y
+ x = x + iand (y, -4)
+ end subroutine
+ subroutine initializer1 (x, y)
+ integer :: x, y
+ if (y .ne. 3) call abort
+ x = y
+ end subroutine
+end module udr9m1
+module udr9m2
+ use udr9m1
+ type dt
+ integer :: x
+ end type
+!$omp declare reduction (+ : dt : combiner2 (omp_in, omp_out)) &
+!$omp & initializer (initializer2 (omp_priv))
+ interface operator (+)
+ module procedure f2
+ end interface
+contains
+ type(dt) function f2 (x, y)
+ type(dt), intent (in) :: x, y
+ f2%x = x%x + y%x
+ end function f2
+ subroutine combiner2 (x, y)
+ type(dt) :: x, y
+ y = y + x
+ end subroutine combiner2
+ subroutine initializer2 (x)
+ type(dt), intent(out) :: x
+ x%x = 0
+ end subroutine initializer2
+end module udr9m2
+ use udr9m2
+ integer :: i, j
+ type(dt) :: d
+ j = 3
+ d%x = 0
+!$omp parallel do reduction (.add.: j) reduction (+ : d)
+ do i = 1, 100
+ j = j.add.iand (i, -4)
+ d = d + dt(i)
+ end do
+ if (d%x /= 5050 .or. j /= 4903) call abort
+end