Diffstat (limited to 'gcc-4.3.1/contrib')
-rw-r--r--  gcc-4.3.1/contrib/ChangeLog | 1297
-rw-r--r--  gcc-4.3.1/contrib/ChangeLog.tree-ssa | 46
-rwxr-xr-x  gcc-4.3.1/contrib/analyze_brprob | 146
-rwxr-xr-x  gcc-4.3.1/contrib/check_warning_flags.sh | 136
-rwxr-xr-x  gcc-4.3.1/contrib/compare-debug | 76
-rwxr-xr-x  gcc-4.3.1/contrib/compareSumTests3 | 250
-rwxr-xr-x  gcc-4.3.1/contrib/compare_tests | 111
-rwxr-xr-x  gcc-4.3.1/contrib/dg-cmp-results.sh | 201
-rw-r--r--  gcc-4.3.1/contrib/dglib.pm | 424
-rwxr-xr-x  gcc-4.3.1/contrib/download_ecj | 25
-rwxr-xr-x  gcc-4.3.1/contrib/filter_gcc_for_doxygen | 12
-rwxr-xr-x  gcc-4.3.1/contrib/filter_knr2ansi.pl | 45
-rwxr-xr-x  gcc-4.3.1/contrib/filter_params.pl | 14
-rw-r--r--  gcc-4.3.1/contrib/gcc.doxy | 1267
-rwxr-xr-x  gcc-4.3.1/contrib/gcc_build | 325
-rwxr-xr-x  gcc-4.3.1/contrib/gcc_update | 277
-rw-r--r--  gcc-4.3.1/contrib/gccbug.el | 84
-rwxr-xr-x  gcc-4.3.1/contrib/gennews | 62
-rw-r--r--  gcc-4.3.1/contrib/gthr_supp_vxw_5x.c | 92
-rwxr-xr-x  gcc-4.3.1/contrib/index-prop | 26
-rw-r--r--  gcc-4.3.1/contrib/paranoia.cc | 2714
-rwxr-xr-x  gcc-4.3.1/contrib/patch_tester.sh | 450
-rwxr-xr-x  gcc-4.3.1/contrib/prepare_patch.sh | 96
-rw-r--r--  gcc-4.3.1/contrib/reghunt/ChangeLog | 28
-rw-r--r--  gcc-4.3.1/contrib/reghunt/README | 16
-rwxr-xr-x  gcc-4.3.1/contrib/reghunt/reg_periodic | 171
-rwxr-xr-x  gcc-4.3.1/contrib/reghunt/reg_search | 300
-rwxr-xr-x  gcc-4.3.1/contrib/reghunt/reg_test_template | 41
-rw-r--r--  gcc-4.3.1/contrib/regression/ChangeLog | 138
-rw-r--r--  gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Default.png | bin 0 -> 81 bytes
-rw-r--r--  gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Icon.png | bin 0 -> 2122 bytes
-rw-r--r--  gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Info.plist | 32
-rw-r--r--  gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/de.lproj/InfoPlist.strings | bin 0 -> 126 bytes
-rw-r--r--  gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/en.lproj/InfoPlist.strings | bin 0 -> 96 bytes
-rw-r--r--  gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/ja.lproj/InfoPlist.strings | bin 0 -> 74 bytes
-rw-r--r--  gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/widget.html | 153
-rw-r--r--  gcc-4.3.1/contrib/regression/README | 24
-rwxr-xr-x  gcc-4.3.1/contrib/regression/btest-gcc.sh | 232
-rwxr-xr-x  gcc-4.3.1/contrib/regression/objs-gcc.sh | 127
-rw-r--r--  gcc-4.3.1/contrib/regression/site.exp | 18
-rwxr-xr-x  gcc-4.3.1/contrib/test_installed | 121
-rwxr-xr-x  gcc-4.3.1/contrib/test_summary | 156
-rwxr-xr-x  gcc-4.3.1/contrib/texi2pod.pl | 495
-rwxr-xr-x  gcc-4.3.1/contrib/uninclude | 52
-rwxr-xr-x  gcc-4.3.1/contrib/warn_summary | 214
45 files changed, 10494 insertions, 0 deletions
diff --git a/gcc-4.3.1/contrib/ChangeLog b/gcc-4.3.1/contrib/ChangeLog
new file mode 100644
index 000000000..6d072141f
--- /dev/null
+++ b/gcc-4.3.1/contrib/ChangeLog
@@ -0,0 +1,1297 @@
+2008-06-06 Release Manager
+
+ * GCC 4.3.1 released.
+
+2008-04-01 Joseph Myers <joseph@codesourcery.com>
+
+ * texi2pod.pl: Handle @samp and @url inside verbatim blocks.
+ Handle @heading. Handle enumerated lists with starting numbers
+ and extra headings.
+
+2008-03-05 Release Manager
+
+ * GCC 4.3.0 released.
+
+2008-03-05 Jakub Jelinek <jakub@redhat.com>
+
+ * gennews (files): Reference GCC 4.3 web pages.
+
+2008-02-15 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
+
+ * check_warning_flags.sh: New file.
+
+2008-01-25 Joseph Myers <joseph@codesourcery.com>
+
+ * paranoia.cc (main): Remove handling of c4x_single and
+ c4x_extended formats.
+
+2007-12-26 Sebastian Pop <sebastian.pop@amd.com>
+
+ * compareSumTests3: Changed to GPLv3.
+ * prepare_patch.sh: Same.
+ * uninclude: Same.
+ * dglib.pm: Same.
+ * gennews: Same.
+ * texi2pod.pl: Same.
+ * analyze_brprob: Same.
+ * gcc_build: Same.
+
+2007-12-26 Sebastian Pop <sebastian.pop@amd.com>
+
+ * patch_tester.sh: Changed to GPLv3.
+
+2007-12-15 Sebastian Pop <sebastian.pop@amd.com>
+
+ * patch_tester.sh: Don't save the script anymore.
+
+2007-12-15 Sebastian Pop <sebastian.pop@amd.com>
+
+ * patch_tester.sh: New.
+ * prepare_patch.sh: New.
+
+2007-11-26 Alexandre Oliva <aoliva@redhat.com>
+
+ * compare-debug: Introduce -p flag to preserve .stripped files.
+
+2007-10-08 Manuel Lopez-Ibanez <manu@gcc.gnu.org>
+
+ * texi2pod.pl: Handle @asis.
+ (postprocess): Move @gccoptlist{} after all formatting commands.
+
+2007-10-01 Alexandre Oliva <aoliva@redhat.com>
+
+ * compare-debug: Avoid spurious errors when .stripped files
+ exist.
+
+2007-09-22 Hans-Peter Nilsson <hp@axis.com>
+
+ * warn_summary (srcdirFilter): Add fixincludes, sim,
+ newlib and mpfr.
+
+2007-08-23 H.J. Lu <hongjiu.lu@intel.com>
+
+ * gcc_update: Handle different URL paths and tags.
+
+2007-08-16 Alexandre Oliva <aoliva@redhat.com>
+
+ * compare-debug: New.
+
+2007-08-16 H.J. Lu <hongjiu.lu@intel.com>
+ Andreas Schwab <schwab@suse.de>
+
+ * gcc_update: Use "svn info" for revision number. Create
+ gcc/REVISION with branch name and revision number.
+
+2007-08-10 Diego Novillo <dnovillo@google.com>
+
+ * gcc.doxy: Rename from tree-ssa.doxy.
+
+2007-08-10 Diego Novillo <dnovillo@google.com>
+
+ * tree-ssa.doxy: Update for doxygen 1.5.
+ Include all the files in the middle and back ends.
+
+2007-06-30 Hans-Peter Nilsson <hp@axis.com>
+
+ * gcc_update (files_and_dependencies): Handle
+ gcc/testsuite/gcc.dg/cpp/direct2s.c.
+
+2007-05-13 Mark Mitchell <mark@codesourcery.com>
+
+ * gennews (files): Reference GCC 4.2 web pages.
+
+2007-04-04 Zack Weinberg <zack@mrtock.ucsd.edu>
+
+ * texi2pod.pl: Correct handling of @itemize with no argument.
+
+2007-03-17 Hans-Peter Nilsson <hp@axis.com>
+
+ * uninclude: New utility, from Alexandre Oliva.
+
+2007-02-26 Dominique Dhumieres <dominiq@lps.ens.fr>
+
+ * test_installed: Adjust to the move from g77 to gfortran.
+
+2007-02-16 Matthias Klose <doko@debian.org>
+
+ * texi2pod.pl: Handle @subsubsection, ignore @anchor.
+
+2007-02-10 Hans-Peter Nilsson <hp@axis.com>
+
+ * test_summary (configflags): Adjust for changes in the
+ config.status format due to update to autoconf 2.59.
+
+2007-02-09 Daniel Jacobowitz <dan@codesourcery.com>
+
+ * gcc_update: Update for configure.in -> configure.ac.
+
+2007-02-06 Richard Sandiford <richard@codesourcery.com>
+
+ * texi2pod.pl: Handle @multitable.
+
+2007-01-15 Tom Tromey <tromey@redhat.com>
+
+ * download_ecj: New file.
+
+2006-09-27 Matthew Sachs <msachs@apple.com>
+
+ * compareSumTests3: POD syntax fix.
+
+2006-09-27 Matthew Sachs <msachs@apple.com>
+
+ * dglib.pm: Initial commit.
+ * compareSumTests3: Initial commit.
+
+2006-09-18 Bernhard Fischer <aldot@gcc.gnu.org>
+
+ * dg-cmp-results.sh (compare-$$.awk): Print name[old]
+ and not non-existing nm[old].
+
+2006-07-06 Paul Eggert <eggert@cs.ucla.edu>
+
+ Port to hosts whose 'sort' and 'tail' implementations
+ treat operands with leading '+' as file names, as POSIX
+ has required since 2001. However, make sure the code still
+ works on pre-POSIX hosts.
+ * compare_tests: Don't assume "sort +2" is equivalent to
+ "sort -k 3", since POSIX 1003.1-2001 no longer requires this.
+
+2006-06-05 James Lemke <jwlemke@wasabisystems.com>
+
+ * dg-cmp-results.sh: New script for comparing DejaGNU logs.
+
+2006-06-02 Richard Earnshaw <rearnsha@arm.com>
+ Mike Stump <mrs@apple.com>
+
+ * compare_tests: Handle multilibs better.
+
+2006-05-31 Daniel Jacobowitz <dan@codesourcery.com>
+
+ * texi2pod.pl: Correct handling of absolute @include.
+
+2006-05-02 Daniel Jacobowitz <dan@codesourcery.com>
+
+ * texi2pod.pl: Handle -I.
+
+2006-02-28 Mark Mitchell <mark@codesourcery.com>
+
+ * gennews (files): Update for GCC 4.1.
+
+2006-01-24 Diego Novillo <dnovillo@redhat.com>
+
+ * gcc_update (files_and_dependencies): Handle
+ libgomp/testsuite/Makefile.in.
+
+2006-01-18 Richard Henderson <rth@redhat.com>
+ Diego Novillo <dnovillo@redhat.com>
+
+ * gcc_update (files_and_dependencies): Add libgomp files.
+
+2005-12-06 Ben Elliston <bje@au.ibm.com>
+
+ * newcvsroot: Remove.
+
+2005-11-03 Steven Bosscher <stevenb@suse.de>
+
+ * gcc_build: Fix my previous checkin.
+
+2005-11-01 Joseph S. Myers <joseph@codesourcery.com>
+
+ * gcc_update: Include revision number in LAST_UPDATED.
+
+2005-10-30 Steven Bosscher <stevenb@suse.de>
+
+ * gcc_build: Use gcc.gnu.org as the default server. Set up
+ SVN_REPOSITORY correctly. Add support for checking out branches.
+
+2005-10-28 Andrew Pinski <pinskia@gcc.gnu.org>
+
+ * gcc_update: When svn update is called and
+ --silent is used, pass -q.
+
+2005-10-28 Andrew Pinski <pinskia@gcc.gnu.org>
+
+ * gcc_update: Remove the -q from svn invocation.
+
+2005-10-28 Andrew Pinski <pinskia@gcc.gnu.org>
+
+ * gcc_build: Remove -d from the call to gcc_update.
+
+2005-10-28 Daniel Berlin <dberlin@dberlin.org>
+ Ben Elliston <bje@au1.ibm.com>
+
+ * gcc_update: Update for svn.
+ * newcvsroot: Ditto.
+ * gcc_build: Ditto.
+
+2005-10-21 Mark Mitchell <mark@codesourcery.com>
+
+ * texi2pod.pl: Substitute for @value even when part of @include.
+
+2005-10-21 Bob Wilson <bob.wilson@acm.org>
+
+ * texi2pod.pl: Convert two single quotes or two backquotes to
+ double quotes.
+
+2005-08-23 Ben Elliston <bje@au.ibm.com>
+
+ * gcc_update: Update dependencies for libjava/aclocal.m4.
+
+2005-08-14 Kelley Cook <kcook@gcc.gnu.org>
+
+ * All files: Update with new FSF address.
+
+2005-07-14 Ben Elliston <bje@au.ibm.com>
+
+ * filter_params.pl: Typo fix.
+ * filter_knr2ansi.pl: Likewise.
+
+2005-06-05 Gerald Pfeifer <gerald@pfeifer.com>
+
+ * gennews (files): Update for egcs-1.0 release pages consolidation.
+
+2005-04-17 Joseph S. Myers <joseph@codesourcery.com>
+
+ * gennews (files): Update for GCC 4.0.
+
+2005-03-18 Alexandre Oliva <aoliva@redhat.com>
+
+ * gcc_update (silent): Unset instead of initializing to null.
+
+2005-03-18 Andreas Schwab <schwab@suse.de>
+
+ * gcc_update (apply_patch): Properly quote $1.
+
+2005-03-18 Zack Weinberg <zack@codesourcery.com>
+
+ * gcc_update: Add --silent option.
+
+2005-03-08 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * warn_summary (keywordFilter): Update sed pattern for new quoting
+ style in warnings.
+
+2005-03-07 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * warn_summary: Add -fortran subdir flag.
+ (subdirectoryFilter): Fix missing ada subdir.
+
+2004-11-20 Hans-Peter Nilsson <hp@bitrange.com>
+
+ * gcc_update (touch_files): Don't rely on "make" being GNU make;
+ grep for "Touching" to determine that the rule executes.
+
+2004-11-14 Hans-Peter Nilsson <hp@bitrange.com>
+
+ * gcc_update (touch_files): Explicitly pass --no-print-directory.
+
+2004-11-04 Andrew Pinski <pinskia@physics.uc.edu>
+
+ * gcc_update (boehm-gc/aclocal.m4): Remove boehm-gc/acinclude.m4.
+
+2004-08-04 Paolo Bonzini <bonzini@gnu.org>
+
+ * gcc_update: Add fixincludes.
+
+2004-08-26 Matthias Klose <doko@debian.org>
+
+ * texi2pod.pl: keep references of the form @ref{...}, print them bold.
+
+2004-08-26 Matthias Klose <doko@debian.org>
+
+ * test_summary: Include baseline used for libstdc++-v3 abi check
+ in test summary.
+
+2004-08-25 Ben Elliston <bje@au.ibm.com>
+
+ * gcc_update (libcpp/Makefile.in): Remove libcpp/Makefile.am.
+
+2004-08-20 Andreas Tobler <a.tobler@schweiz.ch>
+
+ * gcc_update (self): libjava/configure.in -> configure.ac. Automake
+ update done.
+
+2004-08-04 Paolo Bonzini <bonzini@gnu.org>
+
+ * gcc_update: libjava/configure.ac -> configure.in. Automake
+ 1.4 does not handle configure.ac properly.
+
+2004-08-04 Andreas Schwab <schwab@suse.de>
+
+ * gcc_update: libjava/configure.in -> configure.ac.
+
+2004-08-03 Richard Earnshaw <rearnsha@arm.com>
+
+ * gcc_update: gcc/config/arm/arm-tune.md -> arm-cores.def gentune.sh.
+
+2004-07-16 Jeff Law <law@redhat.com>
+
+ * analyze_brprob: Fix comments. More consistent output format.
+
+2004-06-17 Diego Novillo <dnovillo@redhat.com>
+
+ * tree-ssa.doxy: Update for doxygen 1.3.5.
+
+2004-06-14 Andreas Jaeger <aj@suse.de>
+
+ * gcc_update: boehm-gc/configure.in -> configure.ac.
+
+2004-06-16 Paolo Bonzini <bonzini@gnu.org>
+
+ * gcc_update (boehm-gc/configure): Depend on
+ boehm-gc/configure.ac instead of boehm-gc/configure.in
+
+2004-06-14 Paolo Bonzini <bonzini@gnu.org>
+
+ * gcc_update (libmudflap/configure): Depend on
+ libmudflap/configure.ac instead of libmudflap/configure.in
+
+2004-06-14 Andreas Jaeger <aj@suse.de>
+
+ * gcc_update: Add libcpp.
+
+2004-06-10 Andreas Jaeger <aj@suse.de>
+
+ * gcc_update: Add libgfortran.
+
+2004-05-29 Andrew Pinski <pinskia@physics.uc.edu>
+
+ * gcc_update (libbanshee/configure): Depend on
+ libbanshee/configure.ac instead of libbanshee/configure.in
+
+2004-05-17 Zack Weinberg <zack@codesourcery.com>
+
+ * gcc_update: Remove gcc/f/intdoc.texi and all libf2c files
+ from list of files to be touched.
+ * convert_to_f2c, convert_to_g2c, download_f2c: Delete.
+
+2004-05-15 Joseph S. Myers <jsm@polyomino.org.uk>
+
+ * gennews: Update for GCC 3.4.
+
+2004-05-13 Diego Novillo <dnovillo@redhat.com>
+
+ Merge from tree-ssa-20020619-branch.
+
+ * filter_gcc_for_doxygen: New file.
+ * filter_knr2ansi.pl: New file.
+ * filter_params.pl: New file.
+ * tree-ssa.doxy: New file.
+ * contrib/gcc_update (files_and_dependencies): Handle
+ libbanshee and libmudflap.
+
+2004-04-12 Kelley Cook <kcook@gcc.gnu.org>
+ Andreas Jaeger <aj@suse.de>
+
+ * gcc_update (files_and_dependencies): Insert zlib dependencies.
+
+2004-04-09 Nathanael Nerode <neroden@gcc.gnu.org>
+
+ * gcc_update (files_and_dependencies): libobjc/configure now
+ depends on configure.ac, not configure.in.
+
+2004-04-01 Kelley Cook <kcook@gcc.gnu.org>
+
+ * gcc_update (files_and_dependencies): Remove gcc/ada/*.texi.
+
+2004-03-16 Andreas Tobler <a.tobler@schweiz.ch>
+
+ * gcc_update (files_and_dependencies): Insert libffi dependencies.
+
+2004-03-10 Kelley Cook <kcook@gcc.gnu.org>
+
+ * gcc_update (files_and_dependencies): Insert libada/configure.
+
+2004-02-29 Andrew Pinski <pinskia@physics.uc.edu>
+
+ * gcc_update (files_and_dependencies): Update intl/configure.in
+ to intl/configure.ac.
+
+2004-01-09 Kelley Cook <kcook@gcc.gnu.org>
+
+ * gcc_update (files_and_dependencies): Update fastjar/configure.in
+ to fastjar/configure.ac
+
+2004-01-07 Kelley Cook <kcook@gcc.gnu.org>
+
+ * gcc_update (files_and_dependencies): Remove non-existent files.
+
+2004-01-05 Kelley Cook <kcook@gcc.gnu.org>
+
+ * gcc_update (files_generated): Revert 2003-12-18 change.
+
+2004-01-03 Andrew Pinski <pinskia@physics.uc.edu>
+
+ * gcc_update (files_and_dependencies): For gcc directory,
+ rename configure.in to configure.ac.
+
+2003-12-19 Andreas Tobler <a.tobler@schweiz.ch>
+
+ * gcc_update (files_and_dependencies): For libjava/libltdl directory,
+ remove acconfig.h. No longer used.
+
+2003-12-18 Kelley Cook <kcook@gcc.gnu.org>
+
+ * gcc_update (files_generated): Add in gcc/ada/stamp-xgnatug and update
+ gcc/ada/gnat_ug_* to use it.
+
+2003-12-16 James E Wilson <wilson@specifixinc.com>
+
+ * gcc_update (files_and_dependencies): For libjava/libltdl directory,
+ rename configure.in to configure.ac, and stamp-h.in to config-h.in.
+
+2003-12-11 Kelley Cook <kcook@gcc.gnu.org>
+
+ * gcc_update (files_and_dependencies): Correct typo in the filename
+ gnat_ug_wnt.texi.
+
+2003-12-08 Arnaud Charlet <charlet@act-europe.fr>
+
+ * gcc_update: Remove handling of sinfo.h, einfo.h, nmake.ads,
+ treeprs.ads.
+
+2003-10-16 Mark Mitchell <mark@codesourcery.com>
+
+ * gennews (files): Add GCC 3.3 files.
+
+2003-08-21 Mark Mitchell <mark@codesourcery.com>
+
+ * gcc_build: Fix handling of -t option. Add -x option.
+
+2003-07-31 Matt Kraai <kraai@alumni.cmu.edu>
+
+ * texi2pod.pl: Remove extra line.
+
+2003-07-15 Matt Kraai <kraai@alumni.cmu.edu>
+
+ * texi2pod.pl: Default @itemize's parameter to @bullet.
+
+2003-07-12 Zack Weinberg <zack@codesourcery.com>
+
+ * gcc_update: gcc/acconfig.h no longer exists.
+
+2003-07-11 Matthias Klose <doko@debian.org>
+
+ * test_installed: Add options to run objc tests.
+ In generated site.exp, initialize rootme, CFLAGS, CXXFLAGS.
+
+2003-07-04 Zack Weinberg <zack@codesourcery.com>
+
+ * gcc_update: Remove gcc/intl/plural.c from list.
+ Add new generated files intl/plural.c, intl/configure,
+ intl/config.h.in.
+
+2003-06-13 Jason Thorpe <thorpej@wasabisystems.com>
+
+ * gcc_update (files_and_dependencies): Add
+ gcc/testsuite/gcc.dg/cpp/_Pragma3.c depends on
+ gcc/testsuite/gcc.dg/cpp/mi1c.h.
+
+2003-05-23 Nathanael Nerode <neroden@gcc.gnu.org>
+
+ * paranoia.cc: Fix spelling error.
+
+ * analyze_brprob, gcc_build, gennews, texi2pod.pl: Change GNU CC
+ to GCC.
+
+2003-04-04 Mike Stump <mrs@apple.com>
+
+ * compare_tests: Fix exit status and be more flexible with spacing.
+
+2003-03-08 Phil Edwards <pme@gcc.gnu.org>
+
+ * test_summary: Add -h, print existing comments as help.
+
+2003-03-03 H.J. Lu <hjl@gnu.org>
+
+ * gcc_build: Use $GCC_CVS/$CVS instead of cvs.
+ * gcc_update: Likewise.
+
+2003-01-19 Alexandre Oliva <aoliva@redhat.com>
+
+ * test_summary (configflags): Only use the first match. Remove
+ excess space. Use sub instead of gsub where possible. Use `none'
+ if no configure flags were given.
+
+2003-01-10 Loren J. Rittle <ljrittle@acm.org>
+
+ * test_summary (configflags): awk portability.
+
+2003-01-07 Alexandre Oliva <aoliva@redhat.com>
+
+ * test_summary (configflags): Compute correctly with both
+ autoconfiscated and Cygnus-style top-level.
+
+2003-01-03 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gcc_update (files_and_dependencies): Add gcc/cp/cfns.h depends
+ on gcc/cp/cfns.gperf and gcc/java/keyword.h depends on
+ gcc/java/keyword.gperf
+
+2002-12-29 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * gcc_update (files_and_dependencies): Add gcc/intl/plural.c as
+ depending on gcc/intl/plural.y.
+
+2002-12-28 Alexandre Oliva <aoliva@redhat.com>
+
+ * gcc_update (configure): Depend on config/acx.m4 as well.
+
+ * gcc_update (configure): Depend on configure.in.
+
+2002-12-24 Joseph S. Myers <jsm@polyomino.org.uk>
+
+ * texi2pod.pl: Skip contents of @copying.
+
+2002-11-09 Zack Weinberg <zack@codesourcery.com>
+
+ * gthr_supp_vxw_5x.c: New file.
+
+2002-10-21 Richard Henderson <rth@redhat.com>
+
+ * paranoia.cc (real_c_float::image): Accommodate size of
+ real_internal format.
+ (main): Unbuffer stdio. Add real_internal format.
+ (floor_log2_wide): New.
+
+2002-10-16 Richard Henderson <rth@redhat.com>
+
+ * paranoia.cc (ENUM_BITFIELD): New.
+ (class): Define as klass around real.h.
+ (real_c_float): Not a template any longer; define MODE as a
+ class static constant; use real_format elements for SIZE.
+ Update uses of real_to_decimal and real_to_hexadecimal.
+ (main): Change -g argument to use a format name.
+ (mode_for_size): Remove.
+
+2002-09-16 Richard Henderson <rth@redhat.com>
+
+ * paranoia.cc: New file.
+
+2002-09-04 Richard Henderson <rth@redhat.com>
+
+ * enquire.c: Remove.
+
+2002-08-14 Mark Mitchell <mark@codesourcery.com>
+
+ * gennews (files): Add GCC 3.2 files.
+
+2002-07-22 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * gennews (files): Add proper files for the GCC 3.1 release series.
+ Simplify and reformat introductory wording.
+
+2002-07-16 Nathanael Nerode <neroden@gcc.gnu.org>
+
+ * enquire.c: Move from gcc.
+
+2002-07-16 H.J. Lu <hjl@gnu.org>
+
+ * gcc_update (touch_files): Pass -s to ${MAKE}.
+
+2002-06-26 Zack Weinberg <zack@codesourcery.com>
+
+ * texi2pod.pl: Correct handling of the internal R<> sequence.
+
+2002-05-31 Florian Weimer <fw@deneb.enyo.de>
+
+ * gcc_update (files_and_dependencies): Add generated Texinfo files
+ for Ada.
+
+2002-05-16 Rainer Orth <ro@TechFak.Uni-Bielefeld.DE>
+
+ * test_installed: Allow for PWDCMD to override hardcoded pwd.
+ * test_summary: Likewise.
+
+2002-04-18 Geoffrey Keating <geoffk@redhat.com>
+
+ * gcc_update (touch_files): Don't have empty for loop.
+
+2002-04-16 Richard Henderson <rth@redhat.com>
+
+ * gcc_update: Remove libchill.
+
+2002-03-25 Zack Weinberg <zack@codesourcery.com>
+
+ * texi2pod.pl: Handle @end ftable and @end vtable.
+
+2002-03-11 Zack Weinberg <zack@codesourcery.com>
+
+ * texi2pod.pl: Handle @include, @ftable, @vtable.
+ Reformat some code for clarity.
+
+2002-02-24 Christian Jönsson <c.christian.joensson@telia.com>
+
+ * test_summary: Additional to XPASS and FAIL, add UNRESOLVED,
+ WARNING and ERROR output from the *.sum files.
+
+2002-01-26 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * gennews (files): Replace gcc-2.95/gcc-2.95.html,
+ gcc-2.95/gcc-2.95.1.html, gcc-2.95/gcc-2.95.2.html,
+ and gcc-2.95/gcc-2.95.3.html by gcc-2.95/index.html.
+
+2002-01-16 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * gennews (files): Replace egcs-1.1/egcs-1.1.2.html,
+ egcs-1.1/egcs-1.1.1.html, and egcs-1.1/egcs-1.1.html by
+ egcs-1.1/index.html.
+
+2002-01-01 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * warn_summary: Add -ada subdirectory flag.
+
+2001-12-12 Matthias Klose <doko@debian.org>
+
+ * texi2pod.pl: Merge changes from binutils' texi2pod.pl. Allows
+ generation of more than one man page from one source.
+ Add '-' to set of valid chars for an identifier.
+ Let -D option accept flags of the form <flag>=<value>.
+ Use \s+ for whitespace detection in '@c man' lines.
+ Handle @set and @clear independent of $output.
+ Substitute all @value{}'s in a line.
+
+2001-11-14 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * texi2pod.pl: Handle @ifnottex, @iftex and @display. Handle @var
+ in verbatim blocks specially. Handle @unnumbered, @unnumberedsec
+ and @center. Allow [a-z] after @enumerate. Handle 0 and numbers
+ greater than 9 in enumerations.
+
+2001-11-07 Laurent Guerby <guerby@acm.org>
+
+ * gcc_update (files_and_dependencies): Add Ada dependencies.
+
+2001-10-08 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * snapshot: Remove.
+
+2001-09-07 Richard Sandiford <rsandifo@redhat.com>
+
+ Revert:
+ * gcc_update: (files_and_dependencies) Add gcc/java/parse.c
+ gcc/java/parse-scan.c and gcc/java/keyword.h to list of files to
+ touch.
+
+2001-09-04 David.Billinghurst <David.Billinghurst@riotinto.com>
+
+ * gcc_update: (files_and_dependencies) Add gcc/java/parse.c
+ gcc/java/parse-scan.c and gcc/java/keyword.h to list of files to
+ touch.
+
+Mon Aug 13 02:29:08 CEST 2001 Jan Hubicka <jh@suse.cz>
+
+ * analyze_brprob: Update for changes in gcc debug output.
+
+Sat Jul 28 22:37:49 CEST 2001 Jan Hubicka <jh@suse.cz>
+
+ * analyze_brprob: Avoid more overflows.
+
+2001-07-27 Richard Henderson <rth@redhat.com>
+
+ * test_summary: Copy LAST_UPDATED UTC time to head of summary.
+
+Fri Jul 27 18:01:21 CEST 2001 Jan Hubicka <jh@suse.cz>
+
+ * analyze_brprob: Avoid overflows.
+
+2001-07-27 Richard Henderson <rth@redhat.com>
+
+ * gcc_update: Dump timestamp in LAST_UPDATED.
+
+2001-07-26 Andreas Jaeger <aj@suse.de>,
+ Hans-Peter Nilsson <hp@bitrange.com>
+
+ * analyze_brprob: Fix documentation.
+
+Mon Jul 23 15:47:19 CEST 2001 Jan Hubicka <jh@suse.cz>
+
+ * analyze_brprob: Fix awk compatibility problems; update comment.
+
+2001-07-23 Andreas Jaeger <aj@suse.de>
+
+ * analyze_brprob: Fix more typos.
+
+2001-07-23 Andreas Jaeger <aj@suse.de>
+
+ * analyze_brprob: Fix typos.
+
+2001-07-03 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * texi2pod.pl: Handle @r inside @item.
+
+2001-07-02 Zack Weinberg <zackw@stanford.edu>
+
+ * gcc_update: Remove entries for gcc.1, cpp.1, gcov.1.
+
+2001-07-01 Zoltan Felleg <zfelleg@telnet.hu>
+
+ * warn_summary: Fix typo in a comment.
+
+2001-06-14 Albert Chin-A-Young <china@thewrittenword.com>
+
+ * contrib/gcc_update: Fix timestamp on gcc/f/intdoc.texi.
+
+2001-06-13 Mark Mitchell <mark@codesourcery.com>
+
+ * gennews: Set TERM to vt100 for Lynx.
+
+2001-06-13 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * release: Remove.
+
+Tue Jun 12 12:21:40 CEST 2001 Jan Hubicka <jh@suse.cz>
+
+ * analyze_brprob: New file.
+
+2001-06-11 Mark Mitchell <mark@codesourcery.com>
+
+ * gcc_build: Output information about the commands used to
+ configure the compiler.
+
+2001-06-07 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * gennews: Update for GCC 3.0.
+
+2001-06-02 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * gcc_update: Update for move of documentation to gcc/doc.
+
+2001-05-28 Rainer Orth <ro@TechFak.Uni-Bielefeld.DE>
+
+ * contrib/test_summary (files): Sort before evaluating.
+
+2001-05-23 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * gcc_update (UPDATE_OPTIONS): Add -d to the default settings.
+
+2001-05-21 Mark Mitchell <mark@codesourcery.com>
+
+ * gcc_build: Use -d when invoking gcc_update.
+
+2001-05-18 Andreas Jaeger <aj@suse.de>
+
+ * gcc_update: Add rules for libf2c/libI77.
+
+2001-05-17 Alexandre Oliva <aoliva@redhat.com>
+
+ * gcc_update (touch_files): Use simpler, yet as portable, syntax.
+
+2001-05-14 Loren J. Rittle <ljrittle@acm.org>
+
+ * gcc_update (touch_files): Enhance make portability.
+
+2001-05-14 Alexandre Oliva <aoliva@redhat.com>
+
+ * gcc_update (touch_files): Use a Makefile to touch files.
+
+2001-05-03 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * texi2pod.pl: Add copyright and GPL notices.
+
+2001-01-24 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * texi2pod.pl: Handle "\,".
+
+2001-01-15 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * texi2pod.pl: Fix regular expression for @r to avoid exponential
+ recursion. From Russ Allbery <rra@stanford.edu>. Remove perl
+ version check.
+
+2001-01-15 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * warn_summary (stageNfilter): Update for recent changes in
+ bootstrap logic.
+
+2001-01-14 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * texi2pod.pl: Require at least perl 5.6.0.
+
+2001-01-13 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * gcc_update: Add gcc/gcc.1 to generated files.
+ * texi2pod.pl: Handle @r and @gccoptlist. Handle @gol. Handle
+ discarding to end of sentence with @xref where the sentence has an
+ interior "." in markup, and handle discarding parentheses around
+ such a sentence.
+
+2001-01-11 Bernd Schmidt <bernds@redhat.com>
+
+ * gennews: Add gcc-2.95.3.
+
+2001-01-10 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * texi2pod.pl: Handle @gcctabopt and @env in tables. Handle
+ @command. Format URLs and email addresses in bold.
+
+2001-01-03 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * gcc_update: Add cpp.1 to the list of generated files.
+
+ * texi2pod.pl: Handle @option and @env.
+
+2001-01-03 Mike Stump <mrs@wrs.com>
+
+ * snapshot: Update to account for java libraries.
+
+2000-12-28 Jeffrey Oldham <oldham@codesourcery.com>
+
+ * test_summary: Export filesuffix, not fileprefix.
+
+2000-12-22 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * release: Change some EGCS references to GCC.
+
+2000-12-14 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * warn_summary: Fix subdirectory filtering. Add -intl and -fixinc
+ subdirectory flags. Add source directory prefix filtering.
+ Redirect diagnostic output to stderr.
+
+2000-12-07 Zack Weinberg <zack@wolery.stanford.edu>
+
+ * texi2pod.pl: If multiple @c man sections with the same tag
+ appear, concatenate them in the final output. When skipping,
+ ignore block commands that can't cause skipping, and honor
+ those that can. Ensure that verbatim blocks are separate
+ paragraphs.
+
+2000-12-07 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * gcc_update: Don't touch tradcif.c or java/parse.h.
+
+2000-12-05 Zack Weinberg <zack@wolery.stanford.edu>
+
+ * texi2pod.pl: Restructure for comprehensibility, add
+ comments. Merge handling of @ignore and @ifxxx. Handle a
+ whole bunch more Texinfo commands. Use consistent formatting
+ style.
+
+2000-12-04 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * gennews: New script.
+
+2000-11-22 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * gcc_update: Update a comment as we now require bison for CVS users.
+
+2000-11-22 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * gcc_update: Add gcov.1 to the list of generated files.
+
+2000-11-21 Mark Mitchell <mark@codesourcery.com>
+
+ * gcc_build: Remove code to put information in a log file.
+
+2000-11-19 Zack Weinberg <zackw@stanford.edu>
+
+ * texi2pod.pl:
+ - Add real command line parsing.
+ - Support @ifset, @ifclear, @set, @value, -D switch.
+ - Support @sc. Improve handling of @ref and friends.
+ - Discard @subsection, @need, @node lines.
+ - Un-nest font changes to match texinfo semantics.
+ - Handle @{ and @}. Oops.
+ - Don't emit E<> directives inside verbatim blocks.
+
+2000-11-12 Bruce Korb <bkorb@gnu.org>
+
+ * release: generalize the release script a bit.
+
+Sat Nov 11 17:29:03 2000 Mark P Mitchell <mark@codesourcery.com>
+
+ * gcc_build: Add -o option for setting the objdir to use.
+
+2000-11-11 Jeff Law <law@redhat.com>,
+
+ * release: New file.
+
+2000-11-08 Jeff Law <law@redhat.com>,
+ Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * snapshot: New file.
+
+2000-11-08 Jeffrey Oldham <oldham@oz.codesourcery.com>
+
+ * gcc_build (bootstrap_gcc): New function.
+ (configure_gcc): Likewise.
+ (build_gcc): Rewritten to use configure and bootstrap.
+ (MAKE_BOOTSTRAP_OPTIONS): Replaced MAKE_OPTIONS.
+
+2000-10-31 Mark Mitchell <mark@codesourcery.com>
+
+ * gcc_build (MAKE): New variable.
+ (build_gcc): Use it. Fix logging of errors.
+ (install_gcc): Likewise.
+
+2000-10-29 Mark Mitchell <mark@codesourcery.com>
+
+ * gcc_build: Save the output from CVS into the logfile as well.
+
+2000-10-25 Mark Mitchell <mark@codesourcery.com>
+
+ * gcc_build: Fix typos.
+
+2000-10-14 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * gperf-2.7-19981006.pat: Remove.
+
+2000-10-08 Joseph S. Myers <jsm28@cam.ac.uk>
+
+ * test_installed: Change EGCS references to refer to GCC.
+
+2000-09-28 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * gcc_update (touch_files): Add some informative output.
+
+2000-09-16 Andreas Jaeger <aj@suse.de>
+
+ * gcc_update: Remove gcc/c-parse.gperf.
+
+2000-08-30 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>,
+ Alexandre Oliva <aoliva@redhat.com>
+
+ * gcc_update: Execute touch_files_reexec even if `cvs update`
+ failed.
+
+2000-08-09 Alexandre Oliva <aoliva@redhat.com>
+
+ * test_summary: AWK breaks with filenames containing `='.
+
+Sun Jul 16 12:04:33 2000 Mark P Mitchell <mark@codesourcery.com>
+
+ * gcc_build: New script.
+
+2000-07-13 Mark Mitchell <mark@codesourcery.com>
+
+ * gcc_update (files_and_dependencies): Remove generated YACC files.
+
+2000-07-12 Mark Mitchell <mark@codesourcery.com>
+
+ * gcc_update (files_and_dependencies): Remove generated YACC files.
+
+2000-06-04 Mark Mitchell <mark@codesourcery.com>
+
+ * newcvsroot: Handle filenames that contain spaces.
+
+2000-06-03 Zack Weinberg <zack@wolery.cumb.org>
+
+ * test_summary: In generated script, use cat <<'EOF' not cat <<\EOF.
+ Elide --with-gcc-version-trigger and --norecursion from
+ configure flags. Remove code to report status of haifa scheduler.
+
+2000-05-18 Alexandre Oliva <aoliva@cygnus.com>
+
+ * gcc_update (self): Set to `$0'.
+ (touch_files_reexec): Use `$self' instead of `$0'.
+
+2000-05-12 Alexandre Oliva <aoliva@cygnus.com>
+
+ * gcc_update (touch_files_reexec): New function, run after the
+ tree is modified.
+
+2000-05-08 Richard Hendeson <rth@cygnus.com>
+
+ * gcc_update: Remove references to inclhack.tpl.
+
+2000-04-28 Jason Merrill <jason@casey.cygnus.com>
+
+ * index-prop: Use a single pattern. Also support *** cdiffs.
+
+2000-04-28 Pavel Roskin <pavel_roskin@geocities.com>
+
+ * index-prop: Don't change /dev/null.
+
+2000-04-27 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * gcc_update: Refer to GCC and gcc_update instead of egcs and
+ egcs_update.
+
+2000-04-26 Jonathan Larmour <jlarmour@redhat.co.uk>
+
+ * index-prop: Fix occasional problem when using cvs diff -p.
+
+2000-04-18 Zack Weinberg <zack@wolery.cumb.org>
+
+ * gcc_update: Remove references to cexp.c/cexp.y.
+
+1999-12-18 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * newcvsroot: Add check on the number of command-line arguments.
+ Add usage.
+
+Sun Nov 28 00:41:44 1999 William Bader (william@nscs.fast.net)
+
+ * gcc_update: Allow patches compressed by bzip2.
+
+1999-10-11 Martin v. Löwis <loewis@informatik.hu-berlin.de>
+
+ * newcvsroot: New file.
+
+1999-09-11 Craig Burley <craig@jcb-sc.com>
+
+ * convert_to_f2c, convert_to_g2c, download_f2c: New file.
+
+1999-08-16 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * gcc_update: New file.
+ * egcs_update: Renamed to gcc_update.
+
+1999-08-09 Robert Lipe <robertlipe@usa.net>
+
+ * test_summary: Quote curly braces in 1999-07-03 change.
+
+1999-07-28 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * egcs_update (files_and_dependencies): Fixed typo in
+ gcc/cstamp-h.in. Added gcc/config.in.
+
+1999-07-27 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * egcs_update (files_and_dependencies): New function, with
+ complete list of files to be updated, as well as their
+ dependencies.
+ (touch_files): Check the timestamp of each generated file against
+ its dependencies'.
+ (main): New flags --touch, --list and --help. Remove the
+ pre-update step.
+
+1999-07-17 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * test_summary: Replace egcs with gcc. Update e-mail address.
+
+1999-07-05 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+ Jerry Quinn <jquinn@nortelnetworks.com>
+
+ * egcs_update (touch_files, apply_patch): New functions.
+ Use them. New command-line option --patch. Split test of local
+ tree into two parts. Add comments.
+
+1999-07-03 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * test_summary: If Target is `unix{*}', append the Target variants
+ to Host.
+
+1999-06-12 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * test_summary: Set default mail-address and version for egcs
+ instead of relying on unpredictable pathnames.
+ Reported by Andreas Jaeger <aj@arthur.rhein-neckar.de>
+
+Fri Apr 2 16:09:02 1999 Jeffrey A Law (law@cygnus.com)
+
+ * fixinc/*: Delete obsolete files.
+
+1999-02-04 Robert Lipe <robertlipe@usa.net>
+
+ * egcs_update: Test return values of 'cvs update'. Propagate
+ to caller as exit values.
+
+1999-01-25 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * egcs_update: Use "if" instead of "&&". Touch generated files
+ only after the corresponding *.y files.
+
+1999-01-19 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * egcs_update: Do not use xargs, but a backquote construct.
+
+1999-01-07 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * test_summary (version): Remove carriage return that gawk inserts
+ in the version string for some reason.
+
+1998-11-30 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * egcs_update: Only touch files that already exist.
+
+1998-11-29 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * test_summary (EOF): Remove double backslash.
+ Reported by Franz Sirl <Franz.Sirl-kernel@lauterbach.com>
+
+1998-11-28 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * test_summary (address): Set to egcs-testresults mailing list.
+
+1998-11-27 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * test_summary (address): Added Marc Lehmann's testsuite-results
+ to the default e-mail address.
+
+1998-11-25 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * test_summary (-p, prepend_logs): Add these before the summary.
+ (Compiler, Platform): Print these just before configflags.
+
+Sat Oct 31 10:53:40 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * warn_summary (longLineFilter): New shell function to encapsulate
+ this functionality. It is off by default, only active if -llf
+ flag is specified.
+ (subdirectoryFilter): Fix bug in filtering which made some
+ subdirectory warnings erroneously appear in the toplevel set.
+ (stageNfilter): Renamed from `stageNwarns'. Updated to collect
+ warnings from stage1 as well as stage0, which means warnings from
+ outside the bootstrap directory. Eg, the libraries, etc.
+ (warningFilter): New shell function to encapsulate this
+ functionality.
+ (keywordFilter): New shell function to encapsulate this
+ functionality.
+
+ Store data in a temp file rather than calculating it 3x. Arrange
+ to remove it on exit and signals.
+
+ Add -pass/-wpass flags to do "pass through" (i.e. manual
+ inspection) of bootstrap output from a particular stageN as well
+ as language subdirs.
+
+ Add better comments/documentation.
+
+Sat Oct 31 16:39:31 1998 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * egcs_update: Add comment about keeping the FAQ synchronized.
+
+Fri Oct 30 00:39:27 1998 Jeffrey A Law (law@cygnus.com)
+
+ * egcs_update: Do touch java/parse.c and java/parse-scan.c. They're
+ in the repo again.
+
+Fri Oct 16 07:35:00 1998 Bruce Korb <korb@datadesign.com>
+
+ * egcs_update: Added gcc/fixinc/* generated files to touch list.
+
+Tue Oct 13 23:28:33 1998 Jeffrey A Law (law@cygnus.com)
+
+ * egcs_update: Remove gcc/java/parse.c from list of files to
+ touch.
+
+Wed Oct 7 13:00:40 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * gperf-2.7-19981006.pat: New file, patch for egcs-local gperf.
+
+Mon Oct 5 14:19:48 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * warn_summary (subdirectoryFilter): New shell function to
+ optionally filter in/out gcc subdirectories when summarizing
+ warnings. Add new flags to support subdirectory filtering.
+
+ Also, ensure the bootstrap stage is a number. Add some more C
+ keywords that are preserved in the "warning type" summary and
+ tighten up the "arg ???" regexp.
+
+Tue Sep 22 07:30 Bruce Korb <korb@datadesign.com>
+
+ * fixinc/inclhack.def: Not all C++ comments in C headers
+ were treated alike. They are now. Also fixed syntax
+ of sed expression in "systypes" fix.
+
+ * fixinc/inclhack.def: Removed SVR4.2-ism from shell invocation
+
+ * egcs_update: Added fixinc/* generated files to touch list.
+
+Wed Sep 16 16:06:51 1998 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * egcs_update: Additionally touch gcc/java/parse.[ch].
+
+Thu Sep 9 16:48 Bruce Korb <korb@datadesign.com>
+
+ * fixinc/inclhack.def: Added two files required by
+ SCO's Open Server 5's avoid_bool fix.
+ Regenerated fixinc.x and inclhack.sh to incorporate
+ the update.
+
+Thu Sep 3 10:11:32 1998 Robert Lipe <robertl@dgii.com>
+
+ * egcs_update: Do the pass 1 CVS update only for files that
+ may reasonably be under CVS control.
+
+1998-08-14 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * test_installed: New script for testing already-installed
+ gcc/g++/g77.
+
+Wed Aug 12 19:59:36 1998 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * egcs_update: Assigned copyright to FSF.
+
+Tue Aug 11 17:55:53 1998 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+ Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * egcs_update: New switch --nostdflags and documentation
+ enhancements.
+
+Tue Aug 11 17:33:19 1998 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * egcs_update: New script.
+
+1998-08-05 Bruce Korb <korbb@datadesign.com>
+
+ * fixinc/Makefile
+ Added define for target machine so machine-specific tests
+ can be selected for or against.
+
+ * fixinc/fixincl.c
+ Added an array of string pointers to machines to select
+ or avoid, depending on a FD_MACH_IFNOT bit flag.
+ Used a shell script to match the defined TARGET_MACHINE
+ with any of the given match patterns.
+
+ * fixinc/fixincl.tpl
+ Generate the array of strings and bit flag, as needed,
+ depending on "mach" and "not_machine" attributes for a fix.
+
+ * fixinc/mkfixinc.sh
+ Invoke the make with TARGET assigned the value of the
+ machine name argument.
+
+Mon Jul 27 22:08:12 1998 Mike Stump (mrs@wrs.com)
+
+ * compare_tests: New script.
+
+1998-07-28 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * test_summary: Assigned copyright to FSF.
+
+Mon Jul 27 20:33:02 1998 Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
+
+ * test_summary: Corrected script name in comments providing
+ documentation. Added linebreaks for lines with > 80 characters.
+
+Fri Jun 19 02:36:59 1998 Alexandre Oliva <oliva@dcc.unicamp.br>
+
+ * test_summary: New switch, -i, and environment variable,
+ append_logs, for including files in the report.
+
+1998-06-01 Manfred Hollstein <manfred@s-direktnet.de>
+
+ * warn_summary: Update to Kaveh's latest version allowing to
+ specify the last stage built.
+
+1998-05-29 Bruce Korb <korbb@datadesign.com>
+
+ * fixinc/mkfixinc.sh
+ Changes to make it easier to invoke on platforms that
+ normally do not invoke fixincludes.
+
+ * fixinc/inclhack.def
+ Applied fixes from egcs/gcc/fixincludes from the past several
+ months.
+
+1998-05-28 Bruce Korb <korbb@datadesign.com>
+
+ * fixinc/*: Updated most everything for a first real
+ try at getting "fast_fixincludes" working.
+
+1998-05-28 Jason Merrill <jason@yorick.cygnus.com>
+
+ * index-prop: New file.
+
+Sat May 23 23:38:49 1998 Matthias Klose <doko@cs.tu-berlin.de>
+
+ * test_summary: find good awk (copied from warn_summary).
+
+Sat May 23 23:38:33 1998 Jeffrey A Law (law@cygnus.com)
+
+ * test_summary, warn_summary: New files
diff --git a/gcc-4.3.1/contrib/ChangeLog.tree-ssa b/gcc-4.3.1/contrib/ChangeLog.tree-ssa
new file mode 100644
index 000000000..2c1165f79
--- /dev/null
+++ b/gcc-4.3.1/contrib/ChangeLog.tree-ssa
@@ -0,0 +1,46 @@
+2004-03-25 Diego Novillo <dnovillo@redhat.com>
+
+ * gcc_update (files_and_dependencies): Add libbanshee and
+ libmudflap dependencies.
+
+2003-11-27 Diego Novillo <dnovillo@redhat.com>
+
+ * tree-ssa.doxy (FILE_PATTERNS): Update.
+
+2003-11-21 Diego Novillo <dnovillo@redhat.com>
+
+ * tree-ssa.doxy: Do not generate latex output.
+
+2003-07-21 Diego Novillo <dnovillo@redhat.com>
+
+ * tree-ssa.doxy: Include tree* files
+
+2003-07-15 Diego Novillo <dnovillo@redhat.com>
+
+ * tree-ssa.doxy: Add tree-must-alias.c.
+
+2003-01-28 Diego Novillo <dnovillo@redhat.com>
+
+ * filter_params.pl: Surround comments in @verbatim/@endverbatim.
+
+2003-01-19 Diego Novillo <dnovillo@redhat.com>
+
+ * tree-ssa.doxy (OUTPUT_DIRECTORY, INPUT_FILTER): Replace
+ hardwired values with replaceable strings.
+
+2003-01-18 Diego Novillo <dnovillo@redhat.com>
+
+ * filter_params.pl: Change most comments to start with /**.
+
+2002-12-23 Steven Bosscher <Steven.Bosscher@usafa.af.mil>
+
+ * filter_params.pl: Filter ATTRIBUTE_UNUSED.
+
+2002-12-12 Daniel Berlin <dberlin@dberlin.org>
+ Steven Bosscher <Steven.Bosscher@usafa.af.mil>
+ Diego Novillo <dnovillo@redhat.com>
+
+ * filter_gcc_for_doxygen: New file.
+ * filter_knr2ansi.pl: New file.
+ * filter_params.pl: New file.
+ * tree-ssa.doxy: New file.
diff --git a/gcc-4.3.1/contrib/analyze_brprob b/gcc-4.3.1/contrib/analyze_brprob
new file mode 100755
index 000000000..093c1dea7
--- /dev/null
+++ b/gcc-4.3.1/contrib/analyze_brprob
@@ -0,0 +1,146 @@
+#!/usr/bin/awk -f
+# Script to analyze experimental results of our branch prediction heuristics
+# Contributed by Jan Hubicka, SuSE Inc.
+# Copyright (C) 2001, 2003 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING. If not, write to
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+# This script is used to calculate two basic properties of the branch prediction
+# heuristics - coverage and hitrate.  Coverage is the number of executions of a
+# given branch matched by the heuristics, and hitrate is the probability that a
+# branch predicted as taken is really taken.
+#
+# These values are useful to determine the quality of given heuristics. Hitrate
+# may be directly used in predict.c.
+#
+# Usage:
+# Step 1: Compile and profile your program. You need to use -fprofile-arcs
+# flag to get the profiles
+# Step 2: Generate log files. The information about given heuristics are
+# saved into *.bp dumps. You need to pass the -db switch to the compiler as well
+# as -fbranch-probabilities to get the results of profiling noted in the dumps.
+# Ensure that there are no "Arc profiling: some edge counts were bad." warnings.
+# Step 3: Run this script to concatenate all *.life files:
+# analyze_brprob `find . -name *.life`
+# The information is collected and printed once all files are parsed. This
+# may take a while.
+# Note that the script does use bc to perform long arithmetic.
+# Step 4: Read the results. Basically the following table is printed:
+# (this is just an example from a very early stage of branch prediction pass
+# development, so please don't take these numbers seriously)
+#
+#HEURISTICS BRANCHES (REL) HITRATE COVERAGE (REL)
+#opcode 2889 83.7% 94.96%/ 97.62% 7516383 75.3%
+#pointer 246 7.1% 99.69%/ 99.86% 118791 1.2%
+#loop header 449 13.0% 98.32%/ 99.07% 43553 0.4%
+#first match 3450 100.0% 89.92%/ 97.27% 9979782 100.0%
+#loop exit 924 26.8% 88.95%/ 95.58% 9026266 90.4%
+#error return 150 4.3% 64.48%/ 86.81% 453542 4.5%
+#call 803 23.3% 51.66%/ 98.61% 3614037 36.2%
+#loop branch 51 1.5% 99.26%/ 99.27% 26854 0.3%
+#noreturn call 951 27.6% 100.00%/100.00% 1759809 17.6%
+#
+# The heuristic called "first match" is a heuristic used by GCC branch
+# prediction pass and it predicts 89.92% branches correctly.
+#
+# The quality of heuristics can be rated using both coverage and hitrate
+# parameters. For example "loop branch" heuristics (predicting loopback edge
+# as taken) have both very high hitrate and coverage, so it is very useful.
+# On the other hand, "exit block" heuristics (predicting exit edges as not
+# taken) have good hitrate, but poor coverage, so only 3 branches have been
+# predicted. The "loop header" heuristic has problems, since it tends to
+# mispredict.
+#
+# The implementation of this script is somewhat brute force. My awk skills
+# are limited.
+
+function longeval(e)
+{
+ e = "echo \"scale = 2 ;"e"\" | bc"
+ e | getline res
+ close (e)
+ return res
+}
+
+BEGIN {nnames = 0}
+
+/^ .* heuristics: .*.$/ {
+ name=$0
+ sub (/^ /,"",name)
+ sub (/ heuristics: .*.$/,"",name)
+ if (!(name in branches))
+ {
+ names[nnames] = name
+ branches[name]=0
+ counts[name]=0
+ hits[name]=0
+ phits[name]=0
+ nnames++
+ }
+ branches[name]+=1
+ }
+
+/^ .* heuristics: .*. exec [0-9]* hit [0-9]* (.*.)$/ {
+ name=$0
+ sub (/^ /,"",name)
+ sub (/ heuristics: .*. exec [0-9]* hit [0-9]* (.*.)$/,"",name)
+ pred=$0
+ sub (/^ .* heuristics: /,"",pred)
+ sub (/. exec [0-9]* hit [0-9]* (.*.)$/,"",pred)
+ count=$0
+ sub (/^ .* heuristics: .*. exec /,"",count)
+ sub (/ hit [0-9]* (.*.)$/,"",count)
+ hit=$0
+ sub (/^ .* heuristics: .*. exec [0-9]* hit /,"",hit)
+ sub (/ (.*.)$/,"",hit)
+
+ if (int(pred) < 50.0)
+ {
+ hit = count"-"hit;
+ }
+ counts[name]=counts[name] "+" count
+ hits[name]=hits[name] "+" hit
+ phits[name]=phits[name] "+(("hit")<"count"/2)*("count"-("hit"))+(("hit")>="count"/2)*("hit")"
+
+ #BC crashes on long strings. Irritating.
+ if (length(counts[name]) > 2000)
+ counts[name] = longeval(counts[name])
+ if (length(hits[name]) > 2000)
+ hits[name] = longeval(hits[name])
+ if (length(phits[name]) > 2000)
+ phits[name] = longeval(phits[name])
+ }
+END {
+ # Heuristics called combined predicts just everything.
+ maxcounts = longeval(counts["combined"])
+ maxbranches = branches["combined"]
+ max = names["combined"]
+ printf("HEURISTICS BRANCHES (REL) HITRATE COVERAGE (REL)\n")
+ for (i = 0; i < nnames ; i++)
+ {
+ name = names[i]
+ counts[name] = longeval(counts[name])
+ printf ("%-26s %8i %5.1f%% %6s%% / %6s%% %12s %5.1f%%\n",
+ name,
+ branches[name], branches[name] * 100 / maxbranches,
+ longeval("("hits[name]") * 100 /(" counts[name]"-0.00001)"),
+ longeval("("phits[name]") * 100 /(" counts[name]"-0.00001)"),
+ counts[name], longeval(counts[name]" * 100 / ("maxcounts"-0.00001)"))
+ }
+}
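A minimal invocation sketch of the profile-then-analyze workflow described in the header comments of analyze_brprob above. The test program name is illustrative, and the dump switch and dump-file suffix (-db, *.bp, *.life in the comments) vary between GCC releases, so treat the flags below as assumptions to check against your compiler:

  # Step 1: build with arc profiling and run the program to collect counts.
  gcc -O2 -fprofile-arcs test.c -o test && ./test

  # Step 2: rebuild, feeding the profile back in and requesting the branch
  # prediction dumps (flag spelling taken from the header comments above).
  gcc -O2 -fbranch-probabilities -db test.c -o test

  # Step 3: summarize hitrate and coverage over all dumps; bc must be in PATH.
  contrib/analyze_brprob `find . -name '*.bp'`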
diff --git a/gcc-4.3.1/contrib/check_warning_flags.sh b/gcc-4.3.1/contrib/check_warning_flags.sh
new file mode 100755
index 000000000..95640f775
--- /dev/null
+++ b/gcc-4.3.1/contrib/check_warning_flags.sh
@@ -0,0 +1,136 @@
+#! /bin/sh
+#
+# Check that the warning flags documented in invoke.texi match up
+# with what the compiler accepts.
+#
+# Copyright (C) 2008 Free Software Foundation, Inc.
+# Written by Ralf Wildenhues <Ralf.Wildenhues@gmx.de>.
+#
+# This script is Free Software, and it can be copied, distributed and
+# modified as defined in the GNU General Public License. A copy of
+# its license can be downloaded from http://www.gnu.org/copyleft/gpl.html
+#
+# Call this script as
+# check_warning_flags.sh path/to/invoke.texi
+# with CC set to the compiler to be tested.
+# The script scribbles in the current directory.
+
+progname=`echo "$0" | sed 's,.*/,,'`
+usage ()
+{
+ echo "usage: $progname path/to/invoke.texi"
+ echo "set \$CC to the compiler to be checked"
+ exit 1
+}
+
+ret=0
+LC_ALL=C
+export LC_ALL
+: ${CC=gcc}
+test $# = 1 || usage
+invoke_texi=$1
+test -r "$invoke_texi" || {
+ echo "$progname: error: cannot read '$invoke_texi'" >&2
+ usage
+}
+filebase=check_warning_flags_file$$
+stderr=check_warning_flags_stderr$$
+
+remove_problematic_flags='
+ /-Wlarger-than-/d
+ /-W[alp],/d
+ /-Werror/d
+ /-Wpadded/d
+ /=/d'
+
+# Ensure that indexed warnings are accepted.
+set x `sed '/^@opindex W/{
+ s/^@opindex /-/
+ '"$remove_problematic_flags"'
+ /-W[alp]$/d
+ p
+}
+d' <"$invoke_texi"`
+shift
+: >$filebase.c
+$CC -c $filebase.c "$@" 2>&1 |
+ grep -v 'command line option.*is valid for.*but not for' >$stderr
+if test -s $stderr; then
+ echo "options listed in @opindex but not accepted by the compiler:" >&2
+ cat $stderr >&2
+ ret=1
+fi
+rm -f $filebase.c $stderr
+
+# Check documentation of warning options.
+for lang in c c++ objc obj-c++; do
+ case $lang in
+ c) ext=c; langmatch='[^-]C[^+].*only' ;;
+ c++) ext=C; langmatch='[^-]C++.*only' ;;
+ objc) ext=m; langmatch='Objective-C[^+].*only' ;;
+ obj-c++) ext=M; langmatch='Objective-C++.*only' ;;
+ esac
+ file=$filebase.$ext
+ : >$file
+ $CC -c $file 2>$stderr
+ if grep 'not installed on this system' $stderr >/dev/null ||
+ grep 'installation problem, cannot exec' $stderr >/dev/null ||
+ grep 'error trying to exec' $stderr >/dev/null
+ then
+ echo "$progname: $CC is not configured for language $lang, skipping checks" >&2
+ rm -f $file $filebase.o $filebase.obj $stderr
+ continue
+ fi
+
+ # Verify good warning flags.
+ set x `sed '
+ t a
+ :a
+ /^@item -W/{
+ /'"$langmatch"'/b x
+ / only)/d
+ b x
+ }
+ d
+ :x
+ '"$remove_problematic_flags"'
+ s/^@item //
+ s/ .*//
+ ' <"$invoke_texi"`
+ shift
+ $CC -c $file -O "$@" 2>$stderr
+ if test -s $stderr; then
+ echo failures: >&2
+ cat $stderr >&2
+ ret=1
+ fi
+
+ # Verify bad warning flags.
+ set x `sed '
+ t a
+ :a
+ /^@item -W/{
+ / only)/!d
+ /'"$langmatch"'/d
+ b x
+ }
+ d
+ :x
+ '"$remove_problematic_flags"'
+ s/^@item //
+ s/ .*//
+ ' <"$invoke_texi"`
+ shift
+ $CC -c $file -O "$@" 2>$stderr
+ # cat $stderr >&2
+ test $# = `grep 'command line option.*valid.*but not for' <$stderr | wc -l` || {
+ for warning
+ do
+ grep "command line option.*$warning.*valid" <$stderr >&2 ||
+ echo "valid for $lang but not annotated as such: $warning"
+ done
+ ret=1
+ }
+ rm -f $file $filebase.o $filebase.obj $stderr
+done
+exit $ret
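A usage sketch for check_warning_flags.sh, assuming it is run from the top of a finished GCC build tree; the xgcc/-B paths are illustrative, and CC may name any compiler to be checked:

  # Check the just-built driver against the warning flags documented in invoke.texi.
  CC='./gcc/xgcc -B./gcc/' \
    ../gcc-4.3.1/contrib/check_warning_flags.sh ../gcc-4.3.1/gcc/doc/invoke.texi
  echo "exit status: $?"    # non-zero means at least one mismatch was reported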
diff --git a/gcc-4.3.1/contrib/compare-debug b/gcc-4.3.1/contrib/compare-debug
new file mode 100755
index 000000000..6e979e9d7
--- /dev/null
+++ b/gcc-4.3.1/contrib/compare-debug
@@ -0,0 +1,76 @@
+#! /bin/sh
+
+# Compare stripped copies of two given object files.
+
+# Copyright (C) 2007 Free Software Foundation
+# Originally by Alexandre Oliva <aoliva@redhat.com>
+
+# This file is part of GCC.
+
+# GCC is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 3, or (at your option) any later
+# version.
+
+# GCC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+# License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+rm='rm -f'
+
+case $1 in
+-p | --preserve)
+ rm='echo preserving'
+ shift
+ ;;
+esac
+
+if test $# != 2; then
+ echo 'usage: compare-debug file1.o file2.o' >&2
+ exit 1
+fi
+
+if test ! -f "$1"; then
+ echo "$1" does not exist >&2
+ exit 1
+fi
+
+if test ! -f "$2"; then
+ echo "$2" does not exist >&2
+ exit 1
+fi
+
+suf1=stripped
+while test -f "$1.$suf1"; do
+ suf1=$suf1.
+done
+
+suf2=stripped
+while test -f "$2.$suf2"; do
+ suf2=$suf2.
+done
+
+trap 'rm -f "$1.$suf1" "$2.$suf2"' 0 1 2 15
+
+cp "$1" "$1.$suf1"
+strip "$1.$suf1"
+
+cp "$2" "$2.$suf2"
+strip "$2.$suf2"
+
+if cmp "$1.$suf1" "$2.$suf2"; then
+ status=0
+else
+ status=1
+fi
+
+$rm "$1.$suf1" "$2.$suf2"
+
+trap "exit $status; exit" 0 1 2 15
+
+exit $status
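A usage sketch for compare-debug with illustrative object paths from a bootstrapped tree (the stage directory names depend on how the tree was configured):

  # Exit status 0 if the stripped copies are identical, 1 if they differ.
  contrib/compare-debug prev-gcc/expr.o gcc/expr.o || echo 'expr.o differs'

  # -p keeps the intermediate .stripped files for inspection instead of removing them.
  contrib/compare-debug -p prev-gcc/expr.o gcc/expr.o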
diff --git a/gcc-4.3.1/contrib/compareSumTests3 b/gcc-4.3.1/contrib/compareSumTests3
new file mode 100755
index 000000000..64f35d74e
--- /dev/null
+++ b/gcc-4.3.1/contrib/compareSumTests3
@@ -0,0 +1,250 @@
+#!/usr/bin/perl
+
+# Three-way DejaGNU comparison; uses dglib.pm. Run perldoc on this file for
+# usage.
+#
+# Author: Matthew Sachs <msachs@apple.com>
+#
+# Copyright (c) 2006 Free Software Foundation.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING. If not, write to
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+
+=pod
+
+=head1 SYNOPSIS
+
+compareSumTests3 -- Two-way or three-way compare between DejaGNU .sum files
+
+=head1 USAGE
+
+ compareSumTests3 old1.sum [old2.sum] new.sum
+ compareSumTests3 -i 1:2 -x 2:3 old1.sum old2.sum new.sum
+
+=head1 DESCRIPTION
+
+Gives results in terms of 'new' (e.g. things that work in 'new' and don't in
+other compilers are improvements, things that don't in 'new' and do in others
+are regressions, and it tells you which of the two old compilers (or both)
+the test is a regression from.
+
+We treat any DG result other than PASS or XFAIL as a failure, e.g.
+UNRESOLVED, UNTESTED or test was not run.
+
+We merge some tests into 'logical tests' with multiple subphases.
+For instance, some tests will have compile, execute, and link
+subtests. For these tests, if one of the phases fails, we
+indicate which phase the failure originates in. For instance,
+in the following test results:
+
+ gcc.c-torture/compile_execute/xxxx.c: [FAIL:C,FAIL:X,PASS]
+
+the "compile_execute" replaces the compile or execute portion of the test name,
+and "FAIL:C" and "FAIL:X" indicates where the combined test failed.
+
+=head1 OPTIONS
+
+=head2 OVERVIEW
+
+=over 4
+
+=item *
+
+C<-i X:Y>: Only display differences between the two indicated runs.
+
+=item *
+
+C<-p>: Give plain output, suitable for piping to another program.
+
+=item *
+
+C<-x X:Y>: Exclude differences between the two indicated runs.
+
+=back
+
+=head2 PLAIN OUTPUT FORMAT
+
+In the plain
+output format, the category headers are not displayed and there are no tabs
+in front of each result line. Instead, each result line has two characters
+followed by a space in front of it. The first character will be either an 'I'
+for improvement or 'R' for regression; the second character will be a 1, 2, or 3,
+indicating which run was the odd one out.
+
+=head2 SELECTING CHANGE SUBSETS
+
+The following options cause only a selected subset of changes to be displayed.
+These options ask for a "run", a number which is used to select
+one of the three runs (C<old1>, C<old2>, or C<new>.) C<1> and C<2> signify C<old1> and C<old2>
+respectively; 3 signifies C<new>. If multiple options are given, the changes displayed
+will be those which obey all of the given restrictions.
+
+Typical usage of these options is to express something like "give me all changes
+between 2 and 3, except for those where there was the same difference betwen 1 and 2
+(as between 2 and 3.)" This would be given as:
+
+ -i 2:3 -x 1:2
+
+=over 4
+
+=item *
+
+C<-i X:Y>: Only differences which are present between the two runs given
+are displayed. For instance, if C<-i 1:2> is given and test A passes in
+runs 1 and 2 but fails in run 3, that result will not be displayed.
+
+=item *
+
+C<-x X:Y>: Differences which are identical to a difference between the two runs
+given will B<not> be displayed. For instance, if C<-x 1:2> is given and
+test A passes in run 1 and fails in runs 2 and 3, that result will not be
+displayed (since C<-x> will cause the difference between 1 and 2 to be ignored,
+and the difference in 1 and 3 parallels the difference between 1 and 2.)
+This option may only be used in conjunction with C<-i>.
+
+=back
+
+=cut
+
+use strict;
+use warnings;
+use Getopt::Long;
+
+use FindBin qw($Bin);
+use lib "$Bin";
+use dglib;
+
+my %options;
+my $error = undef;
+
+if(!GetOptions(
+ "p" => \$options{p},
+ "i=s" => \$options{i},
+ "x=s" => \$options{x},
+)) {
+ $error = "";
+} elsif(@ARGV != 2 and @ARGV != 3) {
+ $error = "";
+} elsif($options{x} and !$options{i}) {
+ $error = "-x may only be given in conjunction with -i.";
+} else {
+ foreach my $opt("i", "x") {
+ if($options{$opt} and
+ ($options{$opt} !~ /^([123]):([123])$/ or
+ $1 == $2)
+ ) {
+ $error = "Invalid -$opt argument.";
+ }
+ }
+}
+
+if(defined($error)) {
+ print STDERR "$error\n" if $error;
+	print STDERR "Usage: compareSumTests3 [-p] [-i X:Y [-x X:Y]] old1.sum [old2.sum] new.sum\n";
+ print STDERR "Try 'perldoc $0' for further information.\n";
+ exit 1;
+}
+
+my(@sumfiles) = @ARGV;
+-f $_ || die "$_ is not a regular file!\n" foreach @sumfiles;
+my(%results, @inc_changes, @exc_changes, %checksums);
+
+# We decrement the values given so that they correspond
+# to indices into our results array.
+if($options{i}) {
+ $options{i} =~ /(\d+):(\d+)/;
+ @inc_changes = ($1 - 1, $2 - 1);
+}
+if($options{x}) {
+ $options{x} =~ /(\d+):(\d+)/;
+ @exc_changes = ($1 - 1, $2 - 1);
+}
+
+
+my %analyzed_results = compareSumFiles(\@sumfiles);
+
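+# Print each category of results.  Without -p, each category gets a header
+# with counts and each result line is indented; with -p, each result line is
+# prefixed with I/R (and, for a three-way compare, the run number).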
+foreach my $cat (qw(improvements regressions miscellaneous)) {
+ if(@sumfiles == 3) {
+ my @subcounts;
+ if(!$options{p}) {
+ $subcounts[$_] = @{$analyzed_results{$cat}->[$_] || []} for(0..2);
+ print "\u$cat: ", ($subcounts[0]+$subcounts[1]+$subcounts[2]), "\n";
+ }
+
+ for(my $i = 0; $i < 3; $i++) {
+ if(!$options{p} and $cat ne "miscellaneous") {
+ if($i == 0) {
+ if($cat eq "regressions") {
+ print "\tSuccess in old1 only: $subcounts[$i]\n";
+ } else {
+ print "\tFailure in old1 only: $subcounts[$i]\n";
+ }
+ } elsif($i == 1) {
+ if($cat eq "regressions") {
+ print "\tSuccess in old2 only: $subcounts[$i]\n";
+ } else {
+ print "\tFailure in old2 only: $subcounts[$i]\n";
+ }
+ } else {
+ if($cat eq "regressions") {
+ print "\tFailure in new only: $subcounts[$i]\n";
+ } else {
+ print "\tSuccess in new only: $subcounts[$i]\n";
+ }
+ }
+ }
+
+ foreach my $test (sort {$a->{name} cmp $b->{name}} @{$analyzed_results{$cat}->[$i] || []}) {
+ if(!$options{p}) {
+ if($cat eq "miscellaneous") {
+ print "\t";
+ } else {
+ print "\t\t";
+ }
+ } else {
+ if($cat eq "regressions") {
+ print "R";
+ } else {
+ print "I";
+ }
+
+ print $i+1, " ";
+ }
+ printf "%s [%s,%s,%s]\n", $test->{name}, $test->{data}->[0], $test->{data}->[1], $test->{data}->[2];
+ }
+ }
+ } else {
+ if(!$options{p}) {
+ my $subcount = @{$analyzed_results{$cat}};
+ print "\u$cat: $subcount\n";
+ }
+
+ foreach my $test (sort {$a->{name} cmp $b->{name}} @{$analyzed_results{$cat}}) {
+ if(!$options{p}) {
+ print "\t";
+ } else {
+ if($cat eq "regressions") {
+					print "R";
+				} else {
+ print "I";
+ }
+
+ print " ";
+ }
+			printf "%s [%s,%s]\n", $test->{name}, $test->{data}->[0], $test->{data}->[1];
+ }
+ }
+}
diff --git a/gcc-4.3.1/contrib/compare_tests b/gcc-4.3.1/contrib/compare_tests
new file mode 100755
index 000000000..bed97429d
--- /dev/null
+++ b/gcc-4.3.1/contrib/compare_tests
@@ -0,0 +1,111 @@
+#!/bin/sh
+# This script compares the results of two runs of a tool's testsuite
+# (DejaGNU .sum files), reporting anything of interest.
+
+# exits with 0 if there is nothing of interest
+# exits with 1 if there is something interesting
+# exits with 2 if an error occurred
+
+# Give two .sum files to compare them
+
+# Written by Mike Stump <mrs@cygnus.com>
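+#
+# Example invocation (hypothetical file names):
+#   compare_tests g++-old.sum g++-new.sum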
+
+tool=gxx
+
+tmp1=/tmp/$tool-testing.$$a
+tmp2=/tmp/$tool-testing.$$b
+now_s=/tmp/$tool-testing.$$d
+before_s=/tmp/$tool-testing.$$e
+
+if [ "$2" = "" ]; then
+ echo "Usage: $0 previous current" >&2
+ exit 2
+fi
+
+sed 's/^XFAIL/FAIL/; s/^XPASS/PASS/' < "$1" | awk '/^Running target / {target = $3} { if (target != "unix") { sub(/: /, "&"target": " ); }; print $0; }' >$tmp1
+sed 's/^XFAIL/FAIL/; s/^XPASS/PASS/' < "$2" | awk '/^Running target / {target = $3} { if (target != "unix") { sub(/: /, "&"target": " ); }; print $0; }' >$tmp2
+
+before=$tmp1
+now=$tmp2
+
+exit_status=0
+trap "rm -f $tmp1 $tmp2 $now_s $before_s" 0 1 2 3 5 9 13 15
+
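+# Prefer the POSIX "sort -k" field syntax; fall back to the obsolete "+N"
+# form on systems whose sort does not understand -k.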
+if sort -k 2 </dev/null >/dev/null 2>&1; then
+ skip1='-k 2'
+else
+ skip1='+1'
+fi
+
+sort -t ':' $skip1 "$now" > "$now_s"
+sort -t ':' $skip1 "$before" > "$before_s"
+
+grep '^FAIL:' "$now_s" | sed 's/^[^:]*:[ ]//' >$tmp1
+grep '^PASS' "$before_s" | sed 's/^[^:]*:[ ]//' | comm -12 $tmp1 - >$tmp2
+
+grep -s . $tmp2 >/dev/null
+if [ $? = 0 ]; then
+ echo "Tests that now fail, but worked before:"
+ echo
+ cat $tmp2
+ echo
+ exit_status=1
+fi
+
+grep '^PASS' "$now_s" | sed 's/^[^:]*:[ ]//' >$tmp1
+grep '^FAIL' "$before_s" | sed 's/^[^:]*:[ ]//' | comm -12 $tmp1 - >$tmp2
+
+grep -s . $tmp2 >/dev/null
+if [ $? = 0 ]; then
+ echo "Tests that now work, but didn't before:"
+ echo
+ cat $tmp2
+ echo
+fi
+
+grep '^FAIL' "$now_s" | sed 's/^[^:]*:[ ]//' >$tmp1
+grep '^[PF]A[SI][SL]' "$before_s" | sed 's/^[^:]*:[ ]//' | comm -23 $tmp1 - >$tmp2
+
+grep -s . $tmp2 >/dev/null
+if [ $? = 0 ]; then
+ echo "New tests that FAIL:"
+ echo
+ cat $tmp2
+ echo
+ exit_status=1
+fi
+
+grep '^PASS' "$now_s" | sed 's/^[^:]*:[ ]//' >$tmp1
+grep '^[PF]A[SI][SL]' "$before_s" | sed 's/^[^:]*:[ ]//' | comm -23 $tmp1 - >$tmp2
+
+grep -s . $tmp2 >/dev/null
+if [ $? = 0 ]; then
+ echo "New tests that PASS:"
+ echo
+ cat $tmp2
+ echo
+fi
+
+grep '^[PF]A[SI][SL]' "$now_s" | sed 's/^[^:]*:[ ]//' >$tmp1
+grep '^PASS' "$before_s" | sed 's/^[^:]*:[ ]//' | comm -13 $tmp1 - >$tmp2
+
+grep -s . $tmp2 >/dev/null
+if [ $? = 0 ]; then
+ echo "Old tests that passed, that have disappeared: (Eeek!)"
+ echo
+ cat $tmp2
+ echo
+fi
+
+grep '^[PF]A[SI][SL]' "$now_s" | sed 's/^[^:]*:[ ]//' >$tmp1
+grep '^FAIL' "$before_s" | sed 's/^[^:]*:[ ]//' | comm -13 $tmp1 - >$tmp2
+
+grep -s . $tmp2 >/dev/null
+if [ $? = 0 ]; then
+ echo "Old tests that failed, that have disappeared: (Eeek!)"
+ echo
+ cat $tmp2
+ echo
+fi
+
+exit $exit_status
diff --git a/gcc-4.3.1/contrib/dg-cmp-results.sh b/gcc-4.3.1/contrib/dg-cmp-results.sh
new file mode 100755
index 000000000..753005db2
--- /dev/null
+++ b/gcc-4.3.1/contrib/dg-cmp-results.sh
@@ -0,0 +1,201 @@
+#!/bin/bash
+# Copyright (C) 2006 Free Software Foundation
+#
+# Analyze changes in GCC DejaGNU test logs for binutils, gcc, gdb, etc.
+# Original version written in 2005 by James Lemke <jwlemke@wasabisystems.com>.
+#
+# See usage() below.
+
+usage () {
+ cat <<EOF >&2
+Usage:
+ dg-cmp-results.sh [-v] [-v] [-v] <variant-name> <old-file> <new-file>
+ <variant-name> names the desired variant, "/" must be written as "\/".
+ Use the empty string ("") for the first variant in each file.
+ Output is to stdout.
+ Non-verbose output is degradation info like PASS->FAIL.
+ -v adds improvement info like FAIL->PASS.
+ -v -v adds info like tests that are no longer run.
+ -v -v -v adds info for tests that have not changed status.
+ -v -v -v -v is used for debugging.
+EOF
+}
+
+verbose=0
+while test "$1" = "-v"; do
+ verbose=`expr $verbose + 1`
+ shift
+done
+
+if test $# -ne 3 -o ! -f "$2" -o ! -f "$3"; then
+ usage
+ exit 1
+fi
+
+# Command differences for various platforms.
+case `uname -s` in
+Darwin|NetBSD)
+ E=-E # sed
+ ;;
+*)
+ E=-r # sed
+ ;;
+esac
+
+# Sections are identified by separator lines beginning with '\t\t==='.
+# Section 0 identifies run date, target, and host.
+# Sections 1 and later each contain test data for one target variant.
+# -skip to /^Running target/ and use that line to identify the variant.
+# -subsequent lines contain the result data. They begin with:
+# '(PASS|FAIL|XFAIL|XPASS|UNTESTED|UNSUPPORTED|UNRESOLVED):'
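+#
+# A .sum file therefore looks roughly like this (illustrative excerpt;
+# the "Running target" and result lines start in column 0):
+#
+#		=== gcc tests ===
+# Running target unix
+# PASS: gcc.dg/example-1.c (test for excess errors)
+# FAIL: gcc.dg/example-1.c execution test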
+VARIANT="$1"
+OFILE="$2"
+OBASE=`basename "$2"`
+NFILE="$3"
+NBASE=`basename "$3"`
+
+echo "dg-cmp-results.sh: Verbosity is ${verbose}, Variant is \"${VARIANT}\""
+echo
+
+header="^Running target $VARIANT"
+
+temp=`grep "$header" $OFILE`
+if test -z "$temp"; then
+ echo "Error: variant \"$VARIANT\" not found in $OFILE."
+ exit 1
+fi
+temp=`grep "$header" $NFILE`
+if test -z "$temp"; then
+ echo "Error: variant \"$VARIANT\" not found in $NFILE."
+ exit 1
+fi
+unset temp
+
+# Copy out the old file's section 0.
+echo "Older log file: $OFILE"
+sed $E -e '/^[[:space:]]+===/,$d' $OFILE
+
+# Copy out the new file's section 0.
+echo "Newer log file: $NFILE"
+sed $E -e '/^[[:space:]]+===/,$d' $NFILE
+
+# Create a temporary file from the old file's interesting section.
+sed $E -e "1,/$header/d" \
+ -e '/^[[:space:]]+===/,$d' \
+ -e '/^[A-Z]+:/!d' \
+ -e '/^(WARNING|ERROR):/d' \
+ -e 's/\r$//' \
+ -e 's/^/O:/' \
+ $OFILE |
+ sort -s -t : -k 3b - \
+ >/tmp/o$$-$OBASE
+
+# Create a temporary file from the new file's interesting section.
+sed $E -e "1,/$header/d" \
+ -e '/^[[:space:]]+===/,$d' \
+ -e '/^[A-Z]+:/!d' \
+ -e '/^(WARNING|ERROR):/d' \
+ -e 's/\r$//' \
+ -e 's/^/N:/' \
+ $NFILE |
+ sort -s -t : -k 3b - \
+ >/tmp/n$$-$NBASE
+
+# Merge the two files, then compare adjacent lines.
+# Comparison is complicated by tests that may be run multiple times.
+# In that case, we assume that the order is the same in both files.
+cat <<EOF >compare-$$.awk
+BEGIN {
+ FS = ":"
+ queue1 = 1; queueN = 0; status[queue1] = ""; name[queue1] = ""
+ verbose = verbose + 0 # Make sure it's defined.
+}
+
+# FIFO circular queue
+function push(st, nm) {
+ queueN += 1; status[queueN] = st; name[queueN] = nm
+}
+function peek() {
+ result = 0
+ if (queueN >= queue1) result = queue1
+ return result
+}
+function drop() {
+ queue1 += 1
+ if (queue1 > queueN) { queue1 = 1; queueN = 0; }
+}
+
+function compare(st, nm) {
+ old = peek()
+ if (old == 0) {
+ # This new test wasn't run last time.
+ if (verbose >= 2) printf("NA->%s:%s\n", st, nm)
+ }
+ else {
+ # Compare this new test to the first queued old one.
+ if (verbose >= 4) {
+ printf("Comparing two lines:\n O:%s:%s\n N:%s:%s\n",
+ status[old], name[old], st, nm)
+ }
+ if (name[old] != nm) {
+ # The old test wasn't run this time and
+ # the new test wasn't run last time.
+ if (verbose >= 2) {
+ printf("%s->NA:%s\n", status[old], name[old])
+ if (nm != "") printf("NA->%s:%s\n", st, nm)
+ }
+ drop()
+ }
+ else {
+ notable = 0
+ if (status[old] == st) {
+ # Status of this test has not changed.
+ if (verbose >= 3) printf("%s:%s\n", st, nm)
+ }
+ else if(status[old] == "PASS" && st == "XFAIL") {
+ if (verbose >= 1) notable = 1
+ }
+ else if(status[old] == "PASS" || st == "FAIL") {
+ # Test did pass but doesn't now
+ # or didn't fail but does now.
+ notable = 1
+ }
+ else if(st == "PASS") {
+ # Test didn't pass but does now.
+ if (verbose >= 1) notable = 1
+ }
+ else if(verbose >= 2) {
+ # Miscellaneous status change.
+ notable = 1
+ }
+ if (notable > 0) printf("%s->%s:%s\n", status[old], st, nm)
+ drop()
+ }
+ }
+}
+
+/^O:/ {
+ while (old = peek()) {
+ if (name[old] == \$3) break;
+ # The queued test is no longer run.
+ compare("", "");
+ }
+ # Save this test for later comparison.
+ push(\$2, \$3)
+}
+
+/^N:/ {
+ compare(\$2, \$3)
+}
+
+END {
+ while (old = peek()) compare("", "")
+}
+EOF
+sort -m -s -t : -k 3b /tmp/o$$-$OBASE /tmp/n$$-$NBASE |
+ awk -v verbose=$verbose -f compare-$$.awk /dev/stdin
+
+# Delete the temporary files.
+rm -f compare-$$.awk /tmp/o$$-$OBASE /tmp/n$$-$NBASE
+
+exit 0
diff --git a/gcc-4.3.1/contrib/dglib.pm b/gcc-4.3.1/contrib/dglib.pm
new file mode 100644
index 000000000..c86d4f014
--- /dev/null
+++ b/gcc-4.3.1/contrib/dglib.pm
@@ -0,0 +1,424 @@
+# Library of functions for dealing with DejaGNU, or which are otherwise
+# generally useful for the DejaGNU tool stack.
+#
+# Author: Matthew Sachs <msachs@apple.com>
+#
+# Functions:
+# parseLogFile: See "sub parseLogFile" below for details. This function
+# returns a detailed parse of a DejaGNU log or sum file.
+# ispass: Takes a DejaGNU result (e.g. "PASS", "XPASS") and returns
+# true if and only if it is a passing result (PASS, XFAIL, or
+# KFAIL.)
+#
+# Copyright (c) 2006 Free Software Foundation.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING. If not, write to
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+
+package dglib;
+
+use strict;
+use warnings;
+use Exporter;
+
+our @ISA = qw(Exporter);
+our @EXPORT = qw(ispass parseLogFile);
+
+use File::Basename;
+use POSIX qw(mktime);
+
+
+# Create a group hierarchy, returning the leaf node
+sub mkGroupPath {
+ my($root, $groups, @newgroups) = @_;
+
+ my $parent = $root;
+ my $fullname = "";
+ foreach my $group(@newgroups) {
+ $fullname .= "/" if $fullname;
+ $fullname .= $group;
+ if(exists($groups->{$fullname})) {
+ $parent = $groups->{$fullname};
+ } else {
+ my $newgroup = {name => $group, parent => $parent};
+ $groups->{$fullname} = $newgroup;
+ $parent->{testgroup} ||= [];
+ push @{$parent->{testgroup}}, $newgroup;
+ $parent = $newgroup;
+ }
+ }
+
+ return $parent;
+}
+
+# Extract information from DejaGNU log or sum files.
+# Options, if provided, should be a hashref with zero or more of the following keys:
+# gccdir:
+# Passing in the full path to the root of the gcc/testsuite directory
+# will help in the parsing, but if it isn't provided, it will be guessed.
+# diagnostics:
+# If set to 0, diagnostics will not be returned. This can save a lot
+# of memory if you are not using this information.
+# fullname:
+# If set to 0, the fullname key will not be included in tests.
+# Returns a hash with the following keys:
+# incomplete: 1 if the summary file appears truncated, otherwise 0
+# diagnostics: List of (type, value) for any errors detected. Type can be ERROR, WARNING, or NOTE.
+# test: Array of root-level tests, with keys:
+# name: Name of the test, relative to the enclosing test group.
+# fullname: Fully-qualified name of the test.
+# result: DejaGNU result (PASS, FAIL, XPASS, &c)
+# detail: For multi-phase (e.g. compile/link/execute), this will be
+# the furthest phase which the test was able to attempt,
+# so if the result is FAIL and this is "link phase", the test
+# compiled but failed to link. This key may contain other
+# auxiliary data.
+# pseudotest: If 1, this test may not really exist; see "pseudotest" below.
+# testgroup: Array of root-level testgroups, with keys:
+# name: Name of the group.
+# parent: Parent test group.
+# test: As per above.
+# testgroup: Child test groups.
+# compiler: Version string from compiler used to run the tests (if detected)
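+#
+# A minimal usage sketch (the file name and option values are only
+# illustrative; test groups can nest, so a real consumer would recurse):
+#
+#   use dglib;
+#   my %log = parseLogFile("gcc.sum", {diagnostics => 0});
+#   warn "log appears truncated\n" if $log{incomplete};
+#   foreach my $group (@{$log{testgroup} || []}) {
+#       foreach my $test (@{$group->{test} || []}) {
+#           print "$test->{name}: $test->{result}\n";
+#       }
+#   }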
+sub parseLogFile($;$) {
+ my($logfile, $options) = @_;
+ $options ||= {};
+ my $gccdir = $options->{gccdir} || "";
+ my $return_diags = exists($options->{diagnostics}) ? $options->{diagnostics} : 1;
+ my $emit_fullname = exists($options->{fullname}) ? $options->{fullname} : 1;
+ my $is_gdb = 0;
+ my $gdbhack = "";
+
+ my %ret = (incomplete => 1, diagnostics => [], testgroup => []);
+ my(%testindex, %groupindex);
+
+ open(LOGFILE, $logfile) or die "Couldn't open log file $logfile: $!\n";
+
+ my($currgroup, $currtest, $lastrun);
+ $currgroup = \%ret;
+
+ my %monmap = (Jan => 0, Feb => 1, Mar => 2, Apr => 3, May => 4, Jun => 5, Jul => 6, Aug => 7, Sep => 8, Oct => 9, Nov => 10, Dec => 11);
+
+ # We don't want gccdir matching on an empty string.
+ $gccdir ||= "this will never match, or my name isn't Reginald St. Croix";
+
+ my $line = 1;
+ while(<LOGFILE>) {
+ chomp;
+ s/\x{d}$//; #^M
+ next if $_ eq "";
+
+ if(/^gcc version/) {
+ $ret{compiler} = $_;
+ } elsif(/^got a .* signal, interrupted by user /) {
+ $ret{incomplete} = 2;
+ } elsif(/^\s*=== gdb/) {
+ $is_gdb = 1;
+ # The log file from the GDB test suite is prone to have random crap
+ # in front of test result lines, so we need to be looser about how
+ # we parse those for GDB.
+ $gdbhack = ".*";
+ } elsif(/^(Test Run By \S+ on|runtest completed at) ... (.{3}) (\d{1,2}) (\d{1,2}):(\d{1,2}):(\d{1,2}) (\d{4})/) {
+ my $time = mktime($6, $5, $4, $3, $monmap{$2}, $7 - 1900);
+ if($1 eq "runtest completed at") {
+ $ret{end_time} = $time;
+ } else {
+ $ret{start_time} = $time;
+ }
+ } elsif(m<^Running (?!target )\Q$gccdir\E/?(\S+)> or m<^Running (?!target )\S*?((?:gcc|gdb|libstdc\+\+-v3)/testsuite/\S+)>) {
+ # We keep track of the last "Running foo/bar/baz.exp" line because
+ # some tests don't bother printing out the full paths of their files,
+ # and this gives us the directory information.
+
+ $lastrun = $1;
+ $lastrun =~ s!/[^/]*/\.\.!!; # foo/bar/../baz -> foo/baz
+ $currgroup = mkGroupPath(\%ret, \%groupindex, split(m!/!, $lastrun));
+ #$currgroup->{testfile} = $lastrun;
+ } elsif(/^Executing on (.*?):(.*)/) {
+ # Okay, if it's "Executing on host", it's a new
+ # file. If it's "Executing on unix", it's probably
+ # a test within the file -- an execution test, specifically --
+ # (makes sense, no?) But not always, sometimes we
+ # see "Executing on unix" outside the context of a
+ # file.
+
+ # Try to pick out the gccdir-relative filename.
+ # If we can't find it, it isn't really a new testfile,
+ # but a derived file.
+ my($exectype, $execwhat) = ($1, $2);
+ next if $execwhat =~ /^dsymutil/;
+ $execwhat =~
+ s!.*?\s\Q$gccdir\E/?(\S+).*!$1! or
+ s!.*?/((?:gcc|gdb|libstdc\+\+-v3)/testsuite/\S+).*!$1! or
+ $exectype = "unix";
+
+ if($exectype eq "host" or !$currgroup) {
+ # New file
+
+ my $nogroup = 0;
+ if($execwhat =~ / /) {
+ # We probably haven't parsed the file correctly.
+ # Try getting it from $lastrun.
+
+ $execwhat = dirname($lastrun) . "/" . basename($execwhat) if $lastrun and $execwhat;
+ $execwhat =~ s/\s.*//;
+
+ # At the end of each tool, it invokes "gcc -v" or "c++ -v"
+ # as a test. We don't really want to treat this as a test.
+ if($execwhat =~ m!/(gcc|c\+\+)$!) {
+ undef $currtest;
+ undef $currgroup;
+ $nogroup = 1;
+ }
+ }
+
+ if(!$nogroup) {
+ undef $currtest;
+ $execwhat =~ s!/[^/]*/\.\.!!; # foo/bar/../baz -> foo/baz
+
+ if($lastrun) {
+ my $lastbase = dirname($lastrun);
+ my $basegroup = $execwhat;
+ $basegroup =~ s!^\Q$lastbase\E/!!;
+ $execwhat = "$lastrun/$basegroup";
+ }
+
+ $currgroup = mkGroupPath(\%ret, \%groupindex, split(m!/!, $execwhat));
+ #$currgroup->{testfile} = $execwhat;
+ }
+ } else {
+ # New test within current file
+
+ $currtest = {};
+ }
+ } elsif(/^# of/) {
+		# This line should appear near the end of summary files.
+ # If it doesn't, something went wrong.
+
+ if($ret{incomplete} == 2) {
+			# Ah, but we previously saw an indication that we were killed by a signal.
+ $ret{incomplete} = 1;
+ } else {
+ $ret{incomplete} = 0;
+ }
+ } elsif(/^testcase .* completed/) {
+ # End of a .exp file
+ undef $currtest;
+ undef $currgroup;
+ } elsif(/^$gdbhack(FAIL|PASS|UNRESOLVED|UNSUPPORTED|UNTESTED|XFAIL|XPASS|KFAIL|KPASS): (.*)/) {
+ # If the currtest already has a name, that means we've already seen
+ # its results, so what we have now is a new test. However, if we
+ # haven't seen results for currtest yet, that means currtest just
+ # has some diagnostics associated with it but no actual results,
+ # so just use that one.
+ undef $currtest if $currtest->{name};
+
+ my $phase = ""; # compile/link/execute
+ my($test, $result) = ($2, $1);
+
+ # Compile/(link/)execute combining
+ if($test =~ /^(.*) compile\s*$/) {
+ $test = "$1 compile,link,execute";
+ $phase = "compile";
+ } elsif($test =~ /^(.*)-(.*) (link|execute)\s*$/) {
+ $test = "$1 compile,link,execute";
+ if($3 eq "link") {
+ $phase = "link";
+ } else {
+ $phase = "execute";
+ }
+ } elsif($test =~ /(compile|compilation|execute|execution)/) {
+ my $phasematch = $1;
+ if($test =~ /^com/) {
+ $phase = "compile";
+ } else {
+ $phase = "execute";
+ }
+ $test =~ s!\Q$phasematch\E!compile,execute!;
+ }
+
+ # gcov tests behave in non-standard fashion.
+ my $failwhy = "";
+ $test =~ s/ gcov failed: (.*)// and $failwhy = $1;
+
+ # And some other tests have random information after a colon :(
+ # But for scan-assembler, this really is part of the name.
+ if(!$is_gdb and $test !~ /scan-assembler/ and $test =~ s/:\s*(.+)//) {
+ $failwhy = $1;
+ }
+
+ $test =~ s/\s*$//;
+ $test =~ s/^\s*$//;
+
+ # Sometimes there's a test which shows up as:
+ # foo (test for excess errors)
+ # foo (something else)
+ # foo: error executing dg-final
+ # if it runs, but just:
+ # foo
+ # if it doesn't. When we see the top form, we create a
+ # "pseudotest" in the bottom form, so that comparisons
+ # can be made.
+ my $basetest = $test;
+ $basetest =~ s/:? .*//;
+
+ if(exists($testindex{$test}) and !$testindex{$test}->{pseudotest}) {
+ $currtest = $testindex{$test};
+ if(ispass($currtest->{result})) {
+ $currtest->{result} = $result;
+ $currtest->{detail} = "$phase phase";
+ $currtest->{detail} .= "; $failwhy" if $failwhy;
+ }
+ } else {
+ # This might have been created earlier as a pseudotest.
+ # If so, overwrite it.
+ $currtest ||= $testindex{$test} || {};
+
+ $currtest->{name} = basename($test);
+ if($emit_fullname) {
+ $currtest->{fullname} = ($currgroup->{name} || dirname($test)) . "/$currtest->{name}";
+ }
+ my $grpname = $currgroup->{name} || "";
+ $currtest->{name} =~ s/^\s*\Q$grpname\E\s*//;
+ $currtest->{name} =~ s/^: // if $is_gdb;
+ # Sometimes there's a test at the root of the group.
+ # For instance, you'll have:
+ # FAIL: foo/bar.c (test for excess errors)
+ # UNRESOLVED: foo/bar.c: couldn't open "bar.s": no such file or directory
+ # In this case, groupname *is* the entire name, so the regex above will make the test name empty.
+ # In this case, we actually want to use the parent group and make this a test within that group.
+ my $orig_currgroup = $currgroup;
+ if(!$currtest->{name}) {
+ $currtest->{name} = $grpname;
+ $currgroup = $currgroup->{parent};
+ $grpname = $currgroup->{name} || "";
+ }
+
+ $currtest->{result} = $result;
+ if($phase and $failwhy) {
+ $currtest->{detail} = "$phase phase; $failwhy" if $phase;
+ } elsif($phase) {
+ $currtest->{detail} = "$phase phase";
+ } elsif($failwhy) {
+ $currtest->{detail} = $failwhy;
+ }
+
+ $currgroup->{test} ||= [];
+ push @{$currgroup->{test}}, $currtest;
+ $testindex{$test} = $currtest;
+ $currgroup = $orig_currgroup;
+
+ if($basetest ne $test) {
+ if(!exists($testindex{$basetest}) ) {
+ my $btbase = basename($basetest);
+ $testindex{$basetest} = {
+ name => $btbase,
+ result => $result,
+ pseudotest => 1,
+ fullname => $btbase
+ };
+ if($emit_fullname) {
+						$testindex{$basetest}->{fullname} = ($currgroup->{name} || dirname($basetest)) . "/$btbase";
+ }
+ push @{$currgroup->{parent}->{test}}, $testindex{$basetest};
+ } else {
+ # Only let the base test pass if all the sub-tests pass
+ $testindex{$basetest}->{result} = $result if !ispass($result);
+ }
+ }
+
+ }
+ } elsif(/^\s+=== .* Summary ===\s*$/) {
+ undef $currgroup;
+ undef $currtest;
+ }
+
+ my $severity;
+ if(/^(ERROR|WARNING|NOTE): (.*)/) {
+ $severity = $1;
+ my $message = $2;
+
+ if($message eq "program timed out.") {
+ $currtest->{result} = "TIMEDOUT";
+ } elsif(
+ $message =~ /can't read "(HOSTCC|libiconv)": no such variable/ or
+ $message =~ /no files matched glob pattern/ or
+ $message =~ /error executing dg-final: .*: no such file/
+ ) {
+ $severity = "NOTE";
+ }
+ } else {
+ $severity = "logline";
+ }
+
+ if($return_diags) {
+ my $dobj;
+ if($currtest) {
+ $currtest->{diagnostics} ||= [];
+ $dobj = $currtest->{diagnostics};
+ } elsif($currgroup) {
+ $currgroup->{diagnostics} ||= [];
+ $dobj = $currgroup->{diagnostics};
+ } else {
+ $dobj = $ret{diagnostics};
+ }
+
+ push @$dobj, {message => $_, severity => $severity, line => $line};
+ }
+ } continue {
+ $line++;
+ }
+ close LOGFILE;
+
+ return %ret;
+}
+
+# Split a test into testdivs (nest it under hashes keyed by the
+# path components of its fullname).
+sub splitTest($$) {
+ my($root, $test) = @_;
+
+ $test->{fullname} =~ /^(\S+)\s*(.*)/;
+ my($path, $descriptor) = ($1, $2);
+ my @nodes = split(m!/!, $path);
+ push @nodes, $descriptor if $descriptor;
+ my $lastnode = pop @nodes;
+
+ my $hash = $root;
+ foreach (@nodes) {
+ $hash->{testdiv} ||= {};
+ $hash = $hash->{testdiv}->{$_} ||= {};
+ }
+
+
+ $hash->{test} ||= {};
+ $hash->{test}->{$lastnode} = $test;
+}
+
+
+# ==== Comparison ====
+
+sub ispass($) {
+ my $result = shift;
+
+ if($result eq "PASS" or $result eq "XFAIL" or $result eq "KFAIL") {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+1;
diff --git a/gcc-4.3.1/contrib/download_ecj b/gcc-4.3.1/contrib/download_ecj
new file mode 100755
index 000000000..5043f9407
--- /dev/null
+++ b/gcc-4.3.1/contrib/download_ecj
@@ -0,0 +1,25 @@
+#! /bin/sh
+
+#
+# Download the ecj jar file needed by gcj.
+# Run this from the top level of the gcc source tree and the libjava
+# build will do the right thing.
+#
+# (C) 2006 Free Software Foundation
+#
+# This script is Free Software, and it can be copied, distributed and
+# modified as defined in the GNU General Public License. A copy of
+# its license can be downloaded from http://www.gnu.org/copyleft/gpl.html
+#
+
+ftp -n sourceware.org << EOF
+verbose
+hash
+user ftp ''
+cd /pub/java
+binary
+get ecj-latest.jar
+EOF
+
+mv ecj-latest.jar ecj.jar
+
diff --git a/gcc-4.3.1/contrib/filter_gcc_for_doxygen b/gcc-4.3.1/contrib/filter_gcc_for_doxygen
new file mode 100755
index 000000000..3787eebbf
--- /dev/null
+++ b/gcc-4.3.1/contrib/filter_gcc_for_doxygen
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+# This filters GCC source before Doxygen can get confused by it;
+# this script is listed in the doxyfile. The output is not very
+# pretty, but at least we get output that Doxygen can understand.
+#
+# $1 is a source file of some kind. The source we wish doxygen to
+# process is put on stdout.
+
+dir=`dirname $0`
+perl $dir/filter_params.pl < $1 | perl $dir/filter_knr2ansi.pl
+exit 0
diff --git a/gcc-4.3.1/contrib/filter_knr2ansi.pl b/gcc-4.3.1/contrib/filter_knr2ansi.pl
new file mode 100755
index 000000000..6b43e4077
--- /dev/null
+++ b/gcc-4.3.1/contrib/filter_knr2ansi.pl
@@ -0,0 +1,45 @@
+#!/usr/bin/perl
+#
+# Goes through the input line by line to find K&R style function
+# definitions, and rewrites them with ANSI style parameter lists.
+#
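+# For example (an illustrative definition), input such as
+#
+#   foo (a, b)
+#        int a;
+#        char *b;
+#   {
+#
+# has its parameter declarations pulled up into the parentheses, leaving
+# roughly "foo (int a, char *b)" followed by the opening brace.  (The
+# output spacing is not pretty, but it is good enough for Doxygen.)
+#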
+@blah = <>;
+
+for ($i = 0; $i < @blah; $i++)
+{
+ if ($blah[$i] =~ /^([a-zA-Z_0-9]+)\s*\([^)]+\)\s*$/)
+ {
+ $name = $1;
+ $funci = $i;
+ $blah[$funci]="$name (";
+ $i++;
+ $lastline = $i;
+ while ($lastline < @blah && $blah[$lastline] !~ /^{/)
+ {
+ $lastline++;
+ }
+ $lastline--;
+ while ($i < @blah && $blah[$i] !~ /^{/)
+ {
+ $arg = $blah[$i];
+ if ($i != $lastline)
+ {
+ $arg =~ s/;/,/g;
+ }
+ else
+ {
+ $arg =~ s/;//g;
+ }
+ $blah[$i] = "";
+ $blah[$funci] = "$blah[$funci]" . "$arg";
+ $i++;
+ }
+ $blah[$funci] = "$blah[$funci]" . ")\n";
+ }
+}
+
+for ($i = 0; $i < @blah; $i++)
+{
+ print $blah[$i];
+}
+
diff --git a/gcc-4.3.1/contrib/filter_params.pl b/gcc-4.3.1/contrib/filter_params.pl
new file mode 100755
index 000000000..22dae6cc5
--- /dev/null
+++ b/gcc-4.3.1/contrib/filter_params.pl
@@ -0,0 +1,14 @@
+#!/usr/bin/perl
+
+# Filters out some of the #defines used throughout the GCC sources:
+# - GTY(()) marks declarations for gengtype.c
+# - PARAMS(()) is used for K&R compatibility. See ansidecl.h.
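+#
+# For example (an illustrative declaration), a line such as
+#   extern void foo PARAMS ((int, char *));
+# comes out as
+#   extern void foo (int, char *);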
+
+while (<>) {
+ s/^\/\* /\/\*\* \@verbatim /;
+ s/\*\// \@endverbatim \*\//;
+ s/GTY[ \t]*\(\(.*\)\)//g;
+ s/[ \t]ATTRIBUTE_UNUSED//g;
+ s/PARAMS[ \t]*\(\((.*?)\)\)/\($1\)/sg;
+ print;
+}
diff --git a/gcc-4.3.1/contrib/gcc.doxy b/gcc-4.3.1/contrib/gcc.doxy
new file mode 100644
index 000000000..7a284e754
--- /dev/null
+++ b/gcc-4.3.1/contrib/gcc.doxy
@@ -0,0 +1,1267 @@
+# Doxyfile 1.5.2
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+
+#-----------------------------------------------------------------------------
+# NOTE: YOU MUST EDIT THE FOLLOWING HARDWIRED PATHS BEFORE USING THIS FILE.
+#-----------------------------------------------------------------------------
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = @OUTPUT_DIRECTORY@
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER = @INPUT_FILTER@
+
+#-----------------------------------------------------------------------------
+
+
+
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file that
+# follow. The default is UTF-8 which is also the encoding used for all text before
+# the first occurrence of this tag. Doxygen uses libiconv (or the iconv built into
+# libc) for the transcoding. See http://www.gnu.org/software/libiconv for the list of
+# possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = "GCC Middle and Back End API Reference"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER =
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Finnish, French, German, Greek, Hungarian,
+# Italian, Japanese, Japanese-en (Japanese with English messages), Korean,
+# Korean-en, Lithuanian, Norwegian, Polish, Portuguese, Romanian, Russian,
+# Serbian, Slovak, Slovene, Spanish, Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = YES
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names, as on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description).
+
+JAVADOC_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member
+# documentation.
+
+DETAILS_AT_TOP = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that act
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for Java.
+# For instance, namespaces will be presented as packages, qualified scopes
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to
+# include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface, are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from the
+# version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = gcc
+
+# This tag can be used to specify the character encoding of the source files that
+# doxygen parses. Internally doxygen uses the UTF-8 encoding, which is also the default
+# input encoding. Doxygen uses libiconv (or the iconv built into libc) for the transcoding.
+# See http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py
+
+FILE_PATTERNS =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = c-* */testsuite/* */config/* */.svn/* */po/*
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the output.
+# The symbol name can be a fully qualified name, a word, or if the wildcard * is used,
+# a substring. Examples: ANamespace, AClass, AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default)
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default)
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code. Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen's
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls whether a separate .chi index file is generated (YES) or
+# included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = YES
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = letter
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = YES
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements; missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
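+# For illustration only (these macro names are placeholders, not part of
+# this configuration), the syntax looks like:
+#   PREDEFINED = IN_LIBFOO FOO_VERSION=4003 FOO_CONST:=const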
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the
+# \msc command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in
+# the documentation. The MSCGEN_PATH tag allows you to specify the directory
+# where the mscgen tool resides. If left empty the tool is assumed to be
+# found in the default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = YES
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct group dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a call dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a caller dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that doxygen will
+# always show the root node and its direct children regardless of this
+# setting.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, which results in a white background.
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE = NO
diff --git a/gcc-4.3.1/contrib/gcc_build b/gcc-4.3.1/contrib/gcc_build
new file mode 100755
index 000000000..6713066d7
--- /dev/null
+++ b/gcc-4.3.1/contrib/gcc_build
@@ -0,0 +1,325 @@
+#! /bin/sh
+
+########################################################################
+#
+# File: gcc_build
+# Author: Mark Mitchell
+# Date: 2000-07-10
+#
+# Adapted to Subversion by Ben Elliston <bje@au.ibm.com>, 2005-07-14.
+#
+# Contents:
+# Script to automatically download and build GCC.
+#
+# Copyright (c) 2000, 2001, 2003, 2005 Free Software Foundation.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING. If not, write to
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+########################################################################
+
+########################################################################
+# Notes
+########################################################################
+
+# You can set the following variables in the environment. They
+# have no corresponding command-line options because they should
+# only be needed infrequently:
+#
+# MAKE The path to `make'.
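+#
+# For illustration only (the directories and configure flags below are
+# placeholders, not defaults of this script), a complete build might be
+# driven like this:
+#
+#   MAKE=gmake gcc_build -d $HOME/dev/gcc -o objdir \
+#     -c "--prefix=$HOME/opt/gcc --enable-languages=c,c++" \
+#     checkout configure bootstrap install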
+
+########################################################################
+# Functions
+########################################################################
+
+# Issue the error message given by $1 and exit with a non-zero
+# exit code.
+
+error() {
+ echo "gcc_build: error: $1"
+ exit 1
+}
+
+# Issue a usage message explaining how to use this script.
+
+usage() {
+cat <<EOF
+gcc_build [-c configure_options]
+ [-d destination_directory]
+ [-m make_boot_options]
+ [-o objdir]
+ [-b branch_name]
+ [-u username]
+ [-p protocol]
+ [-t tarfile]
+ [-x make_check_options]
+ [bootstrap]
+ [build]
+ [checkout]
+ [configure]
+ [export]
+ [install]
+ [test]
+ [update]
+EOF
+ exit 1
+}
+
+# Change to the directory given by $1.
+
+changedir() {
+ cd $1 || \
+ error "Could not change directory to $1"
+}
+
+# Checkout a fresh copy of the GCC build tree.
+
+checkout_gcc() {
+ # If the destination already exists, don't risk destroying it.
+ test -e ${DESTINATION} && \
+ error "${DESTINATION} already exists"
+
+ # Checkout the tree
+ test -n "${SVN_USERNAME}" && SVN_USERNAME="${SVN_USERNAME}@"
+ SVNROOT="${SVN_PROTOCOL}://${SVN_USERNAME}${SVN_SERVER}${SVN_REPOSITORY}${SVN_BRANCH}"
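+ # With the defaults set further below this expands to something like
+ # svn://gcc.gnu.org/svn/gcc/trunk (illustrative; -p, -u and -b change
+ # the protocol, username and branch components).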
+
+ $GCC_SVN co $SVNROOT ${DESTINATION} || \
+ error "Could not check out GCC"
+}
+
+# Update GCC.
+
+update_gcc() {
+ # If the destination does not already exist, complain.
+ test -d ${DESTINATION} || \
+ error "${DESTINATION} does not exist"
+
+ # Enter the destination directory.
+ changedir ${DESTINATION}
+
+ # Update the tree
+ ./contrib/gcc_update || \
+ error "Could not update GCC"
+}
+
+# Configure for a build of GCC.
+
+configure_gcc() {
+ # Go to the source directory.
+ changedir ${DESTINATION}
+
+ # Remove the object directory.
+ rm -rf ${OBJDIR}
+ # Create it again.
+ mkdir ${OBJDIR} || \
+ error "Could not create ${OBJDIR}"
+ # Enter it.
+ changedir ${OBJDIR}
+
+ # Configure the tree.
+ echo "Configuring: ${DESTINATION}/configure ${CONFIGURE_OPTIONS}"
+ eval ${DESTINATION}/configure ${CONFIGURE_OPTIONS} || \
+ error "Could not configure the compiler"
+}
+
+# Bootstrap GCC. Assume configuration has already occurred.
+
+bootstrap_gcc() {
+ # Go to the source directory.
+ changedir ${DESTINATION}
+ # Go to the object directory.
+ changedir ${OBJDIR}
+
+ # Bootstrap the compiler
+ echo "Building: ${MAKE} ${MAKE_BOOTSTRAP_OPTIONS} bootstrap"
+ eval ${MAKE} ${MAKE_BOOTSTRAP_OPTIONS} bootstrap || \
+ error "Could not bootstrap the compiler"
+}
+
+# Test GCC.
+
+test_gcc() {
+ # Go to the source directory.
+ changedir ${DESTINATION}
+ # Go to the object directory.
+ changedir ${OBJDIR}
+
+ echo "Running tests... This will take a while."
+ eval \${MAKE} -k ${MAKE_CHECK_OPTIONS} check
+ ${DESTINATION}/contrib/test_summary
+}
+
+# Export the GCC source tree.
+
+export_gcc() {
+ # Go to the source directory.
+ changedir ${DESTINATION}
+ # Go up one level.
+ changedir ..
+ # Build a tarball of the source directory.
+ tar czf ${TARFILE} \
+ --exclude=${OBJDIR} \
+ --exclude=.svn \
+ --exclude='.#*' \
+ --exclude='*~' \
+ `basename ${DESTINATION}`
+}
+
+# Install GCC.
+
+install_gcc() {
+ # Go to the source directory.
+ changedir ${DESTINATION}
+ # Go to the object directory.
+ changedir ${OBJDIR}
+
+ ${MAKE} install || error "Installation failed"
+}
+
+########################################################################
+# Initialization
+########################################################################
+
+# SVN command
+GCC_SVN=${GCC_SVN-${SVN-svn}}
+# The SVN server containing the GCC repository.
+SVN_SERVER="gcc.gnu.org"
+# The path to the repository on that server.
+SVN_REPOSITORY="/svn/gcc/"
+# The branch to check out from that server.
+# Defaults to trunk if no branch is defined with -b.
+SVN_BRANCH=""
+# The SVN protocol to use.
+SVN_PROTOCOL="svn"
+# The username to use when connecting to the server.
+# An empty string means anonymous.
+SVN_USERNAME=""
+
+# The directory where the checked out GCC will be placed.
+DESTINATION="${HOME}/dev/gcc"
+# The relative path from the top of the source tree to the
+# object directory.
+OBJDIR="objdir"
+
+# The file where the tarred up sources will be placed.
+TARFILE="${HOME}/dev/gcc.tgz"
+
+# Options to pass to configure.
+CONFIGURE_OPTIONS=
+# The `make' program.
+MAKE=${MAKE:-make}
+# Options to pass to "make bootstrap".
+MAKE_BOOTSTRAP_OPTIONS=
+# Options to pass to "make check".
+MAKE_CHECK_OPTIONS=
+
+# Modes of operation
+BOOTSTRAP=0
+CHECKOUT=0
+CONFIGURE=0
+EXPORT=0
+INSTALL=0
+TEST=0
+UPDATE=0
+
+########################################################################
+# Main Program
+########################################################################
+
+# Issue usage if no parameters are given.
+test $# -eq 0 && usage
+
+# Parse the options.
+while getopts "c:d:m:o:p:t:b:u:x:" ARG; do
+ case $ARG in
+ c) CONFIGURE_OPTIONS="${OPTARG}";;
+ d) DESTINATION="${OPTARG}";;
+ m) MAKE_BOOTSTRAP_OPTIONS="${OPTARG}";;
+ o) OBJDIR="${OPTARG}";;
+ p) SVN_PROTOCOL="${OPTARG}";;
+ t) TARFILE="${OPTARG}";;
+ x) MAKE_CHECK_OPTIONS="${OPTARG}";;
+ b) SVN_BRANCH="${OPTARG}";;
+ u) SVN_USERNAME="${OPTARG}";;
+ \?) usage;;
+ esac
+done
+shift `expr ${OPTIND} - 1`
+
+# Handle the major modes.
+while [ $# -ne 0 ]; do
+ case $1 in
+ bootstrap) BOOTSTRAP=1;;
+ build) CONFIGURE=1; BOOTSTRAP=1;;
+ checkout) CHECKOUT=1;;
+ configure) CONFIGURE=1;;
+ export) EXPORT=1;;
+ install) INSTALL=1;;
+ test) TEST=1;;
+ update) UPDATE=1;;
+ *) usage;;
+ esac
+ shift
+done
+
+# Check the arguments for sanity.
+if [ ${CHECKOUT} -ne 0 ] && [ ${UPDATE} -ne 0 ]; then
+ error "Cannot checkout and update simultaneously"
+fi
+
+if [ ${CHECKOUT} -eq 0 ] && test -n "${SVN_BRANCH}"; then
+ error "Branch argument only makes sense when doing a checkout"
+fi
+
+# Validate the branch name.
+if test -n "${SVN_BRANCH}"; then
+ SVN_BRANCH="branches/${SVN_BRANCH}";
+else
+ SVN_BRANCH="trunk";
+fi
+
+# Checkout the tree.
+if [ ${CHECKOUT} -ne 0 ]; then
+ checkout_gcc
+elif [ ${UPDATE} -ne 0 ]; then
+ update_gcc
+fi
+
+# Configure to build the tree.
+if [ ${CONFIGURE} -ne 0 ]; then
+ configure_gcc
+fi
+
+# Bootstrap the compiler.
+if [ ${BOOTSTRAP} -ne 0 ]; then
+ bootstrap_gcc
+fi
+
+# Test the compiler
+if [ ${TEST} -ne 0 ]; then
+ test_gcc
+fi
+
+# Install the compiler.
+if [ ${INSTALL} -ne 0 ]; then
+ install_gcc
+fi
+
+# Export the sources
+if [ ${EXPORT} -ne 0 ]; then
+ export_gcc
+fi
diff --git a/gcc-4.3.1/contrib/gcc_update b/gcc-4.3.1/contrib/gcc_update
new file mode 100755
index 000000000..f29dcb609
--- /dev/null
+++ b/gcc-4.3.1/contrib/gcc_update
@@ -0,0 +1,277 @@
+#! /bin/sh
+#
+# Update a local Subversion tree from the GCC repository, with an emphasis
+# on treating generated files correctly, so that autoconf, gperf et
+# al are not required for the ``end'' user.
+#
+# (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation
+# Originally by Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>, August 1998.
+#
+# This script is Free Software, and it can be copied, distributed and
+# modified as defined in the GNU General Public License. A copy of
+# its license can be downloaded from http://www.gnu.org/copyleft/gpl.html
+#
+#
+# By default all command-line options are passed to `svn update` in
+# addition to $UPDATE_OPTIONS (defined below). If the first parameter
+# reads --nostdflags, $UPDATE_OPTIONS as well as this parameter itself
+# are omitted.
+#
+# If the first parameter reads --patch, the second parameter is considered
+# a patch file.
+#
+# If the first parameter is --touch, no svn operation will be performed,
+# only generated files that appear to be out of date in the local tree
+# will be touched.
+#
+# If the first parameter is --list, a list of the generated files and
+# their dependencies will be printed; --help prints this message.
+#
+# If the first parameter is --silent, this script prints nothing other
+# than error messages; the second parameter is then interpreted as above.
+#
+# Examples:
+#
+# contrib/gcc_update -r 8712
+# contrib/gcc_update --patch some-patch
+# contrib/gcc_update --touch
+# contrib/gcc_update --list
+
+
+# SVN command
+GCC_SVN=${GCC_SVN-${SVN-svn}}
+# Default options used when updating via SVN (none).
+UPDATE_OPTIONS=""
+
+######## Anything below shouldn't be changed by regular users.
+
+# Arrange for the value of $0 to be available for functions
+self=$0
+
+# This function prints its arguments to standard output unless
+# "silent" is set.
+unset silent
+chat () {
+ if [ -z "$silent" ]; then
+ echo "$@"
+ fi
+}
+
+# This function prints a list of all generated files, along with their
+# dependencies. Note that only one target is supported per line: the
+# colon is stripped from the output.
+files_and_dependencies () {
+ sed -e 's/ *#.*//' -e '/^$/d' -e 's/://' <<\EOF
+# fixincludes
+fixincludes/configure: fixincludes/configure.ac fixincludes/aclocal.m4
+fixincludes/config.h.in: fixincludes/configure.ac fixincludes/aclocal.m4
+# intl library
+intl/plural.c: intl/plural.y
+intl/configure: intl/configure.ac intl/aclocal.m4
+intl/config.h.in: intl/configure.ac intl/aclocal.m4
+# Now, proceed to gcc automatically generated files
+gcc/configure: gcc/configure.ac
+gcc/cstamp-h.in: gcc/configure.ac
+gcc/config.in: gcc/cstamp-h.in
+gcc/fixinc/fixincl.x: gcc/fixinc/fixincl.tpl gcc/fixinc/inclhack.def
+gcc/config/arm/arm-tune.md: gcc/config/arm/arm-cores.def gcc/config/arm/gentune.sh
+# And then, language-specific files
+gcc/cp/cfns.h: gcc/cp/cfns.gperf
+gcc/java/keyword.h: gcc/java/keyword.gperf
+# testsuite
+# Without this, _Pragma3.c can have a false negative.
+gcc/testsuite/gcc.dg/cpp/_Pragma3.c: gcc/testsuite/gcc.dg/cpp/mi1c.h
+# Similarly, without this, you will see:
+# direct2s.c:35: warning: current file is older than direct2.c
+gcc/testsuite/gcc.dg/cpp/direct2s.c: gcc/testsuite/gcc.dg/cpp/direct2.c
+# And libraries, at last
+libbanshee/configure: libbanshee/configure.ac
+libmudflap/configure: libmudflap/configure.ac
+libobjc/configure: libobjc/configure.ac
+zlib/aclocal.m4: zlib/configure.ac zlib/acinclude.m4
+zlib/Makefile.in: zlib/Makefile.am zlib/configure.ac zlib/aclocal.m4
+zlib/configure: zlib/configure.ac zlib/aclocal.m4
+fastjar/aclocal.m4: fastjar/configure.ac
+fastjar/Makefile.in: fastjar/Makefile.am fastjar/configure.ac fastjar/aclocal.m4
+fastjar/configure: fastjar/configure.ac fastjar/aclocal.m4
+boehm-gc/aclocal.m4: boehm-gc/configure.ac
+boehm-gc/Makefile.in: boehm-gc/Makefile.am boehm-gc/configure.ac boehm-gc/aclocal.m4
+boehm-gc/configure: boehm-gc/configure.ac boehm-gc/aclocal.m4
+libada/configure: libada/configure.ac
+libffi/aclocal.m4: libffi/configure.ac libffi/acinclude.m4
+libffi/Makefile.in: libffi/Makefile.am libffi/configure.ac libffi/aclocal.m4
+libffi/configure: libffi/configure.ac libffi/aclocal.m4
+libffi/fficonfig.h.in: libffi/configure.ac libffi/aclocal.m4
+libgfortran/aclocal.m4: libgfortran/configure.ac libgfortran/acinclude.m4
+libgfortran/Makefile.in: libgfortran/Makefile.am libgfortran/configure.ac libgfortran/aclocal.m4
+libgfortran/configure: libgfortran/configure.ac libgfortran/aclocal.m4
+libjava/aclocal.m4: libjava/configure.ac
+libjava/Makefile.in: libjava/Makefile.am libjava/configure.ac libjava/aclocal.m4
+libjava/configure: libjava/configure.ac libjava/aclocal.m4
+libjava/libltdl/aclocal.m4: libjava/libltdl/configure.ac libjava/libltdl/acinclude.m4
+libjava/libltdl/Makefile.in: libjava/libltdl/Makefile.am libjava/libltdl/configure.ac libjava/libltdl/aclocal.m4
+libjava/libltdl/configure: libjava/libltdl/configure.ac libjava/libltdl/aclocal.m4
+libjava/libltdl/config-h.in: libjava/libltdl/configure.ac libjava/libltdl/aclocal.m4
+libcpp/aclocal.m4: libcpp/configure.ac
+libcpp/Makefile.in: libcpp/configure.ac libcpp/aclocal.m4
+libcpp/configure: libcpp/configure.ac libcpp/aclocal.m4
+libgomp/aclocal.m4: libgomp/configure.ac libgomp/acinclude.m4
+libgomp/Makefile.in: libgomp/Makefile.am libgomp/aclocal.m4
+libgomp/testsuite/Makefile.in: libgomp/Makefile.am libgomp/aclocal.m4
+libgomp/configure: libgomp/configure.ac libgomp/aclocal.m4
+libgomp/config.h.in: libgomp/configure.ac libgomp/aclocal.m4
+# Top level
+Makefile.in: Makefile.tpl Makefile.def
+configure: configure.ac config/acx.m4
+EOF
+}
+
+
+# This function touches generated files such that the ``end'' user does
+# not have to rebuild them.
+touch_files () {
+ rm -f Makefile.$$
+ echo 'all: \' > Makefile.$$
+ files_and_dependencies | sed 's, .*, \\,' >> Makefile.$$
+ echo '; @true' >> Makefile.$$
+ files_and_dependencies | sed 's, ,: ,' >> Makefile.$$
+ files_and_dependencies | sed 's, .*, \\,' >> Makefile.$$
+ echo ':' >> Makefile.$$
+ echo ' @for f in $? $@; do test -f $$f || exit 0; done; \' >> Makefile.$$
+ echo ' echo Touching $@...; \' >> Makefile.$$
+ chat ' echo Touching $@... 1>&2; \' >> Makefile.$$
+ echo ' touch $@' >> Makefile.$$
+ files_and_dependencies | sed 's,[^ ]* ,,;s,$, :,' >> Makefile.$$
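+ # For a single list entry such as "gcc/configure: gcc/configure.ac" the
+ # generated Makefile.$$ looks roughly like the sketch below (the recipe
+ # lines begin with a tab; in non-silent runs an extra
+ # "echo Touching $@... 1>&2" line is also emitted):
+ #
+ #   all: \
+ #   gcc/configure \
+ #   ; @true
+ #   gcc/configure: gcc/configure.ac
+ #   gcc/configure \
+ #   :
+ #   	@for f in $? $@; do test -f $$f || exit 0; done; \
+ #   	echo Touching $@...; \
+ #   	touch $@
+ #   gcc/configure.ac :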
+
+ # We would have to explicitly shut off the "Entering... Leaving..."
+ # messages through "--no-print-directory" to handle the case when
+ # we were called from a recursive invocation (i.e. "$(MAKE)" in a
+ # Makefile, not just make). Passing only "-s" doesn't help then,
+ # because make has helpfully added "-w" to MAKEFLAGS automatically.
+ # Unfortunately we do not require GNU make other than for building
+ # and testing, so let's just grep known text explicitly echoed by
+ # the rule.
+ while ${MAKE-make} -s -f Makefile.$$ all | grep Touching > /dev/null; do
+ sleep 1
+ done 2>&1
+ rm -f Makefile.$$
+}
+
+
+# Whenever we update the tree or install a patch, we may be modifying
+# this script. By re-execing it, we ensure that the appropriate
+# dependencies and rules will be used.
+touch_files_reexec () {
+ chat "Adjusting file timestamps"
+ exec ${CONFIG_SHELL-/bin/sh} $self ${silent+"--silent"} --touch
+}
+
+# This function applies a patch to an existing tree.
+apply_patch () {
+ if [ -f "$1" ]; then
+ echo "Applying patch file $1"
+ case $1 in
+ *gz)
+ gzip -d -c "$1" | patch -p1 ;;
+ *bz2)
+ bzip2 -d -c "$1" | patch -p1 ;;
+ *)
+ patch -p1 < "$1";;
+ esac
+ fi
+ touch_files_reexec
+}
+
+# Check whether this indeed looks like a local tree.
+if [ ! -f gcc/version.c ]; then
+ echo "This does not seem to be a GCC tree!"
+ exit
+fi
+
+case "$1" in
+--silent)
+ silent=t
+ shift
+ ;;
+esac
+
+case "$1" in
+# First of all, check whether we are going to process a patch.
+--patch)
+ if test "$#" != 2; then
+ echo "$1" expects only one argument >&2
+ exit 1
+ fi
+ apply_patch "${2}"
+ exit $?
+ ;;
+
+--touch)
+ if test "$#" != 1; then
+ echo "$1" does not expect any argument >&2
+ exit 1
+ fi
+ touch_files
+ exit $?
+ ;;
+
+--list)
+ if test "$#" != 1; then
+ echo "$1" does not expect any argument >&2
+ exit 1
+ fi
+ files_and_dependencies | sed 's/ /: /'
+ exit $?
+ ;;
+
+--help)
+ sed -e '1,2d' -e '/^UPDATE_OPTIONS=/{i\
+\
+
+p
+}' \
+ -e '/^$/,$d' -e 's/#//' -e 's/^ //' < $0
+ exit $?
+ ;;
+
+esac
+
+# Check whether this indeed looks like a local SVN tree.
+if [ ! -d .svn ]; then
+ echo "This does not seem to be a GCC SVN tree!"
+ exit
+fi
+
+# Check command-line options
+if [ x"${1}"x = x"--nostdflags"x ]; then
+ shift
+else
+ set -- $UPDATE_OPTIONS ${1+"$@"}
+fi
+
+chat "Updating SVN tree"
+
+$GCC_SVN ${silent+-q} --non-interactive update ${1+"$@"}
+if [ $? -ne 0 ]; then
+ (touch_files_reexec)
+ echo "SVN update of full tree failed." >&2
+ exit 1
+fi
+
+rm -f LAST_UPDATED gcc/REVISION
+
+revision=`svn info | awk '/Revision:/ { print $2 }'`
+branch=`svn info | sed -ne "/URL:/ {
+s,.*/trunk,trunk,
+s,.*/branches/,,
+s,.*/tags/,,
+p
+}"`
+{
+ date
+ echo "`TZ=UTC date` (revision $revision)"
+} > LAST_UPDATED
+
+echo "[$branch revision $revision]" > gcc/REVISION
+
+touch_files_reexec
diff --git a/gcc-4.3.1/contrib/gccbug.el b/gcc-4.3.1/contrib/gccbug.el
new file mode 100644
index 000000000..55821e5e6
--- /dev/null
+++ b/gcc-4.3.1/contrib/gccbug.el
@@ -0,0 +1,84 @@
+;;; gccbug.el --- forward bug reports to gnats
+;; (C) 2000 Free Software Foundation
+;; Written by Martin v. Löwis
+;; Usage:
+;; In rmail, bind a key to rmail-gccbug-reply, e.g.
+;; (require 'rmail)
+;; (require 'gccbug)
+;; (define-key rmail-mode-map "R" 'rmail-gccbug-reply)
+;; Then, when reviewing a report, type R to create a gnats-formatted
+;; message.
+
+(provide 'gccbug)
+
+(defun gccbug-reply ()
+ (interactive)
+ (let ((orig-yank-prefix mail-yank-prefix))
+ (insert ">Submitter-Id: net\n")
+ (insert ">Originator: \n")
+ (insert ">Confidential: no\n")
+ (insert ">Synopsis: ")
+ (save-excursion
+ (mail-subject)
+ (let ((stop (point)))
+ (re-search-backward "Re: ")
+ (copy-region-as-kill (match-end 0) stop)))
+ (yank)
+ (insert "\n")
+ (insert ">Severity: serious\n")
+ (insert ">Priority: medium\n")
+ (insert ">Category: \n")
+ (insert ">Class: \n")
+ ;(insert ">State: analyzed\n")
+ (insert ">Release: 2.95.2\n")
+ (insert ">Environment:\n")
+ (insert ">Description:\n")
+ (set 'mail-yank-prefix nil)
+ (set 'mail-yank-ignored-headers
+ "^via:\\|^mail-from:\\|^origin:\\|^status:\\|^remailed\\|^received:\\|^summary-line:\\|^to:\\|^subject:\\|^in-reply-to:\\|^return-path:\\|^X-.*:\\|^User-Agent:\\|^MIME-Version:\\|^Content-.*:\\|^List-.*:\\|C[Cc]:\\|^Precedence:\\|^Sender:\\|^Mailing-List:\\|^Delivered-To:\\|^>From")
+ (mail-yank-original t)
+ (set 'mail-yank-prefix orig-yank-prefix)
+ ; Copy From: field to Originator:
+ (re-search-backward "From: ")
+ (let ((beg (match-end 0)))
+ (end-of-line)
+ (kill-region beg (point)))
+ (re-search-backward ">Originator: ")
+ (goto-char (match-end 0))
+ (yank)
+ ; Kill From: line
+ (re-search-forward "From:")
+ (beginning-of-line)
+ (kill-line 1)
+ ; Replace Message-ID: with Original-Message-ID
+ (beginning-of-buffer)
+ (re-search-forward "Message-ID: .*")
+ (replace-match "Original-\\&")
+ ; Replace To: line, adding recipient to Notify list
+ (mail-to)
+ (re-search-backward "To: ")
+ (replace-match "To: gcc-gnats@gcc.gnu.org\nX-GNATS-Notify: ")
+ ; add additional fields
+ (end-of-buffer)
+ (insert ">How-To-Repeat: \n>Fix: \n")
+ ; See whether an Organization: is present
+ (let ((org (re-search-backward "Organization:.*" nil t)))
+ (if org
+ (progn
+ (kill-region (point) (match-end 0))
+ (re-search-backward ">Confidential")
+ (insert ">")
+ (yank)
+ (insert "\n"))))
+; This kills CC: lines, but rmail-reply below won't create any
+; (mail-cc)
+; (beginning-of-line)
+; (kill-line 1)
+ ))
+
+(defun rmail-gccbug-reply ()
+ (interactive)
+ (rmail-toggle-header 0)
+ (rmail-reply t)
+ (gccbug-reply))
+
diff --git a/gcc-4.3.1/contrib/gennews b/gcc-4.3.1/contrib/gennews
new file mode 100755
index 000000000..6e0a47d1a
--- /dev/null
+++ b/gcc-4.3.1/contrib/gennews
@@ -0,0 +1,62 @@
+#! /bin/sh
+#
+# Script to generate the NEWS file from online release notes.
+# Contributed by Joseph Myers <jsm28@cam.ac.uk>.
+#
+# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING. If not, write to
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+
+website=http://gcc.gnu.org/
+files="
+ gcc-4.3/index.html gcc-4.3/changes.html
+ gcc-4.2/index.html gcc-4.2/changes.html
+ gcc-4.1/index.html gcc-4.1/changes.html
+ gcc-4.0/index.html gcc-4.0/changes.html
+ gcc-3.4/index.html gcc-3.4/changes.html
+ gcc-3.3/index.html gcc-3.3/changes.html
+ gcc-3.2/index.html gcc-3.2/changes.html
+ gcc-3.1/index.html gcc-3.1/changes.html
+ gcc-3.0/gcc-3.0.html gcc-3.0/features.html gcc-3.0/caveats.html
+ gcc-2.95/index.html gcc-2.95/features.html gcc-2.95/caveats.html
+ egcs-1.1/index.html egcs-1.1/features.html egcs-1.1/caveats.html
+ egcs-1.0/index.html egcs-1.0/features.html egcs-1.0/caveats.html"
+
+set -e
+
+cat <<EOF
+This file contains information about GCC releases which has been generated
+automatically from the online release notes. It covers releases of GCC
+(and the former EGCS project) since EGCS 1.0, on the line of development
+that led to GCC 3. For information on GCC 2.8.1 and older releases of GCC 2,
+see ONEWS.
+
+EOF
+
+header="======================================================================"
+
+echo $header
+
+for file in $files; do
+ wfile=$website$file
+ echo $wfile
+ # We lie to Lynx about the kind of terminal in use because we
+ # want to get consistent output everywhere, and we want this
+ # script to work even when it is not run interactively.
+ env TERM=vt100 lynx -dump $wfile
+ echo $header
+done
diff --git a/gcc-4.3.1/contrib/gthr_supp_vxw_5x.c b/gcc-4.3.1/contrib/gthr_supp_vxw_5x.c
new file mode 100644
index 000000000..6ce288f3e
--- /dev/null
+++ b/gcc-4.3.1/contrib/gthr_supp_vxw_5x.c
@@ -0,0 +1,92 @@
+/* Kernel-side additional module for the VxWorks threading support
+ logic for GCC. Written 2002 by Zack Weinberg.
+
+ This file is distributed with GCC, but it is not part of GCC.
+ The contents of this file are in the public domain. */
+
+/* If you are using the Tornado IDE, copy this file to
+ $WIND_BASE/target/config/comps/src/gthread_supp.c. Then create a
+ file named 10comp_gthread_supp.cdf in target/config/comps/vxWorks
+ with the following contents:
+
+ Component INCLUDE_GCC_GTHREAD {
+ NAME GCC 3.x gthread support (required by C++)
+ CONFIGLETTES gthread_supp.c
+ REQUIRES INCLUDE_CPLUS
+ INCLUDE_WHEN INCLUDE_CPLUS
+ _FOLDER FOLDER_CPLUS
+ }
+
+ If you are using command line builds, instead copy this file to
+ $WIND_BASE/target/src/config/gthread_supp.c, and add the following
+ block to target/src/config/usrExtra.c:
+
+ #ifdef INCLUDE_CPLUS
+ #include "../../src/config/gthread_supp.c"
+ #endif
+
+ You should now be able to rebuild your application using GCC 3.x. */
+
+#include <vxWorks.h>
+#include <taskLib.h>
+
+/* This file provides these routines: */
+extern void *__gthread_get_tsd_data (WIND_TCB *tcb);
+extern void __gthread_set_tsd_data (WIND_TCB *tcb, void *data);
+
+extern void __gthread_enter_tsd_dtor_context (WIND_TCB *tcb);
+extern void __gthread_leave_tsd_dtor_context (WIND_TCB *tcb);
+
+/* Set and retrieve the TSD data block for the task TCB.
+
+ Possible choices for TSD_SLOT are:
+ reserved1
+ reserved2
+ spare1
+ spare2
+ spare3
+ spare4
+ (these are all fields of the TCB structure; all have type 'int').
+
+ If you find that the slot chosen by default is already used for
+ something else, simply change the #define below and recompile this
+ file. No other file should reference TSD_SLOT directly. */
+
+/* WARNING: This code is not 64-bit clean (it assumes that a pointer
+ can be held in an 'int' without truncation). As much of the rest
+ of VxWorks also makes this assumption, we can't really avoid it. */
+
+#define TSD_SLOT reserved1
+
+void *
+__gthread_get_tsd_data (WIND_TCB *tcb)
+{
+ return (void *) (tcb->TSD_SLOT);
+}
+
+void
+__gthread_set_tsd_data (WIND_TCB *tcb, void *data)
+{
+ tcb->TSD_SLOT = (int) data;
+}
+
+/* Enter and leave "TSD destructor context". This is defined as a
+ state in which it is safe to call free() from a task delete hook
+ on a memory block allocated by the task being deleted.
+ For VxWorks 5.x, nothing needs to be done. */
+
+#if __GNUC__ >= 2
+#define UNUSED __attribute__((unused))
+#else
+#define UNUSED
+#endif
+
+void
+__gthread_enter_tsd_dtor_context (WIND_TCB *tcb UNUSED)
+{
+}
+
+void
+__gthread_leave_tsd_dtor_context (WIND_TCB *tcb UNUSED)
+{
+}
diff --git a/gcc-4.3.1/contrib/index-prop b/gcc-4.3.1/contrib/index-prop
new file mode 100755
index 000000000..1ea648989
--- /dev/null
+++ b/gcc-4.3.1/contrib/index-prop
@@ -0,0 +1,26 @@
+#! /usr/bin/perl -wi
+# Fix up the output of cvs diff -c so that it works with patch.
+# We do this by propagating the full pathname from the Index: line
+# into the diff itself.
+#
+# Thrown together by Jason Merrill <jason@cygnus.com>
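+#
+# For illustration only (file names below are placeholders), given input
+# such as
+#
+#   Index: gcc/foo.c
+#   ...
+#   *** foo.c	1 Jan 2000 00:00:00 -0000	1.1
+#   --- foo.c	2 Jan 2000 00:00:00 -0000	1.2
+#
+# the "***" and "---" lines are rewritten to name gcc/foo.c, so that patch
+# can find the file from the top of the tree.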
+
+while (<>)
+{
+ if (/^Index: (.*)/)
+ {
+ $full = $1;
+ print;
+ for (1..7)
+ {
+ $_ = <>;
+ s/^([-+*]{3}) [^\t]+\t/$1 $full\t/
+ unless m{ /dev/null\t};
+ print;
+ }
+ }
+ else
+ {
+ print;
+ }
+}
diff --git a/gcc-4.3.1/contrib/paranoia.cc b/gcc-4.3.1/contrib/paranoia.cc
new file mode 100644
index 000000000..ce21d3520
--- /dev/null
+++ b/gcc-4.3.1/contrib/paranoia.cc
@@ -0,0 +1,2714 @@
+/* A C version of Kahan's Floating Point Test "Paranoia"
+
+Thos Sumner, UCSF, Feb. 1985
+David Gay, BTL, Jan. 1986
+
+This is a rewrite from the Pascal version by
+
+B. A. Wichmann, 18 Jan. 1985
+
+(and does NOT exhibit good C programming style).
+
+Adjusted to use Standard C headers 19 Jan. 1992 (dmg);
+
+(C) Apr 19 1983 in BASIC version by:
+Professor W. M. Kahan,
+567 Evans Hall
+Electrical Engineering & Computer Science Dept.
+University of California
+Berkeley, California 94720
+USA
+
+converted to Pascal by:
+B. A. Wichmann
+National Physical Laboratory
+Teddington Middx
+TW11 OLW
+UK
+
+converted to C by:
+
+David M. Gay and Thos Sumner
+AT&T Bell Labs Computer Center, Rm. U-76
+600 Mountain Avenue University of California
+Murray Hill, NJ 07974 San Francisco, CA 94143
+USA USA
+
+with simultaneous corrections to the Pascal source (reflected
+in the Pascal source available over netlib).
+[A couple of bug fixes from dgh = sun!dhough incorporated 31 July 1986.]
+
+Reports of results on various systems from all the versions
+of Paranoia are being collected by Richard Karpinski at the
+same address as Thos Sumner. This includes sample outputs,
+bug reports, and criticisms.
+
+You may copy this program freely if you acknowledge its source.
+Comments on the Pascal version to NPL, please.
+
+The following is from the introductory commentary from Wichmann's work:
+
+The BASIC program of Kahan is written in Microsoft BASIC using many
+facilities which have no exact analogy in Pascal. The Pascal
+version below cannot therefore be exactly the same. Rather than be
+a minimal transcription of the BASIC program, the Pascal coding
+follows the conventional style of block-structured languages. Hence
+the Pascal version could be useful in producing versions in other
+structured languages.
+
+Rather than use identifiers of minimal length (which therefore have
+little mnemonic significance), the Pascal version uses meaningful
+identifiers as follows [Note: A few changes have been made for C]:
+
+
+BASIC C BASIC C BASIC C
+
+A J S StickyBit
+A1 AInverse J0 NoErrors T
+B Radix [Failure] T0 Underflow
+B1 BInverse J1 NoErrors T2 ThirtyTwo
+B2 RadixD2 [SeriousDefect] T5 OneAndHalf
+B9 BMinusU2 J2 NoErrors T7 TwentySeven
+C [Defect] T8 TwoForty
+C1 CInverse J3 NoErrors U OneUlp
+D [Flaw] U0 UnderflowThreshold
+D4 FourD K PageNo U1
+E0 L Milestone U2
+E1 M V
+E2 Exp2 N V0
+E3 N1 V8
+E5 MinSqEr O Zero V9
+E6 SqEr O1 One W
+E7 MaxSqEr O2 Two X
+E8 O3 Three X1
+E9 O4 Four X8
+F1 MinusOne O5 Five X9 Random1
+F2 Half O8 Eight Y
+F3 Third O9 Nine Y1
+F6 P Precision Y2
+F9 Q Y9 Random2
+G1 GMult Q8 Z
+G2 GDiv Q9 Z0 PseudoZero
+G3 GAddSub R Z1
+H R1 RMult Z2
+H1 HInverse R2 RDiv Z9
+I R3 RAddSub
+IO NoTrials R4 RSqrt
+I3 IEEE R9 Random9
+
+SqRWrng
+
+All the variables in BASIC are true variables and in consequence,
+the program is more difficult to follow since the "constants" must
+be determined (the glossary is very helpful). The Pascal version
+uses Real constants, but checks are added to ensure that the values
+are correctly converted by the compiler.
+
+The major textual change to the Pascal version apart from the
+identifiers is that named procedures are used, inserting parameters
+where helpful. New procedures are also introduced. The
+correspondence is as follows:
+
+
+BASIC Pascal
+lines
+
+90- 140 Pause
+170- 250 Instructions
+380- 460 Heading
+480- 670 Characteristics
+690- 870 History
+2940-2950 Random
+3710-3740 NewD
+4040-4080 DoesYequalX
+4090-4110 PrintIfNPositive
+4640-4850 TestPartialUnderflow
+
+*/
+
+ /* This version of paranoia has been modified to work with GCC's internal
+ software floating point emulation library, as a sanity check of same.
+
+ I'm doing this in C++ so that I can do operator overloading and not
+ have to modify so damned much of the existing code. */
+
+ extern "C" {
+#include <stdio.h>
+#include <stddef.h>
+#include <limits.h>
+#include <string.h>
+#include <stdlib.h>
+#include <math.h>
+#include <unistd.h>
+#include <float.h>
+
+ /* This part is made all the more awful because many gcc headers are
+ not prepared at all to be parsed as C++. The biggest stickler
+ here is const structure members. So we include exactly the pieces
+ that we need. */
+
+#define GTY(x)
+
+#include "ansidecl.h"
+#include "auto-host.h"
+#include "hwint.h"
+
+#undef EXTRA_MODES_FILE
+
+ struct rtx_def;
+ typedef struct rtx_def *rtx;
+ struct rtvec_def;
+ typedef struct rtvec_def *rtvec;
+ union tree_node;
+ typedef union tree_node *tree;
+
+#define DEFTREECODE(SYM, STRING, TYPE, NARGS) SYM,
+ enum tree_code {
+#include "tree.def"
+ LAST_AND_UNUSED_TREE_CODE
+ };
+#undef DEFTREECODE
+
+#define ENUM_BITFIELD(X) enum X
+#define class klass
+
+#include "real.h"
+
+#undef class
+ }
+
+/* We never produce signals from the library. Thus setjmp need do nothing. */
+#undef setjmp
+#define setjmp(x) (0)
+
+static bool verbose = false;
+static int verbose_index = 0;
+
+/* ====================================================================== */
+/* The implementation of the abstract floating point class based on gcc's
+ real.c. I.e. the object of this exercise. Templated so that we can
+   cover all fp sizes.  */
+
+class real_c_float
+{
+ public:
+ static const enum machine_mode MODE = SFmode;
+
+ private:
+ static const int external_max = 128 / 32;
+ static const int internal_max
+ = (sizeof (REAL_VALUE_TYPE) + sizeof (long) + 1) / sizeof (long);
+ long image[external_max < internal_max ? internal_max : external_max];
+
+ void from_long(long);
+ void from_str(const char *);
+ void binop(int code, const real_c_float&);
+ void unop(int code);
+ bool cmp(int code, const real_c_float&) const;
+
+ public:
+ real_c_float()
+ { }
+ real_c_float(long l)
+ { from_long(l); }
+ real_c_float(const char *s)
+ { from_str(s); }
+ real_c_float(const real_c_float &b)
+ { memcpy(image, b.image, sizeof(image)); }
+
+ const real_c_float& operator= (long l)
+ { from_long(l); return *this; }
+ const real_c_float& operator= (const char *s)
+ { from_str(s); return *this; }
+ const real_c_float& operator= (const real_c_float &b)
+ { memcpy(image, b.image, sizeof(image)); return *this; }
+
+ const real_c_float& operator+= (const real_c_float &b)
+ { binop(PLUS_EXPR, b); return *this; }
+ const real_c_float& operator-= (const real_c_float &b)
+ { binop(MINUS_EXPR, b); return *this; }
+ const real_c_float& operator*= (const real_c_float &b)
+ { binop(MULT_EXPR, b); return *this; }
+ const real_c_float& operator/= (const real_c_float &b)
+ { binop(RDIV_EXPR, b); return *this; }
+
+ real_c_float operator- () const
+ { real_c_float r(*this); r.unop(NEGATE_EXPR); return r; }
+ real_c_float abs () const
+ { real_c_float r(*this); r.unop(ABS_EXPR); return r; }
+
+ bool operator < (const real_c_float &b) const { return cmp(LT_EXPR, b); }
+ bool operator <= (const real_c_float &b) const { return cmp(LE_EXPR, b); }
+ bool operator == (const real_c_float &b) const { return cmp(EQ_EXPR, b); }
+ bool operator != (const real_c_float &b) const { return cmp(NE_EXPR, b); }
+ bool operator >= (const real_c_float &b) const { return cmp(GE_EXPR, b); }
+ bool operator > (const real_c_float &b) const { return cmp(GT_EXPR, b); }
+
+ const char * str () const;
+ const char * hex () const;
+ long integer () const;
+ int exp () const;
+ void ldexp (int);
+};
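+
+/* A minimal usage sketch (illustrative only, not part of the test):
+
+     real_c_float a ("0.5"), b (3L);
+     a += b;
+     if (a > b)
+       printf ("%s\n", a.str ());
+
+   i.e. the class is meant to be used much like a built-in floating
+   point type by the Paranoia code below.  */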
+
+void
+real_c_float::from_long (long l)
+{
+ REAL_VALUE_TYPE f;
+
+ real_from_integer (&f, MODE, l, l < 0 ? -1 : 0, 0);
+ real_to_target (image, &f, MODE);
+}
+
+void
+real_c_float::from_str (const char *s)
+{
+ REAL_VALUE_TYPE f;
+ const char *p = s;
+
+ if (*p == '-' || *p == '+')
+ p++;
+ if (strcasecmp(p, "inf") == 0)
+ {
+ real_inf (&f);
+ if (*s == '-')
+ real_arithmetic (&f, NEGATE_EXPR, &f, NULL);
+ }
+ else if (strcasecmp(p, "nan") == 0)
+ real_nan (&f, "", 1, MODE);
+ else
+ real_from_string (&f, s);
+
+ real_to_target (image, &f, MODE);
+}
+
+void
+real_c_float::binop (int code, const real_c_float &b)
+{
+ REAL_VALUE_TYPE ai, bi, ri;
+
+ real_from_target (&ai, image, MODE);
+ real_from_target (&bi, b.image, MODE);
+ real_arithmetic (&ri, code, &ai, &bi);
+ real_to_target (image, &ri, MODE);
+
+ if (verbose)
+ {
+ char ab[64], bb[64], rb[64];
+ const real_format *fmt = real_format_for_mode[MODE - QFmode];
+ const int digits = (fmt->p * fmt->log2_b + 3) / 4;
+ char symbol_for_code;
+
+ real_from_target (&ri, image, MODE);
+ real_to_hexadecimal (ab, &ai, sizeof(ab), digits, 0);
+ real_to_hexadecimal (bb, &bi, sizeof(bb), digits, 0);
+ real_to_hexadecimal (rb, &ri, sizeof(rb), digits, 0);
+
+ switch (code)
+ {
+ case PLUS_EXPR:
+ symbol_for_code = '+';
+ break;
+ case MINUS_EXPR:
+ symbol_for_code = '-';
+ break;
+ case MULT_EXPR:
+ symbol_for_code = '*';
+ break;
+ case RDIV_EXPR:
+ symbol_for_code = '/';
+ break;
+ default:
+ abort ();
+ }
+
+ fprintf (stderr, "%6d: %s %c %s = %s\n", verbose_index++,
+ ab, symbol_for_code, bb, rb);
+ }
+}
+
+void
+real_c_float::unop (int code)
+{
+ REAL_VALUE_TYPE ai, ri;
+
+ real_from_target (&ai, image, MODE);
+ real_arithmetic (&ri, code, &ai, NULL);
+ real_to_target (image, &ri, MODE);
+
+ if (verbose)
+ {
+ char ab[64], rb[64];
+ const real_format *fmt = real_format_for_mode[MODE - QFmode];
+ const int digits = (fmt->p * fmt->log2_b + 3) / 4;
+ const char *symbol_for_code;
+
+ real_from_target (&ri, image, MODE);
+ real_to_hexadecimal (ab, &ai, sizeof(ab), digits, 0);
+ real_to_hexadecimal (rb, &ri, sizeof(rb), digits, 0);
+
+ switch (code)
+ {
+ case NEGATE_EXPR:
+ symbol_for_code = "-";
+ break;
+ case ABS_EXPR:
+ symbol_for_code = "abs ";
+ break;
+ default:
+ abort ();
+ }
+
+ fprintf (stderr, "%6d: %s%s = %s\n", verbose_index++,
+ symbol_for_code, ab, rb);
+ }
+}
+
+bool
+real_c_float::cmp (int code, const real_c_float &b) const
+{
+ REAL_VALUE_TYPE ai, bi;
+ bool ret;
+
+ real_from_target (&ai, image, MODE);
+ real_from_target (&bi, b.image, MODE);
+ ret = real_compare (code, &ai, &bi);
+
+ if (verbose)
+ {
+ char ab[64], bb[64];
+ const real_format *fmt = real_format_for_mode[MODE - QFmode];
+ const int digits = (fmt->p * fmt->log2_b + 3) / 4;
+ const char *symbol_for_code;
+
+ real_to_hexadecimal (ab, &ai, sizeof(ab), digits, 0);
+ real_to_hexadecimal (bb, &bi, sizeof(bb), digits, 0);
+
+ switch (code)
+ {
+ case LT_EXPR:
+ symbol_for_code = "<";
+ break;
+ case LE_EXPR:
+ symbol_for_code = "<=";
+ break;
+ case EQ_EXPR:
+ symbol_for_code = "==";
+ break;
+ case NE_EXPR:
+ symbol_for_code = "!=";
+ break;
+ case GE_EXPR:
+ symbol_for_code = ">=";
+ break;
+ case GT_EXPR:
+ symbol_for_code = ">";
+ break;
+ default:
+ abort ();
+ }
+
+ fprintf (stderr, "%6d: %s %s %s = %s\n", verbose_index++,
+ ab, symbol_for_code, bb, (ret ? "true" : "false"));
+ }
+
+ return ret;
+}
+
+const char *
+real_c_float::str() const
+{
+ REAL_VALUE_TYPE f;
+ const real_format *fmt = real_format_for_mode[MODE - QFmode];
+ const int digits = int(fmt->p * fmt->log2_b * .30102999566398119521 + 1);
+
+ real_from_target (&f, image, MODE);
+ char *buf = new char[digits + 10];
+ real_to_decimal (buf, &f, digits+10, digits, 0);
+
+ return buf;
+}
+
+const char *
+real_c_float::hex() const
+{
+ REAL_VALUE_TYPE f;
+ const real_format *fmt = real_format_for_mode[MODE - QFmode];
+ const int digits = (fmt->p * fmt->log2_b + 3) / 4;
+
+ real_from_target (&f, image, MODE);
+ char *buf = new char[digits + 10];
+ real_to_hexadecimal (buf, &f, digits+10, digits, 0);
+
+ return buf;
+}
+
+long
+real_c_float::integer() const
+{
+ REAL_VALUE_TYPE f;
+ real_from_target (&f, image, MODE);
+ return real_to_integer (&f);
+}
+
+int
+real_c_float::exp() const
+{
+ REAL_VALUE_TYPE f;
+ real_from_target (&f, image, MODE);
+ return real_exponent (&f);
+}
+
+void
+real_c_float::ldexp (int exp)
+{
+ REAL_VALUE_TYPE ai;
+
+ real_from_target (&ai, image, MODE);
+ real_ldexp (&ai, &ai, exp);
+ real_to_target (image, &ai, MODE);
+}
+
+/* ====================================================================== */
+/* An implementation of the abstract floating point class that uses native
+ arithmetic. Exists for reference and debugging. */
+
+template<typename T>
+class native_float
+{
+ private:
+ // Force intermediate results back to memory.
+ volatile T image;
+
+ static T from_str (const char *);
+ static T do_abs (T);
+ static T verbose_binop (T, char, T, T);
+ static T verbose_unop (const char *, T, T);
+ static bool verbose_cmp (T, const char *, T, bool);
+
+ public:
+ native_float()
+ { }
+ native_float(long l)
+ { image = l; }
+ native_float(const char *s)
+ { image = from_str(s); }
+ native_float(const native_float &b)
+ { image = b.image; }
+
+ const native_float& operator= (long l)
+ { image = l; return *this; }
+ const native_float& operator= (const char *s)
+ { image = from_str(s); return *this; }
+ const native_float& operator= (const native_float &b)
+ { image = b.image; return *this; }
+
+ const native_float& operator+= (const native_float &b)
+ {
+ image = verbose_binop(image, '+', b.image, image + b.image);
+ return *this;
+ }
+ const native_float& operator-= (const native_float &b)
+ {
+ image = verbose_binop(image, '-', b.image, image - b.image);
+ return *this;
+ }
+ const native_float& operator*= (const native_float &b)
+ {
+ image = verbose_binop(image, '*', b.image, image * b.image);
+ return *this;
+ }
+ const native_float& operator/= (const native_float &b)
+ {
+ image = verbose_binop(image, '/', b.image, image / b.image);
+ return *this;
+ }
+
+ native_float operator- () const
+ {
+ native_float r;
+ r.image = verbose_unop("-", image, -image);
+ return r;
+ }
+ native_float abs () const
+ {
+ native_float r;
+ r.image = verbose_unop("abs ", image, do_abs(image));
+ return r;
+ }
+
+ bool operator < (const native_float &b) const
+ { return verbose_cmp(image, "<", b.image, image < b.image); }
+ bool operator <= (const native_float &b) const
+ { return verbose_cmp(image, "<=", b.image, image <= b.image); }
+ bool operator == (const native_float &b) const
+ { return verbose_cmp(image, "==", b.image, image == b.image); }
+ bool operator != (const native_float &b) const
+ { return verbose_cmp(image, "!=", b.image, image != b.image); }
+ bool operator >= (const native_float &b) const
+ { return verbose_cmp(image, ">=", b.image, image >= b.image); }
+ bool operator > (const native_float &b) const
+ { return verbose_cmp(image, ">", b.image, image > b.image); }
+
+ const char * str () const;
+ const char * hex () const;
+ long integer () const
+ { return long(image); }
+ int exp () const;
+ void ldexp (int);
+};
+
+template<typename T>
+inline T
+native_float<T>::from_str (const char *s)
+{
+ return strtold (s, NULL);
+}
+
+template<>
+inline float
+native_float<float>::from_str (const char *s)
+{
+ return strtof (s, NULL);
+}
+
+template<>
+inline double
+native_float<double>::from_str (const char *s)
+{
+ return strtod (s, NULL);
+}
+
+template<typename T>
+inline T
+native_float<T>::do_abs (T image)
+{
+ return fabsl (image);
+}
+
+template<>
+inline float
+native_float<float>::do_abs (float image)
+{
+ return fabsf (image);
+}
+
+template<>
+inline double
+native_float<double>::do_abs (double image)
+{
+ return fabs (image);
+}
+
+template<typename T>
+T
+native_float<T>::verbose_binop (T a, char symbol, T b, T r)
+{
+ if (verbose)
+ {
+ const int digits = int(sizeof(T) * CHAR_BIT / 4) - 1;
+#ifdef NO_LONG_DOUBLE
+ fprintf (stderr, "%6d: %.*a %c %.*a = %.*a\n", verbose_index++,
+ digits, (double)a, symbol,
+ digits, (double)b, digits, (double)r);
+#else
+ fprintf (stderr, "%6d: %.*La %c %.*La = %.*La\n", verbose_index++,
+ digits, (long double)a, symbol,
+ digits, (long double)b, digits, (long double)r);
+#endif
+ }
+ return r;
+}
+
+template<typename T>
+T
+native_float<T>::verbose_unop (const char *symbol, T a, T r)
+{
+ if (verbose)
+ {
+ const int digits = int(sizeof(T) * CHAR_BIT / 4) - 1;
+#ifdef NO_LONG_DOUBLE
+ fprintf (stderr, "%6d: %s%.*a = %.*a\n", verbose_index++,
+ symbol, digits, (double)a, digits, (double)r);
+#else
+ fprintf (stderr, "%6d: %s%.*La = %.*La\n", verbose_index++,
+ symbol, digits, (long double)a, digits, (long double)r);
+#endif
+ }
+ return r;
+}
+
+template<typename T>
+bool
+native_float<T>::verbose_cmp (T a, const char *symbol, T b, bool r)
+{
+ if (verbose)
+ {
+ const int digits = int(sizeof(T) * CHAR_BIT / 4) - 1;
+#ifdef NO_LONG_DOUBLE
+ fprintf (stderr, "%6d: %.*a %s %.*a = %s\n", verbose_index++,
+ digits, (double)a, symbol,
+ digits, (double)b, (r ? "true" : "false"));
+#else
+ fprintf (stderr, "%6d: %.*La %s %.*La = %s\n", verbose_index++,
+ digits, (long double)a, symbol,
+ digits, (long double)b, (r ? "true" : "false"));
+#endif
+ }
+ return r;
+}
+
+template<typename T>
+const char *
+native_float<T>::str() const
+{
+ char *buf = new char[50];
+ const int digits = int(sizeof(T) * CHAR_BIT * .30102999566398119521 + 1);
+#ifdef NO_LONG_DOUBLE
+ sprintf (buf, "%.*e", digits - 1, (double) image);
+#else
+ sprintf (buf, "%.*Le", digits - 1, (long double) image);
+#endif
+ return buf;
+}
+
+template<typename T>
+const char *
+native_float<T>::hex() const
+{
+ char *buf = new char[50];
+ const int digits = int(sizeof(T) * CHAR_BIT / 4);
+#ifdef NO_LONG_DOUBLE
+ sprintf (buf, "%.*a", digits - 1, (double) image);
+#else
+ sprintf (buf, "%.*La", digits - 1, (long double) image);
+#endif
+ return buf;
+}
+
+template<typename T>
+int
+native_float<T>::exp() const
+{
+ int e;
+ frexp (image, &e);
+ return e;
+}
+
+template<typename T>
+void
+native_float<T>::ldexp (int exp)
+{
+ image = ldexpl (image, exp);
+}
+
+template<>
+void
+native_float<float>::ldexp (int exp)
+{
+ image = ldexpf (image, exp);
+}
+
+template<>
+void
+native_float<double>::ldexp (int exp)
+{
+ image = ::ldexp (image, exp);
+}
+
+/* ====================================================================== */
+/* Some libm routines that Paranoia expects to be available. */
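+/* FABS, FLOOR and the arithmetic operators below work directly on FLOAT;
+   SQRT, LOG, EXP and POW cheat by round-tripping through FLOAT::hex(),
+   the host libm (sqrt/log/exp/pow or their long double variants) and a
+   hex-string re-parse, so only that final re-parse is rounded to FLOAT.  */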
+
+template<typename FLOAT>
+inline FLOAT
+FABS (const FLOAT &f)
+{
+ return f.abs();
+}
+
+template<typename FLOAT, typename RHS>
+inline FLOAT
+operator+ (const FLOAT &a, const RHS &b)
+{
+ return FLOAT(a) += FLOAT(b);
+}
+
+template<typename FLOAT, typename RHS>
+inline FLOAT
+operator- (const FLOAT &a, const RHS &b)
+{
+ return FLOAT(a) -= FLOAT(b);
+}
+
+template<typename FLOAT, typename RHS>
+inline FLOAT
+operator* (const FLOAT &a, const RHS &b)
+{
+ return FLOAT(a) *= FLOAT(b);
+}
+
+template<typename FLOAT, typename RHS>
+inline FLOAT
+operator/ (const FLOAT &a, const RHS &b)
+{
+ return FLOAT(a) /= FLOAT(b);
+}
+
+template<typename FLOAT>
+FLOAT
+FLOOR (const FLOAT &f)
+{
+ /* ??? This is only correct when F is representable as an integer. */
+ long i = f.integer();
+ FLOAT r;
+
+ r = i;
+ if (i < 0 && f != r)
+ r = i - 1;
+
+ return r;
+}
+
+template<typename FLOAT>
+FLOAT
+SQRT (const FLOAT &f)
+{
+#if 0
+ FLOAT zero = long(0);
+ FLOAT two = 2;
+ FLOAT one = 1;
+ FLOAT diff, diff2;
+ FLOAT z, t;
+
+ if (f == zero)
+ return zero;
+ if (f < zero)
+ return zero / zero;
+ if (f == one)
+ return f;
+
+ z = f;
+ z.ldexp (-f.exp() / 2);
+
+ diff2 = FABS (z * z - f);
+ if (diff2 > zero)
+ while (1)
+ {
+ t = (f / (two * z)) + (z / two);
+ diff = FABS (t * t - f);
+ if (diff >= diff2)
+ break;
+ z = t;
+ diff2 = diff;
+ }
+
+ return z;
+#elif defined(NO_LONG_DOUBLE)
+ double d;
+ char buf[64];
+
+ d = strtod (f.hex(), NULL);
+ d = sqrt (d);
+ sprintf(buf, "%.35a", d);
+
+ return FLOAT(buf);
+#else
+ long double ld;
+ char buf[64];
+
+ ld = strtold (f.hex(), NULL);
+ ld = sqrtl (ld);
+ sprintf(buf, "%.35La", ld);
+
+ return FLOAT(buf);
+#endif
+}
+
+template<typename FLOAT>
+FLOAT
+LOG (FLOAT x)
+{
+#if 0
+ FLOAT zero = long(0);
+ FLOAT one = 1;
+
+ if (x <= zero)
+ return zero / zero;
+ if (x == one)
+ return zero;
+
+ int exp = x.exp() - 1;
+ x.ldexp(-exp);
+
+ FLOAT xm1 = x - one;
+ FLOAT y = xm1;
+ long n = 2;
+
+ FLOAT sum = xm1;
+ while (1)
+ {
+ y *= xm1;
+ FLOAT term = y / FLOAT (n);
+ FLOAT next = sum + term;
+ if (next == sum)
+ break;
+ sum = next;
+ if (++n == 1000)
+ break;
+ }
+
+ if (exp)
+ sum += FLOAT (exp) * FLOAT(".69314718055994530941");
+
+ return sum;
+#elif defined (NO_LONG_DOUBLE)
+ double d;
+ char buf[64];
+
+ d = strtod (x.hex(), NULL);
+ d = log (d);
+ sprintf(buf, "%.35a", d);
+
+ return FLOAT(buf);
+#else
+ long double ld;
+ char buf[64];
+
+ ld = strtold (x.hex(), NULL);
+ ld = logl (ld);
+ sprintf(buf, "%.35La", ld);
+
+ return FLOAT(buf);
+#endif
+}
+
+template<typename FLOAT>
+FLOAT
+EXP (const FLOAT &x)
+{
+ /* Cheat. */
+#ifdef NO_LONG_DOUBLE
+ double d;
+ char buf[64];
+
+ d = strtod (x.hex(), NULL);
+ d = exp (d);
+ sprintf(buf, "%.35a", d);
+
+ return FLOAT(buf);
+#else
+ long double ld;
+ char buf[64];
+
+ ld = strtold (x.hex(), NULL);
+ ld = expl (ld);
+ sprintf(buf, "%.35La", ld);
+
+ return FLOAT(buf);
+#endif
+}
+
+template<typename FLOAT>
+FLOAT
+POW (const FLOAT &base, const FLOAT &exp)
+{
+ /* Cheat. */
+#ifdef NO_LONG_DOUBLE
+ double d1, d2;
+ char buf[64];
+
+ d1 = strtod (base.hex(), NULL);
+ d2 = strtod (exp.hex(), NULL);
+ d1 = pow (d1, d2);
+ sprintf(buf, "%.35a", d1);
+
+ return FLOAT(buf);
+#else
+ long double ld1, ld2;
+ char buf[64];
+
+ ld1 = strtold (base.hex(), NULL);
+ ld2 = strtold (exp.hex(), NULL);
+ ld1 = powl (ld1, ld2);
+ sprintf(buf, "%.35La", ld1);
+
+ return FLOAT(buf);
+#endif
+}
+
+/* ====================================================================== */
+/* Real Paranoia begins again here. We wrap the thing in a template so
+ that we can instantiate it for each floating point type we care for. */
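+/* For example, main() at the bottom of this file runs
+   Paranoia<real_c_float>().main() for a -g<FMT> request and
+   Paranoia< native_float<double> >().main() for -d, so each FLOAT type
+   gets its own independent set of working variables below.  */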
+
+int NoTrials = 20; /*Number of tests for commutativity. */
+bool do_pause = false;
+
+enum Guard { No, Yes };
+enum Rounding { Other, Rounded, Chopped };
+enum Class { Failure, Serious, Defect, Flaw };
+
+template<typename FLOAT>
+struct Paranoia
+{
+ FLOAT Radix, BInvrse, RadixD2, BMinusU2;
+
+ /* Small floating point constants. */
+ FLOAT Zero;
+ FLOAT Half;
+ FLOAT One;
+ FLOAT Two;
+ FLOAT Three;
+ FLOAT Four;
+ FLOAT Five;
+ FLOAT Eight;
+ FLOAT Nine;
+ FLOAT TwentySeven;
+ FLOAT ThirtyTwo;
+ FLOAT TwoForty;
+ FLOAT MinusOne;
+ FLOAT OneAndHalf;
+
+ /* Declarations of Variables. */
+ int Indx;
+ char ch[8];
+ FLOAT AInvrse, A1;
+ FLOAT C, CInvrse;
+ FLOAT D, FourD;
+ FLOAT E0, E1, Exp2, E3, MinSqEr;
+ FLOAT SqEr, MaxSqEr, E9;
+ FLOAT Third;
+ FLOAT F6, F9;
+ FLOAT H, HInvrse;
+ int I;
+ FLOAT StickyBit, J;
+ FLOAT MyZero;
+ FLOAT Precision;
+ FLOAT Q, Q9;
+ FLOAT R, Random9;
+ FLOAT T, Underflow, S;
+ FLOAT OneUlp, UfThold, U1, U2;
+ FLOAT V, V0, V9;
+ FLOAT W;
+ FLOAT X, X1, X2, X8, Random1;
+ FLOAT Y, Y1, Y2, Random2;
+ FLOAT Z, PseudoZero, Z1, Z2, Z9;
+ int ErrCnt[4];
+ int Milestone;
+ int PageNo;
+ int M, N, N1;
+ Guard GMult, GDiv, GAddSub;
+ Rounding RMult, RDiv, RAddSub, RSqrt;
+ int Break, Done, NotMonot, Monot, Anomaly, IEEE, SqRWrng, UfNGrad;
+
+ /* Computed constants. */
+ /* U1 gap below 1.0, i.e., 1.0-U1 is the next number below 1.0 */
+ /* U2 gap above 1.0, i.e., 1.0+U2 is the next number above 1.0 */
+
+ int main ();
+
+ FLOAT Sign (FLOAT);
+ FLOAT Random ();
+ void Pause ();
+ void BadCond (int, const char *);
+ void SqXMinX (int);
+ void TstCond (int, int, const char *);
+ void notify (const char *);
+ void IsYeqX ();
+ void NewD ();
+ void PrintIfNPositive ();
+ void SR3750 ();
+ void TstPtUf ();
+
+ // Pretend we're bss.
+ Paranoia() { memset(this, 0, sizeof (*this)); }
+};
+
+template<typename FLOAT>
+int
+Paranoia<FLOAT>::main()
+{
+ /* First two assignments use integer right-hand sides. */
+ Zero = long(0);
+ One = long(1);
+ Two = long(2);
+ Three = long(3);
+ Four = long(4);
+ Five = long(5);
+ Eight = long(8);
+ Nine = long(9);
+ TwentySeven = long(27);
+ ThirtyTwo = long(32);
+ TwoForty = long(240);
+ MinusOne = long(-1);
+ Half = "0x1p-1";
+ OneAndHalf = "0x3p-1";
+ ErrCnt[Failure] = 0;
+ ErrCnt[Serious] = 0;
+ ErrCnt[Defect] = 0;
+ ErrCnt[Flaw] = 0;
+ PageNo = 1;
+ /*=============================================*/
+ Milestone = 7;
+ /*=============================================*/
+ printf ("Program is now RUNNING tests on small integers:\n");
+
+ TstCond (Failure, (Zero + Zero == Zero), "0+0 != 0");
+ TstCond (Failure, (One - One == Zero), "1-1 != 0");
+ TstCond (Failure, (One > Zero), "1 <= 0");
+ TstCond (Failure, (One + One == Two), "1+1 != 2");
+
+ Z = -Zero;
+ if (Z != Zero)
+ {
+ ErrCnt[Failure] = ErrCnt[Failure] + 1;
+ printf ("Comparison alleges that -0.0 is Non-zero!\n");
+ U2 = "0.001";
+ Radix = 1;
+ TstPtUf ();
+ }
+
+ TstCond (Failure, (Three == Two + One), "3 != 2+1");
+ TstCond (Failure, (Four == Three + One), "4 != 3+1");
+ TstCond (Failure, (Four + Two * (-Two) == Zero), "4 + 2*(-2) != 0");
+ TstCond (Failure, (Four - Three - One == Zero), "4-3-1 != 0");
+
+ TstCond (Failure, (MinusOne == (Zero - One)), "-1 != 0-1");
+ TstCond (Failure, (MinusOne + One == Zero), "-1+1 != 0");
+ TstCond (Failure, (One + MinusOne == Zero), "1+(-1) != 0");
+ TstCond (Failure, (MinusOne + FABS (One) == Zero), "-1+abs(1) != 0");
+ TstCond (Failure, (MinusOne + MinusOne * MinusOne == Zero),
+ "-1+(-1)*(-1) != 0");
+
+ TstCond (Failure, Half + MinusOne + Half == Zero, "1/2 + (-1) + 1/2 != 0");
+
+ /*=============================================*/
+ Milestone = 10;
+ /*=============================================*/
+
+ TstCond (Failure, (Nine == Three * Three), "9 != 3*3");
+ TstCond (Failure, (TwentySeven == Nine * Three), "27 != 9*3");
+ TstCond (Failure, (Eight == Four + Four), "8 != 4+4");
+ TstCond (Failure, (ThirtyTwo == Eight * Four), "32 != 8*4");
+ TstCond (Failure, (ThirtyTwo - TwentySeven - Four - One == Zero),
+ "32-27-4-1 != 0");
+
+ TstCond (Failure, Five == Four + One, "5 != 4+1");
+ TstCond (Failure, TwoForty == Four * Five * Three * Four, "240 != 4*5*3*4");
+ TstCond (Failure, TwoForty / Three - Four * Four * Five == Zero,
+ "240/3 - 4*4*5 != 0");
+ TstCond (Failure, TwoForty / Four - Five * Three * Four == Zero,
+ "240/4 - 5*3*4 != 0");
+ TstCond (Failure, TwoForty / Five - Four * Three * Four == Zero,
+ "240/5 - 4*3*4 != 0");
+
+ if (ErrCnt[Failure] == 0)
+ {
+ printf ("-1, 0, 1/2, 1, 2, 3, 4, 5, 9, 27, 32 & 240 are O.K.\n");
+ printf ("\n");
+ }
+ printf ("Searching for Radix and Precision.\n");
+ W = One;
+ do
+ {
+ W = W + W;
+ Y = W + One;
+ Z = Y - W;
+ Y = Z - One;
+ }
+ while (MinusOne + FABS (Y) < Zero);
+ /*.. now W is just big enough that |((W+1)-W)-1| >= 1 ... */
+ Precision = Zero;
+ Y = One;
+ do
+ {
+ Radix = W + Y;
+ Y = Y + Y;
+ Radix = Radix - W;
+ }
+ while (Radix == Zero);
+ if (Radix < Two)
+ Radix = One;
+ printf ("Radix = %s .\n", Radix.str());
+ if (Radix != One)
+ {
+ W = One;
+ do
+ {
+ Precision = Precision + One;
+ W = W * Radix;
+ Y = W + One;
+ }
+ while ((Y - W) == One);
+ }
+ /*... now W == Radix^Precision is barely too big to satisfy (W+1)-W == 1
+ ... */
+ U1 = One / W;
+ U2 = Radix * U1;
+ printf ("Closest relative separation found is U1 = %s .\n\n", U1.str());
+ printf ("Recalculating radix and precision\n ");
+
+ /*save old values */
+ E0 = Radix;
+ E1 = U1;
+ E9 = U2;
+ E3 = Precision;
+
+ X = Four / Three;
+ Third = X - One;
+ F6 = Half - Third;
+ X = F6 + F6;
+ X = FABS (X - Third);
+ if (X < U2)
+ X = U2;
+
+ /*... now X = (unknown no.) ulps of 1+... */
+ do
+ {
+ U2 = X;
+ Y = Half * U2 + ThirtyTwo * U2 * U2;
+ Y = One + Y;
+ X = Y - One;
+ }
+ while (!((U2 <= X) || (X <= Zero)));
+
+ /*... now U2 == 1 ulp of 1 + ... */
+ X = Two / Three;
+ F6 = X - Half;
+ Third = F6 + F6;
+ X = Third - Half;
+ X = FABS (X + F6);
+ if (X < U1)
+ X = U1;
+
+ /*... now X == (unknown no.) ulps of 1 -... */
+ do
+ {
+ U1 = X;
+ Y = Half * U1 + ThirtyTwo * U1 * U1;
+ Y = Half - Y;
+ X = Half + Y;
+ Y = Half - X;
+ X = Half + Y;
+ }
+ while (!((U1 <= X) || (X <= Zero)));
+ /*... now U1 == 1 ulp of 1 - ... */
+ if (U1 == E1)
+ printf ("confirms closest relative separation U1 .\n");
+ else
+ printf ("gets better closest relative separation U1 = %s .\n", U1.str());
+ W = One / U1;
+ F9 = (Half - U1) + Half;
+
+ Radix = FLOOR (FLOAT ("0.01") + U2 / U1);
+ if (Radix == E0)
+ printf ("Radix confirmed.\n");
+ else
+ printf ("MYSTERY: recalculated Radix = %s .\n", Radix.str());
+ TstCond (Defect, Radix <= Eight + Eight,
+ "Radix is too big: roundoff problems");
+ TstCond (Flaw, (Radix == Two) || (Radix == 10)
+ || (Radix == One), "Radix is not as good as 2 or 10");
+ /*=============================================*/
+ Milestone = 20;
+ /*=============================================*/
+ TstCond (Failure, F9 - Half < Half,
+ "(1-U1)-1/2 < 1/2 is FALSE, prog. fails?");
+ X = F9;
+ I = 1;
+ Y = X - Half;
+ Z = Y - Half;
+ TstCond (Failure, (X != One)
+ || (Z == Zero), "Comparison is fuzzy,X=1 but X-1/2-1/2 != 0");
+ X = One + U2;
+ I = 0;
+ /*=============================================*/
+ Milestone = 25;
+ /*=============================================*/
+ /*... BMinusU2 = nextafter(Radix, 0) */
+ BMinusU2 = Radix - One;
+ BMinusU2 = (BMinusU2 - U2) + One;
+ /* Purify Integers */
+ if (Radix != One)
+ {
+ X = -TwoForty * LOG (U1) / LOG (Radix);
+ Y = FLOOR (Half + X);
+ if (FABS (X - Y) * Four < One)
+ X = Y;
+ Precision = X / TwoForty;
+ Y = FLOOR (Half + Precision);
+ if (FABS (Precision - Y) * TwoForty < Half)
+ Precision = Y;
+ }
+ if ((Precision != FLOOR (Precision)) || (Radix == One))
+ {
+ printf ("Precision cannot be characterized by an Integer number\n");
+ printf
+ ("of significant digits but, by itself, this is a minor flaw.\n");
+ }
+ if (Radix == One)
+ printf
+ ("logarithmic encoding has precision characterized solely by U1.\n");
+ else
+ printf ("The number of significant digits of the Radix is %s .\n",
+ Precision.str());
+ TstCond (Serious, U2 * Nine * Nine * TwoForty < One,
+ "Precision worse than 5 decimal figures ");
+ /*=============================================*/
+ Milestone = 30;
+ /*=============================================*/
+ /* Test for extra-precise subexpressions */
+ X = FABS (((Four / Three - One) - One / Four) * Three - One / Four);
+ do
+ {
+ Z2 = X;
+ X = (One + (Half * Z2 + ThirtyTwo * Z2 * Z2)) - One;
+ }
+ while (!((Z2 <= X) || (X <= Zero)));
+ X = Y = Z = FABS ((Three / Four - Two / Three) * Three - One / Four);
+ do
+ {
+ Z1 = Z;
+ Z = (One / Two - ((One / Two - (Half * Z1 + ThirtyTwo * Z1 * Z1))
+ + One / Two)) + One / Two;
+ }
+ while (!((Z1 <= Z) || (Z <= Zero)));
+ do
+ {
+ do
+ {
+ Y1 = Y;
+ Y =
+ (Half - ((Half - (Half * Y1 + ThirtyTwo * Y1 * Y1)) + Half)) +
+ Half;
+ }
+ while (!((Y1 <= Y) || (Y <= Zero)));
+ X1 = X;
+ X = ((Half * X1 + ThirtyTwo * X1 * X1) - F9) + F9;
+ }
+ while (!((X1 <= X) || (X <= Zero)));
+ if ((X1 != Y1) || (X1 != Z1))
+ {
+ BadCond (Serious, "Disagreements among the values X1, Y1, Z1,\n");
+ printf ("respectively %s, %s, %s,\n", X1.str(), Y1.str(), Z1.str());
+ printf ("are symptoms of inconsistencies introduced\n");
+ printf ("by extra-precise evaluation of arithmetic subexpressions.\n");
+ notify ("Possibly some part of this");
+ if ((X1 == U1) || (Y1 == U1) || (Z1 == U1))
+ printf ("That feature is not tested further by this program.\n");
+ }
+ else
+ {
+ if ((Z1 != U1) || (Z2 != U2))
+ {
+ if ((Z1 >= U1) || (Z2 >= U2))
+ {
+ BadCond (Failure, "");
+ notify ("Precision");
+ printf ("\tU1 = %s, Z1 - U1 = %s\n", U1.str(), (Z1 - U1).str());
+ printf ("\tU2 = %s, Z2 - U2 = %s\n", U2.str(), (Z2 - U2).str());
+ }
+ else
+ {
+ if ((Z1 <= Zero) || (Z2 <= Zero))
+ {
+ printf ("Because of unusual Radix = %s", Radix.str());
+ printf (", or exact rational arithmetic a result\n");
+ printf ("Z1 = %s, or Z2 = %s ", Z1.str(), Z2.str());
+ notify ("of an\nextra-precision");
+ }
+ if (Z1 != Z2 || Z1 > Zero)
+ {
+ X = Z1 / U1;
+ Y = Z2 / U2;
+ if (Y > X)
+ X = Y;
+ Q = -LOG (X);
+ printf ("Some subexpressions appear to be calculated "
+ "extra precisely\n");
+ printf ("with about %s extra B-digits, i.e.\n",
+ (Q / LOG (Radix)).str());
+ printf ("roughly %s extra significant decimals.\n",
+ (Q / LOG (FLOAT (10))).str());
+ }
+ printf
+ ("That feature is not tested further by this program.\n");
+ }
+ }
+ }
+ Pause ();
+ /*=============================================*/
+ Milestone = 35;
+ /*=============================================*/
+ if (Radix >= Two)
+ {
+ X = W / (Radix * Radix);
+ Y = X + One;
+ Z = Y - X;
+ T = Z + U2;
+ X = T - Z;
+ TstCond (Failure, X == U2,
+ "Subtraction is not normalized X=Y,X+Z != Y+Z!");
+ if (X == U2)
+ printf ("Subtraction appears to be normalized, as it should be.");
+ }
+ printf ("\nChecking for guard digit in *, /, and -.\n");
+ Y = F9 * One;
+ Z = One * F9;
+ X = F9 - Half;
+ Y = (Y - Half) - X;
+ Z = (Z - Half) - X;
+ X = One + U2;
+ T = X * Radix;
+ R = Radix * X;
+ X = T - Radix;
+ X = X - Radix * U2;
+ T = R - Radix;
+ T = T - Radix * U2;
+ X = X * (Radix - One);
+ T = T * (Radix - One);
+ if ((X == Zero) && (Y == Zero) && (Z == Zero) && (T == Zero))
+ GMult = Yes;
+ else
+ {
+ GMult = No;
+ TstCond (Serious, false, "* lacks a Guard Digit, so 1*X != X");
+ }
+ Z = Radix * U2;
+ X = One + Z;
+ Y = FABS ((X + Z) - X * X) - U2;
+ X = One - U2;
+ Z = FABS ((X - U2) - X * X) - U1;
+ TstCond (Failure, (Y <= Zero)
+ && (Z <= Zero), "* gets too many final digits wrong.\n");
+ Y = One - U2;
+ X = One + U2;
+ Z = One / Y;
+ Y = Z - X;
+ X = One / Three;
+ Z = Three / Nine;
+ X = X - Z;
+ T = Nine / TwentySeven;
+ Z = Z - T;
+ TstCond (Defect, X == Zero && Y == Zero && Z == Zero,
+ "Division lacks a Guard Digit, so error can exceed 1 ulp\n"
+ "or 1/3 and 3/9 and 9/27 may disagree");
+ Y = F9 / One;
+ X = F9 - Half;
+ Y = (Y - Half) - X;
+ X = One + U2;
+ T = X / One;
+ X = T - X;
+ if ((X == Zero) && (Y == Zero) && (Z == Zero))
+ GDiv = Yes;
+ else
+ {
+ GDiv = No;
+ TstCond (Serious, false, "Division lacks a Guard Digit, so X/1 != X");
+ }
+ X = One / (One + U2);
+ Y = X - Half - Half;
+ TstCond (Serious, Y < Zero, "Computed value of 1/1.000..1 >= 1");
+ X = One - U2;
+ Y = One + Radix * U2;
+ Z = X * Radix;
+ T = Y * Radix;
+ R = Z / Radix;
+ StickyBit = T / Radix;
+ X = R - X;
+ Y = StickyBit - Y;
+ TstCond (Failure, X == Zero && Y == Zero,
+ "* and/or / gets too many last digits wrong");
+ Y = One - U1;
+ X = One - F9;
+ Y = One - Y;
+ T = Radix - U2;
+ Z = Radix - BMinusU2;
+ T = Radix - T;
+ if ((X == U1) && (Y == U1) && (Z == U2) && (T == U2))
+ GAddSub = Yes;
+ else
+ {
+ GAddSub = No;
+ TstCond (Serious, false,
+ "- lacks Guard Digit, so cancellation is obscured");
+ }
+ if (F9 != One && F9 - One >= Zero)
+ {
+ BadCond (Serious, "comparison alleges (1-U1) < 1 although\n");
+ printf (" subtraction yields (1-U1) - 1 = 0 , thereby vitiating\n");
+ printf (" such precautions against division by zero as\n");
+ printf (" ... if (X == 1.0) {.....} else {.../(X-1.0)...}\n");
+ }
+ if (GMult == Yes && GDiv == Yes && GAddSub == Yes)
+ printf
+ (" *, /, and - appear to have guard digits, as they should.\n");
+ /*=============================================*/
+ Milestone = 40;
+ /*=============================================*/
+ Pause ();
+ printf ("Checking rounding on multiply, divide and add/subtract.\n");
+ RMult = Other;
+ RDiv = Other;
+ RAddSub = Other;
+ RadixD2 = Radix / Two;
+ A1 = Two;
+ Done = false;
+ do
+ {
+ AInvrse = Radix;
+ do
+ {
+ X = AInvrse;
+ AInvrse = AInvrse / A1;
+ }
+ while (!(FLOOR (AInvrse) != AInvrse));
+ Done = (X == One) || (A1 > Three);
+ if (!Done)
+ A1 = Nine + One;
+ }
+ while (!(Done));
+ if (X == One)
+ A1 = Radix;
+ AInvrse = One / A1;
+ X = A1;
+ Y = AInvrse;
+ Done = false;
+ do
+ {
+ Z = X * Y - Half;
+ TstCond (Failure, Z == Half, "X * (1/X) differs from 1");
+ Done = X == Radix;
+ X = Radix;
+ Y = One / X;
+ }
+ while (!(Done));
+ Y2 = One + U2;
+ Y1 = One - U2;
+ X = OneAndHalf - U2;
+ Y = OneAndHalf + U2;
+ Z = (X - U2) * Y2;
+ T = Y * Y1;
+ Z = Z - X;
+ T = T - X;
+ X = X * Y2;
+ Y = (Y + U2) * Y1;
+ X = X - OneAndHalf;
+ Y = Y - OneAndHalf;
+ if ((X == Zero) && (Y == Zero) && (Z == Zero) && (T <= Zero))
+ {
+ X = (OneAndHalf + U2) * Y2;
+ Y = OneAndHalf - U2 - U2;
+ Z = OneAndHalf + U2 + U2;
+ T = (OneAndHalf - U2) * Y1;
+ X = X - (Z + U2);
+ StickyBit = Y * Y1;
+ S = Z * Y2;
+ T = T - Y;
+ Y = (U2 - Y) + StickyBit;
+ Z = S - (Z + U2 + U2);
+ StickyBit = (Y2 + U2) * Y1;
+ Y1 = Y2 * Y1;
+ StickyBit = StickyBit - Y2;
+ Y1 = Y1 - Half;
+ if ((X == Zero) && (Y == Zero) && (Z == Zero) && (T == Zero)
+ && (StickyBit == Zero) && (Y1 == Half))
+ {
+ RMult = Rounded;
+ printf ("Multiplication appears to round correctly.\n");
+ }
+ else if ((X + U2 == Zero) && (Y < Zero) && (Z + U2 == Zero)
+ && (T < Zero) && (StickyBit + U2 == Zero) && (Y1 < Half))
+ {
+ RMult = Chopped;
+ printf ("Multiplication appears to chop.\n");
+ }
+ else
+ printf ("* is neither chopped nor correctly rounded.\n");
+ if ((RMult == Rounded) && (GMult == No))
+ notify ("Multiplication");
+ }
+ else
+ printf ("* is neither chopped nor correctly rounded.\n");
+ /*=============================================*/
+ Milestone = 45;
+ /*=============================================*/
+ Y2 = One + U2;
+ Y1 = One - U2;
+ Z = OneAndHalf + U2 + U2;
+ X = Z / Y2;
+ T = OneAndHalf - U2 - U2;
+ Y = (T - U2) / Y1;
+ Z = (Z + U2) / Y2;
+ X = X - OneAndHalf;
+ Y = Y - T;
+ T = T / Y1;
+ Z = Z - (OneAndHalf + U2);
+ T = (U2 - OneAndHalf) + T;
+ if (!((X > Zero) || (Y > Zero) || (Z > Zero) || (T > Zero)))
+ {
+ X = OneAndHalf / Y2;
+ Y = OneAndHalf - U2;
+ Z = OneAndHalf + U2;
+ X = X - Y;
+ T = OneAndHalf / Y1;
+ Y = Y / Y1;
+ T = T - (Z + U2);
+ Y = Y - Z;
+ Z = Z / Y2;
+ Y1 = (Y2 + U2) / Y2;
+ Z = Z - OneAndHalf;
+ Y2 = Y1 - Y2;
+ Y1 = (F9 - U1) / F9;
+ if ((X == Zero) && (Y == Zero) && (Z == Zero) && (T == Zero)
+ && (Y2 == Zero) && (Y2 == Zero) && (Y1 - Half == F9 - Half))
+ {
+ RDiv = Rounded;
+ printf ("Division appears to round correctly.\n");
+ if (GDiv == No)
+ notify ("Division");
+ }
+ else if ((X < Zero) && (Y < Zero) && (Z < Zero) && (T < Zero)
+ && (Y2 < Zero) && (Y1 - Half < F9 - Half))
+ {
+ RDiv = Chopped;
+ printf ("Division appears to chop.\n");
+ }
+ }
+ if (RDiv == Other)
+ printf ("/ is neither chopped nor correctly rounded.\n");
+ BInvrse = One / Radix;
+ TstCond (Failure, (BInvrse * Radix - Half == Half),
+ "Radix * ( 1 / Radix ) differs from 1");
+ /*=============================================*/
+ Milestone = 50;
+ /*=============================================*/
+ TstCond (Failure, ((F9 + U1) - Half == Half)
+ && ((BMinusU2 + U2) - One == Radix - One),
+ "Incomplete carry-propagation in Addition");
+ X = One - U1 * U1;
+ Y = One + U2 * (One - U2);
+ Z = F9 - Half;
+ X = (X - Half) - Z;
+ Y = Y - One;
+ if ((X == Zero) && (Y == Zero))
+ {
+ RAddSub = Chopped;
+ printf ("Add/Subtract appears to be chopped.\n");
+ }
+ if (GAddSub == Yes)
+ {
+ X = (Half + U2) * U2;
+ Y = (Half - U2) * U2;
+ X = One + X;
+ Y = One + Y;
+ X = (One + U2) - X;
+ Y = One - Y;
+ if ((X == Zero) && (Y == Zero))
+ {
+ X = (Half + U2) * U1;
+ Y = (Half - U2) * U1;
+ X = One - X;
+ Y = One - Y;
+ X = F9 - X;
+ Y = One - Y;
+ if ((X == Zero) && (Y == Zero))
+ {
+ RAddSub = Rounded;
+ printf ("Addition/Subtraction appears to round correctly.\n");
+ if (GAddSub == No)
+ notify ("Add/Subtract");
+ }
+ else
+ printf ("Addition/Subtraction neither rounds nor chops.\n");
+ }
+ else
+ printf ("Addition/Subtraction neither rounds nor chops.\n");
+ }
+ else
+ printf ("Addition/Subtraction neither rounds nor chops.\n");
+ S = One;
+ X = One + Half * (One + Half);
+ Y = (One + U2) * Half;
+ Z = X - Y;
+ T = Y - X;
+ StickyBit = Z + T;
+ if (StickyBit != Zero)
+ {
+ S = Zero;
+ BadCond (Flaw, "(X - Y) + (Y - X) is non zero!\n");
+ }
+ StickyBit = Zero;
+ if ((GMult == Yes) && (GDiv == Yes) && (GAddSub == Yes)
+ && (RMult == Rounded) && (RDiv == Rounded)
+ && (RAddSub == Rounded) && (FLOOR (RadixD2) == RadixD2))
+ {
+ printf ("Checking for sticky bit.\n");
+ X = (Half + U1) * U2;
+ Y = Half * U2;
+ Z = One + Y;
+ T = One + X;
+ if ((Z - One <= Zero) && (T - One >= U2))
+ {
+ Z = T + Y;
+ Y = Z - X;
+ if ((Z - T >= U2) && (Y - T == Zero))
+ {
+ X = (Half + U1) * U1;
+ Y = Half * U1;
+ Z = One - Y;
+ T = One - X;
+ if ((Z - One == Zero) && (T - F9 == Zero))
+ {
+ Z = (Half - U1) * U1;
+ T = F9 - Z;
+ Q = F9 - Y;
+ if ((T - F9 == Zero) && (F9 - U1 - Q == Zero))
+ {
+ Z = (One + U2) * OneAndHalf;
+ T = (OneAndHalf + U2) - Z + U2;
+ X = One + Half / Radix;
+ Y = One + Radix * U2;
+ Z = X * Y;
+ if (T == Zero && X + Radix * U2 - Z == Zero)
+ {
+ if (Radix != Two)
+ {
+ X = Two + U2;
+ Y = X / Two;
+ if ((Y - One == Zero))
+ StickyBit = S;
+ }
+ else
+ StickyBit = S;
+ }
+ }
+ }
+ }
+ }
+ }
+ if (StickyBit == One)
+ printf ("Sticky bit apparently used correctly.\n");
+ else
+ printf ("Sticky bit used incorrectly or not at all.\n");
+ TstCond (Flaw, !(GMult == No || GDiv == No || GAddSub == No ||
+ RMult == Other || RDiv == Other || RAddSub == Other),
+ "lack(s) of guard digits or failure(s) to correctly round or chop\n\
+(noted above) count as one flaw in the final tally below");
+ /*=============================================*/
+ Milestone = 60;
+ /*=============================================*/
+ printf ("\n");
+ printf ("Does Multiplication commute? ");
+ printf ("Testing on %d random pairs.\n", NoTrials);
+ Random9 = SQRT (FLOAT (3));
+ Random1 = Third;
+ I = 1;
+ do
+ {
+ X = Random ();
+ Y = Random ();
+ Z9 = Y * X;
+ Z = X * Y;
+ Z9 = Z - Z9;
+ I = I + 1;
+ }
+ while (!((I > NoTrials) || (Z9 != Zero)));
+ if (I == NoTrials)
+ {
+ Random1 = One + Half / Three;
+ Random2 = (U2 + U1) + One;
+ Z = Random1 * Random2;
+ Y = Random2 * Random1;
+ Z9 = (One + Half / Three) * ((U2 + U1) + One) - (One + Half /
+ Three) * ((U2 + U1) +
+ One);
+ }
+ if (!((I == NoTrials) || (Z9 == Zero)))
+ BadCond (Defect, "X * Y == Y * X trial fails.\n");
+ else
+ printf (" No failures found in %d integer pairs.\n", NoTrials);
+ /*=============================================*/
+ Milestone = 70;
+ /*=============================================*/
+ printf ("\nRunning test of square root(x).\n");
+ TstCond (Failure, (Zero == SQRT (Zero))
+ && (-Zero == SQRT (-Zero))
+ && (One == SQRT (One)), "Square root of 0.0, -0.0 or 1.0 wrong");
+ MinSqEr = Zero;
+ MaxSqEr = Zero;
+ J = Zero;
+ X = Radix;
+ OneUlp = U2;
+ SqXMinX (Serious);
+ X = BInvrse;
+ OneUlp = BInvrse * U1;
+ SqXMinX (Serious);
+ X = U1;
+ OneUlp = U1 * U1;
+ SqXMinX (Serious);
+ if (J != Zero)
+ Pause ();
+ printf ("Testing if sqrt(X * X) == X for %d Integers X.\n", NoTrials);
+ J = Zero;
+ X = Two;
+ Y = Radix;
+ if ((Radix != One))
+ do
+ {
+ X = Y;
+ Y = Radix * Y;
+ }
+ while (!((Y - X >= NoTrials)));
+ OneUlp = X * U2;
+ I = 1;
+ while (I <= NoTrials)
+ {
+ X = X + One;
+ SqXMinX (Defect);
+ if (J > Zero)
+ break;
+ I = I + 1;
+ }
+ printf ("Test for sqrt monotonicity.\n");
+ I = -1;
+ X = BMinusU2;
+ Y = Radix;
+ Z = Radix + Radix * U2;
+ NotMonot = false;
+ Monot = false;
+ while (!(NotMonot || Monot))
+ {
+ I = I + 1;
+ X = SQRT (X);
+ Q = SQRT (Y);
+ Z = SQRT (Z);
+ if ((X > Q) || (Q > Z))
+ NotMonot = true;
+ else
+ {
+ Q = FLOOR (Q + Half);
+ if (!(I > 0 || Radix == Q * Q))
+ Monot = true;
+ else if (I > 0)
+ {
+ if (I > 1)
+ Monot = true;
+ else
+ {
+ Y = Y * BInvrse;
+ X = Y - U1;
+ Z = Y + U1;
+ }
+ }
+ else
+ {
+ Y = Q;
+ X = Y - U2;
+ Z = Y + U2;
+ }
+ }
+ }
+ if (Monot)
+ printf ("sqrt has passed a test for Monotonicity.\n");
+ else
+ {
+ BadCond (Defect, "");
+ printf ("sqrt(X) is non-monotonic for X near %s .\n", Y.str());
+ }
+ /*=============================================*/
+ Milestone = 110;
+ /*=============================================*/
+ printf ("Seeking Underflow thresholds UfThold and E0.\n");
+ D = U1;
+ if (Precision != FLOOR (Precision))
+ {
+ D = BInvrse;
+ X = Precision;
+ do
+ {
+ D = D * BInvrse;
+ X = X - One;
+ }
+ while (X > Zero);
+ }
+ Y = One;
+ Z = D;
+ /* ... D is power of 1/Radix < 1. */
+ do
+ {
+ C = Y;
+ Y = Z;
+ Z = Y * Y;
+ }
+ while ((Y > Z) && (Z + Z > Z));
+ Y = C;
+ Z = Y * D;
+ do
+ {
+ C = Y;
+ Y = Z;
+ Z = Y * D;
+ }
+ while ((Y > Z) && (Z + Z > Z));
+ if (Radix < Two)
+ HInvrse = Two;
+ else
+ HInvrse = Radix;
+ H = One / HInvrse;
+ /* ... 1/HInvrse == H == Min(1/Radix, 1/2) */
+ CInvrse = One / C;
+ E0 = C;
+ Z = E0 * H;
+ /* ...1/Radix^(BIG Integer) << 1 << CInvrse == 1/C */
+ do
+ {
+ Y = E0;
+ E0 = Z;
+ Z = E0 * H;
+ }
+ while ((E0 > Z) && (Z + Z > Z));
+ UfThold = E0;
+ E1 = Zero;
+ Q = Zero;
+ E9 = U2;
+ S = One + E9;
+ D = C * S;
+ if (D <= C)
+ {
+ E9 = Radix * U2;
+ S = One + E9;
+ D = C * S;
+ if (D <= C)
+ {
+ BadCond (Failure,
+ "multiplication gets too many last digits wrong.\n");
+ Underflow = E0;
+ Y1 = Zero;
+ PseudoZero = Z;
+ Pause ();
+ }
+ }
+ else
+ {
+ Underflow = D;
+ PseudoZero = Underflow * H;
+ UfThold = Zero;
+ do
+ {
+ Y1 = Underflow;
+ Underflow = PseudoZero;
+ if (E1 + E1 <= E1)
+ {
+ Y2 = Underflow * HInvrse;
+ E1 = FABS (Y1 - Y2);
+ Q = Y1;
+ if ((UfThold == Zero) && (Y1 != Y2))
+ UfThold = Y1;
+ }
+ PseudoZero = PseudoZero * H;
+ }
+ while ((Underflow > PseudoZero)
+ && (PseudoZero + PseudoZero > PseudoZero));
+ }
+ /* Comment line 4530 .. 4560 */
+ if (PseudoZero != Zero)
+ {
+ printf ("\n");
+ Z = PseudoZero;
+ /* ... Test PseudoZero for "phoney- zero" violates */
+ /* ... PseudoZero < Underflow or PseudoZero < PseudoZero + PseudoZero
+ ... */
+ if (PseudoZero <= Zero)
+ {
+ BadCond (Failure, "Positive expressions can underflow to an\n");
+ printf ("allegedly negative value\n");
+ printf ("PseudoZero that prints out as: %s .\n", PseudoZero.str());
+ X = -PseudoZero;
+ if (X <= Zero)
+ {
+ printf ("But -PseudoZero, which should be\n");
+ printf ("positive, isn't; it prints out as %s .\n", X.str());
+ }
+ }
+ else
+ {
+ BadCond (Flaw, "Underflow can stick at an allegedly positive\n");
+ printf ("value PseudoZero that prints out as %s .\n",
+ PseudoZero.str());
+ }
+ TstPtUf ();
+ }
+ /*=============================================*/
+ Milestone = 120;
+ /*=============================================*/
+ if (CInvrse * Y > CInvrse * Y1)
+ {
+ S = H * S;
+ E0 = Underflow;
+ }
+ if (!((E1 == Zero) || (E1 == E0)))
+ {
+ BadCond (Defect, "");
+ if (E1 < E0)
+ {
+ printf ("Products underflow at a higher");
+ printf (" threshold than differences.\n");
+ if (PseudoZero == Zero)
+ E0 = E1;
+ }
+ else
+ {
+ printf ("Difference underflows at a higher");
+ printf (" threshold than products.\n");
+ }
+ }
+ printf ("Smallest strictly positive number found is E0 = %s .\n", E0.str());
+ Z = E0;
+ TstPtUf ();
+ Underflow = E0;
+ if (N == 1)
+ Underflow = Y;
+ I = 4;
+ if (E1 == Zero)
+ I = 3;
+ if (UfThold == Zero)
+ I = I - 2;
+ UfNGrad = true;
+ switch (I)
+ {
+ case 1:
+ UfThold = Underflow;
+ if ((CInvrse * Q) != ((CInvrse * Y) * S))
+ {
+ UfThold = Y;
+ BadCond (Failure, "Either accuracy deteriorates as numbers\n");
+ printf ("approach a threshold = %s\n", UfThold.str());
+ printf (" coming down from %s\n", C.str());
+ printf
+ (" or else multiplication gets too many last digits wrong.\n");
+ }
+ Pause ();
+ break;
+
+ case 2:
+ BadCond (Failure,
+ "Underflow confuses Comparison, which alleges that\n");
+ printf ("Q == Y while denying that |Q - Y| == 0; these values\n");
+ printf ("print out as Q = %s, Y = %s .\n", Q.str(), Y2.str());
+ printf ("|Q - Y| = %s .\n", FABS (Q - Y2).str());
+ UfThold = Q;
+ break;
+
+ case 3:
+ X = X;
+ break;
+
+ case 4:
+ if ((Q == UfThold) && (E1 == E0) && (FABS (UfThold - E1 / E9) <= E1))
+ {
+ UfNGrad = false;
+ printf ("Underflow is gradual; it incurs Absolute Error =\n");
+ printf ("(roundoff in UfThold) < E0.\n");
+ Y = E0 * CInvrse;
+ Y = Y * (OneAndHalf + U2);
+ X = CInvrse * (One + U2);
+ Y = Y / X;
+ IEEE = (Y == E0);
+ }
+ }
+ if (UfNGrad)
+ {
+ printf ("\n");
+ if (setjmp (ovfl_buf))
+ {
+ printf ("Underflow / UfThold failed!\n");
+ R = H + H;
+ }
+ else
+ R = SQRT (Underflow / UfThold);
+ if (R <= H)
+ {
+ Z = R * UfThold;
+ X = Z * (One + R * H * (One + H));
+ }
+ else
+ {
+ Z = UfThold;
+ X = Z * (One + H * H * (One + H));
+ }
+ if (!((X == Z) || (X - Z != Zero)))
+ {
+ BadCond (Flaw, "");
+ printf ("X = %s\n\tis not equal to Z = %s .\n", X.str(), Z.str());
+ Z9 = X - Z;
+ printf ("yet X - Z yields %s .\n", Z9.str());
+ printf (" Should this NOT signal Underflow, ");
+ printf ("this is a SERIOUS DEFECT\nthat causes ");
+ printf ("confusion when innocent statements like\n");;
+ printf (" if (X == Z) ... else");
+ printf (" ... (f(X) - f(Z)) / (X - Z) ...\n");
+ printf ("encounter Division by Zero although actually\n");
+ if (setjmp (ovfl_buf))
+ printf ("X / Z fails!\n");
+ else
+ printf ("X / Z = 1 + %s .\n", ((X / Z - Half) - Half).str());
+ }
+ }
+ printf ("The Underflow threshold is %s, below which\n", UfThold.str());
+ printf ("calculation may suffer larger Relative error than ");
+ printf ("merely roundoff.\n");
+ Y2 = U1 * U1;
+ Y = Y2 * Y2;
+ Y2 = Y * U1;
+ if (Y2 <= UfThold)
+ {
+ if (Y > E0)
+ {
+ BadCond (Defect, "");
+ I = 5;
+ }
+ else
+ {
+ BadCond (Serious, "");
+ I = 4;
+ }
+ printf ("Range is too narrow; U1^%d Underflows.\n", I);
+ }
+ /*=============================================*/
+ Milestone = 130;
+ /*=============================================*/
+ Y = -FLOOR (Half - TwoForty * LOG (UfThold) / LOG (HInvrse)) / TwoForty;
+ Y2 = Y + Y;
+ printf ("Since underflow occurs below the threshold\n");
+ printf ("UfThold = (%s) ^ (%s)\nonly underflow ", HInvrse.str(), Y.str());
+ printf ("should afflict the expression\n\t(%s) ^ (%s);\n",
+ HInvrse.str(), Y2.str());
+ printf ("actually calculating yields:");
+ if (setjmp (ovfl_buf))
+ {
+ BadCond (Serious, "trap on underflow.\n");
+ }
+ else
+ {
+ V9 = POW (HInvrse, Y2);
+ printf (" %s .\n", V9.str());
+ if (!((V9 >= Zero) && (V9 <= (Radix + Radix + E9) * UfThold)))
+ {
+ BadCond (Serious, "this is not between 0 and underflow\n");
+ printf (" threshold = %s .\n", UfThold.str());
+ }
+ else if (!(V9 > UfThold * (One + E9)))
+ printf ("This computed value is O.K.\n");
+ else
+ {
+ BadCond (Defect, "this is not between 0 and underflow\n");
+ printf (" threshold = %s .\n", UfThold.str());
+ }
+ }
+ /*=============================================*/
+ Milestone = 160;
+ /*=============================================*/
+ Pause ();
+ printf ("Searching for Overflow threshold:\n");
+ printf ("This may generate an error.\n");
+ Y = -CInvrse;
+ V9 = HInvrse * Y;
+ if (setjmp (ovfl_buf))
+ {
+ I = 0;
+ V9 = Y;
+ goto overflow;
+ }
+ do
+ {
+ V = Y;
+ Y = V9;
+ V9 = HInvrse * Y;
+ }
+ while (V9 < Y);
+ I = 1;
+overflow:
+ Z = V9;
+ printf ("Can `Z = -Y' overflow?\n");
+ printf ("Trying it on Y = %s .\n", Y.str());
+ V9 = -Y;
+ V0 = V9;
+ if (V - Y == V + V0)
+ printf ("Seems O.K.\n");
+ else
+ {
+ printf ("finds a ");
+ BadCond (Flaw, "-(-Y) differs from Y.\n");
+ }
+ if (Z != Y)
+ {
+ BadCond (Serious, "");
+ printf ("overflow past %s\n\tshrinks to %s .\n", Y.str(), Z.str());
+ }
+ if (I)
+ {
+ Y = V * (HInvrse * U2 - HInvrse);
+ Z = Y + ((One - HInvrse) * U2) * V;
+ if (Z < V0)
+ Y = Z;
+ if (Y < V0)
+ V = Y;
+ if (V0 - V < V0)
+ V = V0;
+ }
+ else
+ {
+ V = Y * (HInvrse * U2 - HInvrse);
+ V = V + ((One - HInvrse) * U2) * Y;
+ }
+ printf ("Overflow threshold is V = %s .\n", V.str());
+ if (I)
+ printf ("Overflow saturates at V0 = %s .\n", V0.str());
+ else
+ printf ("There is no saturation value because "
+ "the system traps on overflow.\n");
+ V9 = V * One;
+ printf ("No Overflow should be signaled for V * 1 = %s\n", V9.str());
+ V9 = V / One;
+ printf (" nor for V / 1 = %s.\n", V9.str());
+ printf ("Any overflow signal separating this * from the one\n");
+ printf ("above is a DEFECT.\n");
+ /*=============================================*/
+ Milestone = 170;
+ /*=============================================*/
+ if (!(-V < V && -V0 < V0 && -UfThold < V && UfThold < V))
+ {
+ BadCond (Failure, "Comparisons involving ");
+ printf ("+-%s, +-%s\nand +-%s are confused by Overflow.",
+ V.str(), V0.str(), UfThold.str());
+ }
+ /*=============================================*/
+ Milestone = 175;
+ /*=============================================*/
+ printf ("\n");
+ for (Indx = 1; Indx <= 3; ++Indx)
+ {
+ switch (Indx)
+ {
+ case 1:
+ Z = UfThold;
+ break;
+ case 2:
+ Z = E0;
+ break;
+ case 3:
+ Z = PseudoZero;
+ break;
+ }
+ if (Z != Zero)
+ {
+ V9 = SQRT (Z);
+ Y = V9 * V9;
+ if (Y / (One - Radix * E9) < Z || Y > (One + Radix * E9) * Z)
+ { /* dgh: + E9 --> * E9 */
+ if (V9 > U1)
+ BadCond (Serious, "");
+ else
+ BadCond (Defect, "");
+ printf ("Comparison alleges that what prints as Z = %s\n",
+ Z.str());
+ printf (" is too far from sqrt(Z) ^ 2 = %s .\n", Y.str());
+ }
+ }
+ }
+ /*=============================================*/
+ Milestone = 180;
+ /*=============================================*/
+ for (Indx = 1; Indx <= 2; ++Indx)
+ {
+ if (Indx == 1)
+ Z = V;
+ else
+ Z = V0;
+ V9 = SQRT (Z);
+ X = (One - Radix * E9) * V9;
+ V9 = V9 * X;
+ if (((V9 < (One - Two * Radix * E9) * Z) || (V9 > Z)))
+ {
+ Y = V9;
+ if (X < W)
+ BadCond (Serious, "");
+ else
+ BadCond (Defect, "");
+ printf ("Comparison alleges that Z = %s\n", Z.str());
+ printf (" is too far from sqrt(Z) ^ 2 (%s) .\n", Y.str());
+ }
+ }
+ /*=============================================*/
+ Milestone = 190;
+ /*=============================================*/
+ Pause ();
+ X = UfThold * V;
+ Y = Radix * Radix;
+ if (X * Y < One || X > Y)
+ {
+ if (X * Y < U1 || X > Y / U1)
+ BadCond (Defect, "Badly");
+ else
+ BadCond (Flaw, "");
+
+ printf (" unbalanced range; UfThold * V = %s\n\t%s\n",
+ X.str(), "is too far from 1.\n");
+ }
+ /*=============================================*/
+ Milestone = 200;
+ /*=============================================*/
+ for (Indx = 1; Indx <= 5; ++Indx)
+ {
+ X = F9;
+ switch (Indx)
+ {
+ case 2:
+ X = One + U2;
+ break;
+ case 3:
+ X = V;
+ break;
+ case 4:
+ X = UfThold;
+ break;
+ case 5:
+ X = Radix;
+ }
+ Y = X;
+ if (setjmp (ovfl_buf))
+ printf (" X / X traps when X = %s\n", X.str());
+ else
+ {
+ V9 = (Y / X - Half) - Half;
+ if (V9 == Zero)
+ continue;
+ if (V9 == -U1 && Indx < 5)
+ BadCond (Flaw, "");
+ else
+ BadCond (Serious, "");
+ printf (" X / X differs from 1 when X = %s\n", X.str());
+ printf (" instead, X / X - 1/2 - 1/2 = %s .\n", V9.str());
+ }
+ }
+ /*=============================================*/
+ Milestone = 210;
+ /*=============================================*/
+ MyZero = Zero;
+ printf ("\n");
+ printf ("What message and/or values does Division by Zero produce?\n");
+ printf (" Trying to compute 1 / 0 produces ...");
+ if (!setjmp (ovfl_buf))
+ printf (" %s .\n", (One / MyZero).str());
+ printf ("\n Trying to compute 0 / 0 produces ...");
+ if (!setjmp (ovfl_buf))
+ printf (" %s .\n", (Zero / MyZero).str());
+ /*=============================================*/
+ Milestone = 220;
+ /*=============================================*/
+ Pause ();
+ printf ("\n");
+ {
+ static const char *msg[] = {
+ "FAILUREs encountered =",
+ "SERIOUS DEFECTs discovered =",
+ "DEFECTs discovered =",
+ "FLAWs discovered ="
+ };
+ int i;
+ for (i = 0; i < 4; i++)
+ if (ErrCnt[i])
+ printf ("The number of %-29s %d.\n", msg[i], ErrCnt[i]);
+ }
+ printf ("\n");
+ if ((ErrCnt[Failure] + ErrCnt[Serious] + ErrCnt[Defect] + ErrCnt[Flaw]) > 0)
+ {
+ if ((ErrCnt[Failure] + ErrCnt[Serious] + ErrCnt[Defect] == 0)
+ && (ErrCnt[Flaw] > 0))
+ {
+ printf ("The arithmetic diagnosed seems ");
+ printf ("Satisfactory though flawed.\n");
+ }
+ if ((ErrCnt[Failure] + ErrCnt[Serious] == 0) && (ErrCnt[Defect] > 0))
+ {
+ printf ("The arithmetic diagnosed may be Acceptable\n");
+ printf ("despite inconvenient Defects.\n");
+ }
+ if ((ErrCnt[Failure] + ErrCnt[Serious]) > 0)
+ {
+ printf ("The arithmetic diagnosed has ");
+ printf ("unacceptable Serious Defects.\n");
+ }
+ if (ErrCnt[Failure] > 0)
+ {
+ printf ("Potentially fatal FAILURE may have spoiled this");
+ printf (" program's subsequent diagnoses.\n");
+ }
+ }
+ else
+ {
+ printf ("No failures, defects nor flaws have been discovered.\n");
+ if (!((RMult == Rounded) && (RDiv == Rounded)
+ && (RAddSub == Rounded) && (RSqrt == Rounded)))
+ printf ("The arithmetic diagnosed seems Satisfactory.\n");
+ else
+ {
+ if (StickyBit >= One &&
+ (Radix - Two) * (Radix - Nine - One) == Zero)
+ {
+ printf ("Rounding appears to conform to ");
+ printf ("the proposed IEEE standard P");
+ if ((Radix == Two) &&
+ ((Precision - Four * Three * Two) *
+ (Precision - TwentySeven - TwentySeven + One) == Zero))
+ printf ("754");
+ else
+ printf ("854");
+ if (IEEE)
+ printf (".\n");
+ else
+ {
+ printf (",\nexcept for possibly Double Rounding");
+ printf (" during Gradual Underflow.\n");
+ }
+ }
+ printf ("The arithmetic diagnosed appears to be Excellent!\n");
+ }
+ }
+ printf ("END OF TEST.\n");
+ return 0;
+}
+
+template<typename FLOAT>
+FLOAT
+Paranoia<FLOAT>::Sign (FLOAT X)
+{
+ return X >= FLOAT (long (0)) ? 1 : -1;
+}
+
+template<typename FLOAT>
+void
+Paranoia<FLOAT>::Pause ()
+{
+ if (do_pause)
+ {
+ fputs ("Press return...", stdout);
+ fflush (stdout);
+ getchar();
+ }
+ printf ("\nDiagnosis resumes after milestone Number %d", Milestone);
+ printf (" Page: %d\n\n", PageNo);
+ ++Milestone;
+ ++PageNo;
+}
+
+template<typename FLOAT>
+void
+Paranoia<FLOAT>::TstCond (int K, int Valid, const char *T)
+{
+ if (!Valid)
+ {
+ BadCond (K, T);
+ printf (".\n");
+ }
+}
+
+template<typename FLOAT>
+void
+Paranoia<FLOAT>::BadCond (int K, const char *T)
+{
+ static const char *msg[] = { "FAILURE", "SERIOUS DEFECT", "DEFECT", "FLAW" };
+
+ ErrCnt[K] = ErrCnt[K] + 1;
+ printf ("%s: %s", msg[K], T);
+}
+
+/* Random computes
+ X = (Random1 + Random9)^5
+ Random1 = X - FLOOR(X) + 0.000005 * X;
+ and returns the new value of Random1. */
+
+template<typename FLOAT>
+FLOAT
+Paranoia<FLOAT>::Random ()
+{
+ FLOAT X, Y;
+
+ X = Random1 + Random9;
+ Y = X * X;
+ Y = Y * Y;
+ X = X * Y;
+ Y = X - FLOOR (X);
+ Random1 = Y + X * FLOAT ("0.000005");
+ return (Random1);
+}
+
+template<typename FLOAT>
+void
+Paranoia<FLOAT>::SqXMinX (int ErrKind)
+{
+ FLOAT XA, XB;
+
+ XB = X * BInvrse;
+ XA = X - XB;
+ SqEr = ((SQRT (X * X) - XB) - XA) / OneUlp;
+ if (SqEr != Zero)
+ {
+ if (SqEr < MinSqEr)
+ MinSqEr = SqEr;
+ if (SqEr > MaxSqEr)
+ MaxSqEr = SqEr;
+ J = J + 1;
+ BadCond (ErrKind, "\n");
+ printf ("sqrt(%s) - %s = %s\n", (X * X).str(), X.str(),
+ (OneUlp * SqEr).str());
+ printf ("\tinstead of correct value 0 .\n");
+ }
+}
+
+template<typename FLOAT>
+void
+Paranoia<FLOAT>::NewD ()
+{
+ X = Z1 * Q;
+ X = FLOOR (Half - X / Radix) * Radix + X;
+ Q = (Q - X * Z) / Radix + X * X * (D / Radix);
+ Z = Z - Two * X * D;
+ if (Z <= Zero)
+ {
+ Z = -Z;
+ Z1 = -Z1;
+ }
+ D = Radix * D;
+}
+
+template<typename FLOAT>
+void
+Paranoia<FLOAT>::SR3750 ()
+{
+ if (!((X - Radix < Z2 - Radix) || (X - Z2 > W - Z2)))
+ {
+ I = I + 1;
+ X2 = SQRT (X * D);
+ Y2 = (X2 - Z2) - (Y - Z2);
+ X2 = X8 / (Y - Half);
+ X2 = X2 - Half * X2 * X2;
+ SqEr = (Y2 + Half) + (Half - X2);
+ if (SqEr < MinSqEr)
+ MinSqEr = SqEr;
+ SqEr = Y2 - X2;
+ if (SqEr > MaxSqEr)
+ MaxSqEr = SqEr;
+ }
+}
+
+template<typename FLOAT>
+void
+Paranoia<FLOAT>::IsYeqX ()
+{
+ if (Y != X)
+ {
+ if (N <= 0)
+ {
+ if (Z == Zero && Q <= Zero)
+ printf ("WARNING: computing\n");
+ else
+ BadCond (Defect, "computing\n");
+ printf ("\t(%s) ^ (%s)\n", Z.str(), Q.str());
+ printf ("\tyielded %s;\n", Y.str());
+ printf ("\twhich compared unequal to correct %s ;\n", X.str());
+ printf ("\t\tthey differ by %s .\n", (Y - X).str());
+ }
+ N = N + 1; /* ... count discrepancies. */
+ }
+}
+
+template<typename FLOAT>
+void
+Paranoia<FLOAT>::PrintIfNPositive ()
+{
+ if (N > 0)
+ printf ("Similar discrepancies have occurred %d times.\n", N);
+}
+
+template<typename FLOAT>
+void
+Paranoia<FLOAT>::TstPtUf ()
+{
+ N = 0;
+ if (Z != Zero)
+ {
+ printf ("Since comparison denies Z = 0, evaluating ");
+ printf ("(Z + Z) / Z should be safe.\n");
+ if (setjmp (ovfl_buf))
+ goto very_serious;
+ Q9 = (Z + Z) / Z;
+ printf ("What the machine gets for (Z + Z) / Z is %s .\n", Q9.str());
+ if (FABS (Q9 - Two) < Radix * U2)
+ {
+ printf ("This is O.K., provided Over/Underflow");
+ printf (" has NOT just been signaled.\n");
+ }
+ else
+ {
+ if ((Q9 < One) || (Q9 > Two))
+ {
+ very_serious:
+ N = 1;
+ ErrCnt[Serious] = ErrCnt[Serious] + 1;
+ printf ("This is a VERY SERIOUS DEFECT!\n");
+ }
+ else
+ {
+ N = 1;
+ ErrCnt[Defect] = ErrCnt[Defect] + 1;
+ printf ("This is a DEFECT!\n");
+ }
+ }
+ V9 = Z * One;
+ Random1 = V9;
+ V9 = One * Z;
+ Random2 = V9;
+ V9 = Z / One;
+ if ((Z == Random1) && (Z == Random2) && (Z == V9))
+ {
+ if (N > 0)
+ Pause ();
+ }
+ else
+ {
+ N = 1;
+ BadCond (Defect, "What prints as Z = ");
+ printf ("%s\n\tcompares different from ", Z.str());
+ if (Z != Random1)
+ printf ("Z * 1 = %s ", Random1.str());
+ if (!((Z == Random2) || (Random2 == Random1)))
+ printf ("1 * Z == %s\n", Random2.str());
+ if (!(Z == V9))
+ printf ("Z / 1 = %s\n", V9.str());
+ if (Random2 != Random1)
+ {
+ ErrCnt[Defect] = ErrCnt[Defect] + 1;
+ BadCond (Defect, "Multiplication does not commute!\n");
+ printf ("\tComparison alleges that 1 * Z = %s\n", Random2.str());
+ printf ("\tdiffers from Z * 1 = %s\n", Random1.str());
+ }
+ Pause ();
+ }
+ }
+}
+
+template<typename FLOAT>
+void
+Paranoia<FLOAT>::notify (const char *s)
+{
+ printf ("%s test appears to be inconsistent...\n", s);
+ printf (" PLEASE NOTIFY KARPINKSI!\n");
+}
+
+/* ====================================================================== */
+
+int main(int ac, char **av)
+{
+ setbuf(stdout, NULL);
+ setbuf(stderr, NULL);
+
+ while (1)
+ switch (getopt (ac, av, "pvg:fdl"))
+ {
+ case -1:
+ return 0;
+ case 'p':
+ do_pause = true;
+ break;
+ case 'v':
+ verbose = true;
+ break;
+ case 'g':
+ {
+ static const struct {
+ const char *name;
+ const struct real_format *fmt;
+ } fmts[] = {
+#define F(x) { #x, &x##_format }
+ F(ieee_single),
+ F(ieee_double),
+ F(ieee_extended_motorola),
+ F(ieee_extended_intel_96),
+ F(ieee_extended_intel_128),
+ F(ibm_extended),
+ F(ieee_quad),
+ F(vax_f),
+ F(vax_d),
+ F(vax_g),
+ F(i370_single),
+ F(i370_double),
+ F(real_internal),
+#undef F
+ };
+
+ int i, n = sizeof (fmts)/sizeof(*fmts);
+
+ for (i = 0; i < n; ++i)
+ if (strcmp (fmts[i].name, optarg) == 0)
+ break;
+
+ if (i == n)
+ {
+ printf ("Unknown implementation \"%s\"; "
+ "available implementations:\n", optarg);
+ for (i = 0; i < n; ++i)
+ printf ("\t%s\n", fmts[i].name);
+ return 1;
+ }
+
+ // We cheat and use the same mode all the time, but vary
+ // the format used for that mode.
+ real_format_for_mode[int(real_c_float::MODE) - int(QFmode)]
+ = fmts[i].fmt;
+
+ Paranoia<real_c_float>().main();
+ break;
+ }
+
+ case 'f':
+ Paranoia < native_float<float> >().main();
+ break;
+ case 'd':
+ Paranoia < native_float<double> >().main();
+ break;
+ case 'l':
+#ifndef NO_LONG_DOUBLE
+ Paranoia < native_float<long double> >().main();
+#endif
+ break;
+
+ case '?':
+ puts ("-p\tpause between pages");
+ puts ("-g<FMT>\treal.c implementation FMT");
+ puts ("-f\tnative float");
+ puts ("-d\tnative double");
+ puts ("-l\tnative long double");
+ return 0;
+ }
+}
+
+/* GCC stuff referenced by real.o. */
+
+extern "C" void
+fancy_abort ()
+{
+ abort ();
+}
+
+int target_flags = 0;
+
+extern "C" int
+floor_log2_wide (unsigned HOST_WIDE_INT x)
+{
+ int log = -1;
+ while (x != 0)
+ log++,
+ x >>= 1;
+ return log;
+}
diff --git a/gcc-4.3.1/contrib/patch_tester.sh b/gcc-4.3.1/contrib/patch_tester.sh
new file mode 100755
index 000000000..ab6847653
--- /dev/null
+++ b/gcc-4.3.1/contrib/patch_tester.sh
@@ -0,0 +1,450 @@
+#!/bin/sh
+
+# Tests a set of patches from a directory.
+# Copyright (C) 2007 Free Software Foundation, Inc.
+# Contributed by Sebastian Pop <sebastian.pop@amd.com>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+cat <<EOF
+
+WARNING: This script should only be fed with patches from known
+ authorized and trusted sources. Don't even think about
+ hooking it up to a raw feed from the gcc-patches list or
+ you'll regret it.
+
+EOF
+
+args=$@
+
+dashj=
+default_standby=1
+standby=$default_standby
+default_watermark=0.60
+watermark=$default_watermark
+savecompilers=false
+nogpg=false
+
+usage() {
+ cat <<EOF
+patch_tester.sh [-j<N>] [-standby N] [-watermark N] [-savecompilers] [-nogpg]
+ <source_dir> [patches_dir [state_dir [build_dir]]]
+
+ J is the -j<N> option passed verbatim to make.  Default is the
+ empty string, i.e. no -j is passed.
+
+ STANDBY is the number of minutes between checks for new patches in
+ PATCHES_DIR. Default is ${default_standby} minutes.
+
+ WATERMARK is the 5 minute load average below which a new
+ compile can start.  Default is ${default_watermark}.  Note that the comparison
+ is done in lexicographical order, so don't forget the leading 0.
+
+  SAVECOMPILERS copies the compilers into the same directory as the
+  test results for the unpatched version. Default is not to copy them.
+
+ NOGPG can be used to avoid checking the GPG signature of patches.
+
+ SOURCE_DIR is the directory containing GCC's toplevel configure.
+
+ PATCHES_DIR is the directory containing the patches to be tested.
+ Default is SOURCE_DIR/patches.
+
+ STATE_DIR is where the tester maintains its internal state.
+ Default is SOURCE_DIR/state.
+
+ BUILD_DIR is the build tree, a temporary directory that this
+ script will delete and recreate. Default is SOURCE_DIR/obj.
+
+EOF
+ exit 1
+}
+
+while [ $# -ne 0 ]; do
+ case $1 in
+ -j*)
+ dashj=$1; shift
+ ;;
+ -standby)
+      [[ $# -gt 2 ]] || usage
+ standby=$2; shift; shift
+ ;;
+ -watermark)
+      [[ $# -gt 2 ]] || usage
+ watermark=$2; shift; shift
+ ;;
+ -savecompilers)
+ savecompilers=true; shift
+ ;;
+ -nogpg)
+ nogpg=true; shift
+ ;;
+ -*)
+ echo "Invalid option: $1"
+ usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+test $# -eq 0 && usage
+
+SOURCE=$1
+PATCHES=
+STATE=
+BUILD=
+
+if [[ $# < 2 ]]; then
+ PATCHES=$SOURCE/patches
+else
+ PATCHES=$2
+fi
+if [[ $# < 3 ]]; then
+ STATE=$SOURCE/state
+else
+ STATE=$3
+fi
+if [[ $# < 4 ]]; then
+ BUILD=$SOURCE/obj
+else
+ BUILD=$4
+fi
+
+[ -d $PATCHES ] || mkdir -p $PATCHES
+[ -d $STATE ] || mkdir -p $STATE
+[ -d $STATE/patched ] || mkdir -p $STATE/patched
+[ -d $SOURCE ] || mkdir -p $SOURCE
+[ -f $SOURCE/config.guess ] || {
+ cd $SOURCE
+ svn -q co svn://gcc.gnu.org/svn/gcc/trunk .
+}
+
+VERSION=`svn info $SOURCE | grep "^Revision:" | sed -e "s/^Revision://g" -e "s/ //g"`
+
+exec >> $STATE/tester.log 2>&1 || exit 1
+set -x
+
+TESTING=$STATE/testing
+REPORT=$TESTING/report
+PRISTINE=$TESTING/pristine
+PATCHED=$TESTING/patched
+PATCH=
+TARGET=`$SOURCE/config.guess || exit 1`
+TESTLOGS="gcc/testsuite/gcc/gcc.sum
+gcc/testsuite/gfortran/gfortran.sum
+gcc/testsuite/g++/g++.sum
+gcc/testsuite/objc/objc.sum
+$TARGET/libstdc++-v3/testsuite/libstdc++.sum
+$TARGET/libffi/testsuite/libffi.sum
+$TARGET/libjava/testsuite/libjava.sum
+$TARGET/libgomp/testsuite/libgomp.sum
+$TARGET/libmudflap/testsuite/libmudflap.sum"
+COMPILERS="gcc/cc1
+gcc/cc1obj
+gcc/cc1plus
+gcc/f951
+gcc/jc1
+gcc/gnat1
+gcc/tree1"
+
+now () {
+ echo `TZ=UTC date +"%Y_%m_%d_%H_%M_%S"`
+}
+
+report () {
+ echo "Checker: (`now`): $@" >> $REPORT
+}
+
+freport () {
+ if [ -s $1 ]; then
+ report "(cat $1"
+ cat $1 >> $REPORT
+ report "tac)"
+ fi
+}
+
+cleanup () {
+ cd $SOURCE
+ svn cleanup && svn revert -R . && svn st | cut -d' ' -f5- | xargs rm -v
+}
+
+selfexec () {
+ exec ${CONFIG_SHELL-/bin/sh} $SOURCE/contrib/patch_tester.sh $args
+}
+
+update () {
+ svn_branch=`grep "^branch:" $PATCH | sed -e "s/^branch://g" -e "s/ //g"`
+ if [ x$svn_branch = x ]; then
+ svn_branch=trunk
+ fi
+
+ svn_revision=`grep "^revision:" $PATCH | sed -e "s/^revision://g" -e "s/ //g"`
+ if [ x$svn_revision = x ]; then
+ svn_revision=HEAD
+ fi
+
+ cleanup
+ cd $SOURCE
+ case $svn_branch in
+ trunk)
+ if ! svn switch -r $svn_revision svn://gcc.gnu.org/svn/gcc/trunk &> $TESTING/svn ; then
+ report "failed to update svn sources with"
+ report "svn switch -r $svn_revision svn://gcc.gnu.org/svn/gcc/trunk"
+ freport $TESTING/svn
+ return 1
+ fi
+ ;;
+
+ svn://gcc.gnu.org/svn/gcc/*)
+ if ! svn switch -r $svn_revision $svn_branch &> $TESTING/svn ; then
+ report "failed to update svn sources with"
+ report "svn switch -r $svn_revision $svn_branch"
+ freport $TESTING/svn
+ return 1
+ fi
+ ;;
+
+ *)
+ if ! svn switch -r $svn_revision svn://gcc.gnu.org/svn/gcc/branches/$svn_branch &> $TESTING/svn ; then
+ report "failed to update svn sources with"
+ report "svn switch -r $svn_revision svn://gcc.gnu.org/svn/gcc/branches/$svn_branch"
+ freport $TESTING/svn
+ return 1
+ fi
+ ;;
+ esac
+
+ current_version=`svn info $SOURCE | grep "^Revision:" | sed -e "s/^Revision://g" -e "s/ //g"`
+  if [[ $VERSION -lt $current_version ]]; then
+ if [ -f $SOURCE/contrib/patch_tester.sh ]; then
+ selfexec
+ fi
+ fi
+
+ return 0
+}
+
+apply_patch () {
+ if [ $nogpg = false ]; then
+ if ! gpg --batch --verify $PATCH &> $TESTING/gpgverify ; then
+ report "your patch failed to verify:"
+ freport $TESTING/gpgverify
+ return 1
+ fi
+ fi
+
+ # Detect if the patch was created in toplev GCC.
+ grep "^Index: " $PATCH | grep "gcc/"
+ if [ $? = 0 ]; then
+ cd $SOURCE
+ if ! patch -p0 < $PATCH &> $TESTING/patching ; then
+ report "your patch failed to apply:"
+ freport $TESTING/patching
+ return 1
+ fi
+ else
+ cd $SOURCE/gcc
+ if ! patch -p0 < $PATCH &> $TESTING/patching ; then
+ report "your patch failed to apply:"
+ freport $TESTING/patching
+ return 1
+ fi
+ fi
+}
+
+save_compilers () {
+ for COMPILER in $COMPILERS ; do
+ if [ -f $BUILD/$COMPILER ]; then
+ cp $BUILD/$COMPILER $PRISTINE
+ fi
+ done
+}
+
+bootntest () {
+ rm -rf $BUILD
+ mkdir $BUILD
+ cd $BUILD
+
+ CONFIG_OPTIONS=`grep "^configure:" $PATCH | sed -e "s/^configure://g"`
+ if ! $SOURCE/configure $CONFIG_OPTIONS &> $1/configure ; then
+ report "configure failed with:"
+ freport $1/configure
+ return 1
+ fi
+
+ if ! make $dashj `grep "^make:" $PATCH | sed -e "s/^make://g"` bootstrap &> $1/bootstrap ; then
+ report "bootstrap failed with last lines:"
+ tail -30 $1/bootstrap > $1/last_bootstrap
+ freport $1/last_bootstrap
+ report "grep --context=20 Error bootstrap:"
+ grep --context=20 Error $1/bootstrap > $1/bootstrap_error
+ freport $1/bootstrap_error
+ return 1
+ fi
+
+ CHECK_OPTIONS=`grep "^check:" $PATCH | sed -e "s/^check://g"`
+ make $dashj $CHECK_OPTIONS -k check &> $1/check
+
+ for LOG in $TESTLOGS ; do
+ if [ -f $BUILD/$LOG ]; then
+ mv $BUILD/$LOG $1
+ mv `echo "$BUILD/$LOG" | sed -e "s/\.sum/\.log/g"` $1
+ fi
+ done
+
+ return 0
+}
+
+bootntest_patched () {
+ cleanup
+ mkdir -p $PATCHED
+ apply_patch && bootntest $PATCHED
+ return $?
+}
+
+# Build the pristine tree with exactly the same options as the patch under test.
+bootntest_pristine () {
+ cleanup
+ current_branch=`svn info $SOURCE | grep "^URL:" | sed -e "s/URL: //g" -e "s/svn:\/\/gcc.gnu.org\/svn\/gcc\///g"`
+ current_version=`svn info $SOURCE | grep "^Revision:" | sed -e "s/^Revision://g" -e "s/ //g"`
+ PRISTINE=$STATE/$current_branch/$current_version
+
+ if [ -d $PRISTINE ]; then
+ ln -s $PRISTINE $TESTING/pristine
+ return 0
+ else
+ mkdir -p $PRISTINE
+ ln -s $PRISTINE $TESTING/pristine
+ bootntest $PRISTINE
+ RETVAL=$?
+ if [ $RETVAL = 0 -a $savecompilers = true ]; then
+ save_compilers
+ fi
+ return $RETVAL
+ fi
+}
+
+regtest () {
+ touch $1/report
+ touch $1/passes
+ touch $1/failed
+ touch $1/regress
+
+ for LOG in $TESTLOGS ; do
+ NLOG=`basename $LOG`
+ if [ -f $1/$NLOG ]; then
+ awk '/^FAIL: / { print "'$NLOG'",$2; }' $1/$NLOG
+ fi
+ done | sort | uniq > $1/failed
+
+ comm -12 $1/failed $1/passes >> $1/regress
+ NUMREGRESS=`wc -l < $1/regress | tr -d ' '`
+
+ if [ $NUMREGRESS -eq 0 ] ; then
+ for LOG in $TESTLOGS ; do
+ NLOG=`basename $LOG`
+ if [ -f $1/$NLOG ] ; then
+ awk '/^PASS: / { print "'$NLOG'",$2; }' $1/$NLOG
+ fi
+ done | sort | uniq | comm -23 - $1/failed > $1/passes
+ echo "there are no regressions with your patch." >> $1/report
+ else
+ echo "with your patch there are $NUMREGRESS regressions." >> $1/report
+ echo "list of regressions with your patch:" >> $1/report
+ cat $1/regress >> $1/report
+ fi
+}
+
+contrib_compare_tests () {
+ report "comparing logs with contrib/compare_tests:"
+ for LOG in $TESTLOGS ; do
+ NLOG=`basename $LOG`
+ if [ -f $PRISTINE/$NLOG -a -f $PATCHED/$NLOG ]; then
+ $SOURCE/contrib/compare_tests $PRISTINE/$NLOG $PATCHED/$NLOG > $TESTING/compare_$NLOG
+ freport $TESTING/compare_$NLOG
+ fi
+ done
+}
+
+compare_passes () {
+ regtest $PRISTINE
+ cp $PRISTINE/passes $PATCHED
+ regtest $PATCHED
+ freport $PATCHED/report
+ report "FAILs with patched version:"
+ freport $PATCHED/failed
+ report "FAILs with pristine version:"
+ freport $PRISTINE/failed
+
+ # contrib_compare_tests
+}
+
+write_report () {
+ backup_patched=$STATE/patched/`now`
+ report "The files used for the validation of your patch are stored in $backup_patched on the tester machine."
+
+ EMAIL=`grep "^email:" $PATCH | sed -e "s/^email://g" -e "s/ //g"`
+ if [ x$EMAIL != x ]; then
+ mutt -s "[regtest] Results for `basename $PATCH` on $TARGET" -i $REPORT -a $PATCH $EMAIL
+ fi
+
+ mv $TESTING $backup_patched
+}
+
+announce () {
+ EMAIL=`grep "^email:" $PATCH | sed -e "s/^email://g" -e "s/ //g"`
+ if [ x$EMAIL != x ]; then
+
+ START_REPORT=$TESTING/start_report
+ echo "Hi, " >> $START_REPORT
+ echo "I'm the automatic tester running on $TARGET." >> $START_REPORT
+ echo "I just started to look at your patch `basename $PATCH`." >> $START_REPORT
+ echo "Bye, your automatic tester." >> $START_REPORT
+ mutt -s "[regtest] Starting bootstrap for `basename $PATCH` on $TARGET" -i $START_REPORT $EMAIL
+ fi
+}
+
+# After selfexec, $TESTING is already set up.
+if [ -d $TESTING ]; then
+ # The only file in $TESTING is the patch.
+ PATCH=`ls -rt -1 $TESTING | head -1`
+ PATCH=$TESTING/$PATCH
+ if [ -f $PATCH ]; then
+ bootntest_patched && bootntest_pristine && compare_passes
+ write_report
+ fi
+fi
+
+while true; do
+ PATCH=`ls -rt -1 $PATCHES | head -1`
+ if [ x$PATCH = x ]; then
+ sleep ${standby}m
+ else
+ sysload=`uptime | cut -d, -f 5`
+ if [[ $sysload > $watermark ]]; then
+ # Wait a bit when system load is too high.
+ sleep ${standby}m
+ else
+ mkdir -p $TESTING
+ mv $PATCHES/$PATCH $TESTING/
+ PATCH=$TESTING/$PATCH
+
+ announce
+ update && bootntest_patched && bootntest_pristine && compare_passes
+ write_report
+ fi
+ fi
+done
diff --git a/gcc-4.3.1/contrib/prepare_patch.sh b/gcc-4.3.1/contrib/prepare_patch.sh
new file mode 100755
index 000000000..8385f315c
--- /dev/null
+++ b/gcc-4.3.1/contrib/prepare_patch.sh
@@ -0,0 +1,96 @@
+#!/bin/sh
+#set -x
+
+# Prepares a patch for the patch tester.
+# Copyright (C) 2007 Free Software Foundation, Inc.
+# Contributed by Sebastian Pop <sebastian.pop@amd.com>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+usage() {
+ cat <<EOF
+prepare_patch.sh <source_dir> [patches_dir]
+
+ SOURCE_DIR is the directory containing GCC's toplevel configure.
+
+ PATCHES_DIR is the directory where the patch will be copied to.
+ Default is SOURCE_DIR/patches.
+
+EOF
+ exit 1
+}
+
+test $# -eq 0 && usage
+
+SOURCE=$1
+PATCHES=
+
+if [[ "$#" < 2 ]]; then
+ PATCHES=$SOURCE/patches
+else
+ PATCHES=$2
+fi
+
+[ -f $SOURCE/config.guess ] || usage
+[ -d $PATCHES ] || mkdir -p $PATCHES
+
+echo "Enter a name for this patch: "
+read name
+PATCH=$PATCHES/`TZ=UTC date +"%Y_%m_%d_%H_%M_%S"`_$name.diff
+
+echo "Enter the email where the report should be sent: "
+read email
+echo "email:$email" >> $PATCH
+
+branch=`svn info $SOURCE | grep URL: | sed -e "s/^URL: //g"`
+echo "Enter svn branch (svn info in $SOURCE reports $branch, default is trunk): "
+read svn_branch
+if [ x$svn_branch = x ]; then
+ svn_branch=trunk
+fi
+echo "branch:$svn_branch" >> $PATCH
+
+revision=`svn info $SOURCE | grep Revision: | sed -e "s/^Revision: //g"`
+echo "Enter svn revision (svn info in $SOURCE reports $revision, default is HEAD): "
+read svn_revision
+if [ x$svn_revision = x ]; then
+ svn_revision=HEAD
+fi
+echo "revision:$svn_revision" >> $PATCH
+
+echo "Enter configure options: "
+read configure_options
+echo "configure:$configure_options" >> $PATCH
+
+echo "Enter make options: "
+read make_options
+echo "make:$make_options" >> $PATCH
+
+echo "Enter make check options: "
+read check_options
+echo "check:$check_options" >> $PATCH
+
+echo "" >> $PATCH
+
+svn diff $SOURCE | tee -a $PATCH
+
+cat <<EOF
+
+You can now edit your patch, include a ChangeLog, and before
+submitting to the patch tester, don't forget to sign it with:
+
+ gpg --clearsign $PATCH
+
+EOF
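+
+# For reference, the header this script writes above the diff looks like
+# the following (field values are illustrative; empty values are allowed):
+#
+#   email:me@example.org
+#   branch:trunk
+#   revision:HEAD
+#   configure:--enable-languages=c,c++
+#   make:
+#   check: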
diff --git a/gcc-4.3.1/contrib/reghunt/ChangeLog b/gcc-4.3.1/contrib/reghunt/ChangeLog
new file mode 100644
index 000000000..253040fe4
--- /dev/null
+++ b/gcc-4.3.1/contrib/reghunt/ChangeLog
@@ -0,0 +1,28 @@
+2008-06-06 Release Manager
+
+ * GCC 4.3.1 released.
+
+2008-03-05 Release Manager
+
+ * GCC 4.3.0 released.
+
+2005-07-14 Ben Elliston <bje@au.ibm.com>
+
+ * reg_test_template: Typo fix.
+
+2005-01-17 Kazu Hirata <kazu@cs.umass.edu>
+
+ * reg_periodic, reg_search: Fix the uses of date command.
+
+2003-06-24 Janis Johnson <janis187@us.ibm.com>
+
+ * reg_search: Replace existing uses of DATE with MADE_DATE and
+ use DATE for the date command.
+ * reg_periodic: Ditto.
+
+2003-03-27 Janis Johnson <janis187@us.ibm.com>
+ * README: New file.
+ * reg_search: New file.
+ * reg_periodic: New file.
+ * reg_test_template: New file.
+
diff --git a/gcc-4.3.1/contrib/reghunt/README b/gcc-4.3.1/contrib/reghunt/README
new file mode 100644
index 000000000..c674ffc6c
--- /dev/null
+++ b/gcc-4.3.1/contrib/reghunt/README
@@ -0,0 +1,16 @@
+This directory contains scripts that are used for identifying the
+patch that introduced a regression. General information about such
+searches is covered in http://gcc.gnu.org/bugs/reghunt.html.
+
+ reg_search searches for a small time interval within a range of
+ dates in which results for a test changed, using a binary search.
+ The functionality for getting sources, building the component to
+ test, and running the test are in other scripts that are run from
+ here.
+
+ reg_periodic invokes separate tools (the same scripts invoked by
+ reg_search) over a range of dates at specified intervals.
+
+ reg_test_template shows the format for the script that runs a test
+ and determines whether to continue the search with a later or
+ earlier date.
diff --git a/gcc-4.3.1/contrib/reghunt/reg_periodic b/gcc-4.3.1/contrib/reghunt/reg_periodic
new file mode 100755
index 000000000..e3b6b1119
--- /dev/null
+++ b/gcc-4.3.1/contrib/reghunt/reg_periodic
@@ -0,0 +1,171 @@
+#! /bin/bash
+
+########################################################################
+#
+# File: reg_periodic
+# Author: Janis Johnson
+# Date: 2002/12/28
+#
+# Over a range of dates at specified intervals, invoke separate tools to
+# update sources, do a build, and run one or more tests.
+#
+# Define these in a file whose name is the argument to this script:
+# LOW_DATE: Date string recognized by the date command.
+# HIGH_DATE: Date string recognized by the date command.
+# INTERVAL: Time (in seconds) between dates for which to build.
+# REG_UPDATE: Pathname of script to update your source tree.
+# REG_BUILD: Pathname of script to build enough of the product to run
+# the test.
+# REG_TEST: Pathname of script to run one or more tests.
+# Optional:
+# VERBOSITY: Default is 0, to print only errors and final message.
+# DATE_IN_MSG If set to anything but 0, include the time and date in
+# messages
+# REG_STOP Pathname of a file whose existence says to quit; default
+# is STOP in the current directory.
+#
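+# An example configuration file (dates and paths are illustrative):
+#
+#   LOW_DATE="2002-12-01 00:00"
+#   HIGH_DATE="2002-12-28 00:00"
+#   INTERVAL=86400                # one build per day
+#   REG_UPDATE=$HOME/reghunt/bin/update
+#   REG_BUILD=$HOME/reghunt/bin/build
+#   REG_TEST=$HOME/reghunt/bin/test
+#   VERBOSITY=1
+#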
+#
+# Copyright (c) 2002, 2003, 2005 Free Software Foundation, Inc.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# For a copy of the GNU General Public License, write to the
+# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+########################################################################
+
+########################################################################
+# Functions
+########################################################################
+
+# Issue a message if its verbosity level is high enough.
+
+msg() {
+ test ${1} -gt ${VERBOSITY} && return
+
+ if [ "x${DATE_IN_MSG}" = "x" ]; then
+ echo "${2}"
+ else
+ echo "`${DATE}` ${2}"
+ fi
+}
+
+# Issue an error message and exit with a nonzero status.
+
+error() {
+ msg 0 "error: ${1}"
+ exit 1
+}
+
+# Turn seconds since the epoch into a date we can use with source
+# control tools and report to the user.
+
+make_date() {
+ MADE_DATE="`${DATE} -u +\"%Y-%m-%d %H:%M %Z\" --date \"1970-01-01 ${1} seconds\"`" \
+ || error "make_date: date command failed"
+}
+
+# Build the components to test using sources as of a particular date and
+# run a test case. Pass each of the scripts the date that we're
+# testing; the first one needs it, the others can ignore it if they want.
+
+process_date() {
+ TEST_DATE="${1}"
+
+ ${REG_UPDATE} "${TEST_DATE}"
+ if [ $? -ne 0 ]; then
+ msg 0 "source update failed for ${TEST_DATE}"
+ return
+ fi
+ ${REG_BUILD} "${TEST_DATE}"
+ if [ $? -ne 0 ]; then
+ msg 0 "build failed for ${TEST_DATE}"
+ return
+ fi
+ ${REG_TEST} "${TEST_DATE}"
+}
+
+########################################################################
+# Main program (so to speak)
+########################################################################
+
+# If DATE isn't defined, use the default date command; the configuration
+# file can override this.
+
+if [ "x${DATE}" = "x" ]; then
+ DATE=date
+fi
+
+# Process the configuration file.
+
+if [ $# -ne 1 ]; then
+ echo Usage: $0 config_file
+ exit 1
+fi
+
+CONFIG=${1}
+if [ ! -f ${CONFIG} ]; then
+ error "configuration file ${CONFIG} does not exist"
+fi
+
+# OK, the config file exists. Source it, make sure required parameters
+# are defined and their files exist, and give default values to optional
+# parameters.
+
+. ${CONFIG}
+
+test "x${REG_UPDATE}" = "x" && error "REG_UPDATE is not defined"
+test "x${REG_BUILD}" = "x" && error "REG_BUILD is not defined"
+test "x${REG_TEST}" = "x" && error "REG_TEST is not defined"
+test "x${INTERVAL}" = "x" && error "INTERVAL is not defined"
+test -x ${REG_TEST} || error "REG_TEST is not an executable file"
+test "x${VERBOSITY}" = "x" && VERBOSITY=0
+test "x${REG_STOP}" = "x" && REG_STOP="STOP"
+
+msg 2 "LOW_DATE = ${LOW_DATE}"
+msg 2 "HIGH_DATE = ${HIGH_DATE}"
+msg 2 "INTERVAL = ${INTERVAL}"
+msg 2 "REG_UPDATE = ${REG_UPDATE}"
+msg 2 "REG_BUILD = ${REG_BUILD}"
+msg 2 "REG_TEST = ${REG_TEST}"
+msg 2 "VERBOSITY = ${VERBOSITY}"
+
+# Change the dates into seconds since the epoch. This uses an extension
+# in GNU date.
+
+LOW_DATE=`${DATE} +%s --date "${LOW_DATE}"` || \
+ error "date command failed for \"${LOW_DATE}\""
+HIGH_DATE=`${DATE} +%s --date "${HIGH_DATE}"` || \
+ error "date command failed for \"${LOW_DATE}\""
+
+# Process each date in the range.
+
+while [ ${LOW_DATE} -le ${HIGH_DATE} ]; do
+
+ # If a file called STOP appears, stop; this allows a clean way to
+ # interrupt a search.
+
+ if [ -f ${REG_STOP} ]; then
+ msg 0 "STOP file detected"
+ rm -f ${REG_STOP}
+ exit 1
+ fi
+
+ # Get a version of the date that is usable by tools and readable
+ # by people, then process it.
+
+ make_date ${LOW_DATE}
+ process_date "${MADE_DATE}"
+ let LOW_DATE=LOW_DATE+INTERVAL
+done
+
+msg 1 "done"
diff --git a/gcc-4.3.1/contrib/reghunt/reg_search b/gcc-4.3.1/contrib/reghunt/reg_search
new file mode 100755
index 000000000..fc9022d53
--- /dev/null
+++ b/gcc-4.3.1/contrib/reghunt/reg_search
@@ -0,0 +1,300 @@
+#! /bin/bash
+
+########################################################################
+#
+# File: reg_search
+# Author: Janis Johnson <janis187@us.ibm.com>
+# Date: 2002/12/15
+#
+# Search for a small time interval within a range of dates in which
+# results for a test changed, using a binary search. The functionality
+# for getting sources, building the component to test, and running the
+# test are in other scripts that are run from here. Before the search
+# begins, we verify that we get the expected behavior for the first and
+# last dates.
+#
+# Define these in a file whose name is the argument to this script:
+# LOW_DATE: Date string recognized by the date command (local time).
+# HIGH_DATE: Date string recognized by the date command (local time).
+# REG_UPDATE: Pathname of script to update your source tree; returns
+# zero for success, nonzero for failure.
+# REG_BUILD: Pathname of script to build enough of the product to run
+# the test; returns zero for success, nonzero for failure.
+# REG_TEST: Pathname of script to run the test; returns 1 if we
+# should search later dates, 0 if we should search earlier
+# dates.
+# Optional:
+# DELTA: Search to an interval within this many seconds; default
+# is one hour (although 300 works well).
+# REG_FINISH Pathname of script to call at the end with the two final
+# dates as arguments.
+# SKIP_LOW If 1, skip verifying the low date of the range;
+# define this only if you're restarting and have already
+# tested the low date.
+# SKIP_HIGH If 1, skip verifying the high date of the range;
+# define this only if you're restarting and have already
+# tested the high date.
+# FIRST_MID Use this as the first midpoint, to avoid a midpoint that
+# is known not to build.
+# HAS_CHANGES Pathname of script to report whether the current date has
+# no differences from one of the ends of the current range
+# to skip unnecessary build and testing; default is "true".
+# VERBOSITY Default is 0, to print only errors and final message.
+# DATE_IN_MSG If set to anything but 0, include the time and date in
+# messages.
+#
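+# An example configuration file (dates and paths are illustrative):
+#
+#   LOW_DATE="2002-11-01 00:00"
+#   HIGH_DATE="2002-12-15 00:00"
+#   DELTA=300
+#   REG_UPDATE=$HOME/reghunt/bin/update
+#   REG_BUILD=$HOME/reghunt/bin/build
+#   REG_TEST=$HOME/reghunt/bin/test
+#   VERBOSITY=1
+#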
+#
+#
+# Copyright (c) 2002, 2003, 2005 Free Software Foundation, Inc.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# For a copy of the GNU General Public License, write to the
+# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+########################################################################
+
+########################################################################
+# Functions
+########################################################################
+
+# Issue a message if its verbosity level is high enough.
+
+msg() {
+ test ${1} -gt ${VERBOSITY} && return
+
+ if [ "x${DATE_IN_MSG}" = "x" ]; then
+ echo "${2}"
+ else
+ echo "`${DATE}` ${2}"
+ fi
+}
+
+# Issue an error message and exit with a non-zero status. If there
+# is a valid current range whose end points have been tested, report
+# it so the user can start again from there.
+
+error() {
+ msg 0 "error: ${1}"
+  if [ ${VALID_RANGE} -eq 1 ]; then
+    echo "current range:"
+    echo "LOW_DATE=\"${LATER_THAN}\""
+    echo "HIGH_DATE=\"${EARLIER_THAN}\""
+  fi
+ exit 1
+}
+
+# Turn seconds since the epoch into a date we can use with source
+# control tools and report to the user.
+
+make_date() {
+ MADE_DATE="`${DATE} -u +\"%Y-%m-%d %H:%M %Z\" --date \"1970-01-01 ${1} seconds\"`" \
+ || error "make_date: date command failed"
+}
+
+# Build the components to test using sources as of a particular date and
+# run a test case. Pass each of the scripts the date that we're
+# testing; the first one needs it, the others can ignore it if they want.
+
+process_date() {
+ TEST_DATE="${1}"
+
+ ${REG_UPDATE} "${TEST_DATE}" || error "source update failed for ${TEST_DATE}"
+
+ # If we're already in a valid range, skip this date if there are no
+ # differences from either end of the range and adjust LATER.
+
+ if [ ${VALID_RANGE} = 1 ]; then
+ ${HAS_CHANGES} "${TEST_DATE}" "${LATER_THAN}" "${EARLIER_THAN}"
+ RET=$?
+ case ${RET} in
+ 0) ;;
+ 1) LATER=1; return;;
+ 2) LATER=0; return;;
+ *) error "process_date: unexpected return value from ${HAS_CHANGES}";;
+ esac
+ fi
+
+ ${REG_BUILD} "${TEST_DATE}" || error "build failed for ${TEST_DATE}"
+ ${REG_TEST} "${TEST_DATE}"
+ LATER=$?
+}
+
+# Perform a binary search on dates within the range specified by
+# the arguments, bounded by the number of seconds in DELTA.
+
+search_dates() {
+ let LOW=$1
+ let HIGH=$2
+ let DIFF=HIGH-LOW
+
+ # Get the date in the middle of the range; MID is in seconds since
+ # the epoch, DATE is readable by humans and tools. The user can
+ # override the initial mid date if it is known to have problems,
+ # e.g., if a build fails for that date.
+
+ if [ ${FIRST_MID} -ne 0 ]; then
+ let MID=${FIRST_MID}
+ else
+ let MID=LOW/2+HIGH/2
+ fi
+
+ while [ ${DIFF} -ge ${DELTA} ]; do
+ make_date ${MID}
+ TEST_DATE="${MADE_DATE}"
+
+ # Test it.
+
+ process_date "${TEST_DATE}"
+
+ # Narrow the search based on the outcome of testing DATE.
+
+ if [ ${LATER} -eq 1 ]; then
+ msg 1 "search dates later than \"${TEST_DATE}\""
+ LATER_THAN="${TEST_DATE}"
+ let LOW=MID
+ else
+ msg 1 "search dates earlier than \"${TEST_DATE}\""
+ EARLIER_THAN="${TEST_DATE}"
+ let HIGH=MID
+ fi
+
+ let DIFF=HIGH-LOW
+ let MID=LOW/2+HIGH/2
+ done
+}
+
+########################################################################
+# Main program (so to speak)
+########################################################################
+
+# If DATE isn't defined, use the default date command; the configuration
+# file can override this.
+
+if [ "x${DATE}" = "x" ]; then
+ DATE=date
+fi
+
+# The error function uses this.
+
+VALID_RANGE=0
+
+# Process the configuration file.
+
+if [ $# != 1 ]; then
+ echo Usage: $0 config_file
+ exit 1
+fi
+
+CONFIG=${1}
+if [ ! -f ${CONFIG} ]; then
+ error "configuration file ${CONFIG} does not exist"
+fi
+
+# OK, the config file exists. Source it, make sure required parameters
+# are defined and their files exist, and give default values to optional
+# parameters.
+
+. ${CONFIG}
+
+test "x${REG_UPDATE}" = "x" && error "REG_UPDATE is not defined"
+test "x${REG_BUILD}" = "x" && error "REG_BUILD is not defined"
+test "x${REG_TEST}" = "x" && error "REG_TEST is not defined"
+test -x ${REG_TEST} || error "REG_TEST is not an executable file"
+test "x${SKIP_LOW}" = "x" && SKIP_LOW=0
+test "x${SKIP_HIGH}" = "x" && SKIP_HIGH=0
+test "x${DELTA}" = "x" && DELTA=3600
+test "x${VERBOSITY}" = "x" && VERBOSITY=0
+test "x${HAS_CHANGES}" = "x" && HAS_CHANGES=true
+test "x${REG_FINISH}" = "x" && REG_FINISH=true
+
+msg 2 "LOW_DATE = ${LOW_DATE}"
+msg 2 "HIGH_DATE = ${HIGH_DATE}"
+msg 2 "REG_UPDATE = ${REG_UPDATE}"
+msg 2 "REG_BUILD = ${REG_BUILD}"
+msg 2 "REG_TEST = ${REG_TEST}"
+msg 2 "SKIP_LOW = ${SKIP_LOW}"
+msg 2 "SKIP_HIGH = ${SKIP_HIGH}"
+msg 2 "FIRST_MID = ${FIRST_MID}"
+msg 2 "VERBOSITY = ${VERBOSITY}"
+msg 2 "DELTA = ${DELTA}"
+
+# Verify that DELTA is at least two minutes.
+
+test ${DELTA} -lt 120 && \
+ error "DELTA is ${DELTA}, must be at least 120 (two minutes)"
+
+# Change the dates into seconds since the epoch. This uses an extension
+# in GNU date.
+
+LOW_DATE=`${DATE} +%s --date "${LOW_DATE}"` || \
+ error "date command failed for \"${LOW_DATE}\""
+HIGH_DATE=`${DATE} +%s --date "${HIGH_DATE}"` || \
+ error "date command failed for \"${LOW_DATE}\""
+
+# If FIRST_MID was defined, convert it and make sure it's in the range.
+
+if [ "x${FIRST_MID}" != "x" ]; then
+ FIRST_MID=`${DATE} +%s --date "${FIRST_MID}"` || \
+ error "date command failed for \"${FIRST_MID}\""
+ test ${FIRST_MID} -le ${LOW_DATE} && \
+ error "FIRST_MID date is earlier than LOW_DATE"
+ test ${FIRST_MID} -ge ${HIGH_DATE} && \
+ error "FIRST_MID is later than HIGH_DATE"
+else
+ FIRST_MID=0
+fi
+
+# Keep track of the bounds of the range where the test behavior changes,
+# using a human-readable version of each date.
+
+make_date ${LOW_DATE}
+LATER_THAN="${MADE_DATE}"
+make_date ${HIGH_DATE}
+EARLIER_THAN="${MADE_DATE}"
+
+msg 2 "LATER_THAN = ${LATER_THAN}"
+msg 2 "EARLIER_THAN = ${EARLIER_THAN}"
+
+# Verify that the range isn't backwards.
+
+test ${LOW_DATE} -lt ${HIGH_DATE} || error "date range is backwards"
+
+# Verify that the first and last date in the range get the results we
+# expect. If not, quit, because any of several things could be wrong.
+
+if [ ${SKIP_LOW} -eq 0 ]; then
+ process_date "${LATER_THAN}"
+ test ${LATER} -ne 1 && \
+ error "unexpected result for low date ${LATER_THAN}"
+ msg 1 "result for low date is as expected"
+fi
+
+if [ ${SKIP_HIGH} -eq 0 ]; then
+ process_date "${EARLIER_THAN}"
+ test ${LATER} -ne 0 && \
+ error "unexpected result for high date ${EARLIER_THAN}"
+ msg 1 "result for high date is as expected"
+fi
+
+# Search within the range, now that we know that the end points are valid.
+
+VALID_RANGE=1
+search_dates ${LOW_DATE} ${HIGH_DATE}
+
+# Report the range that's left to investigate.
+
+echo "Continue search between ${LATER_THAN} and ${EARLIER_THAN}"
+
+# Invoke the optional script to report additional information about
+# changes between the two dates.
+
+${REG_FINISH} "${LATER_THAN}" "${EARLIER_THAN}"
diff --git a/gcc-4.3.1/contrib/reghunt/reg_test_template b/gcc-4.3.1/contrib/reghunt/reg_test_template
new file mode 100755
index 000000000..ab5bbc106
--- /dev/null
+++ b/gcc-4.3.1/contrib/reghunt/reg_test_template
@@ -0,0 +1,41 @@
+#! /bin/sh
+
+# Template for the test script specified for REG_TEST.
+
+# Run the test case for a regression search. The argument is the date
+# of the sources. The return value is 1 if the binary search should
+# continue with later dates, 0 if it should continue with earlier
+# dates.
+
+DATE="${1}"
+
+# Specify the PR number and the directory where the test should be run.
+PR=xxxx
+DIR=xxxx
+
+LOG_DATE="`echo ${DATE} | sed 's/[-: ]/_/g'`"
+LOG="${PR}.${LOG_DATE}.out"
+
+echo "`date` running test for PR ${PR}"
+cd ${DIR}
+
+# Compile the test case with whatever options are needed to trigger the
+# error.
+
+<compiler_to_run> <options> ${PR}.<x> > ${LOG} 2>&1
+
+# Some tests will require additional commands to determine whether the
+# test passed or failed, such as grepping compiler output for a
+# particular message, or running the test and possibly comparing its
+# output with the expected output.
+
+xxxxx
+
+# The return value depends on whether the last command is expected to be
+# zero or nonzero for a passing test, and whether we're looking for a
+# regression or for the patch that fixed the bug.
+
+# Return 1 to continue the search with later dates, 0 for earlier dates.
+
+test $? -eq 0 && exit 1
+exit 0
diff --git a/gcc-4.3.1/contrib/regression/ChangeLog b/gcc-4.3.1/contrib/regression/ChangeLog
new file mode 100644
index 000000000..2fd88397b
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/ChangeLog
@@ -0,0 +1,138 @@
+2008-06-06 Release Manager
+
+ * GCC 4.3.1 released.
+
+2008-03-05 Release Manager
+
+ * GCC 4.3.0 released.
+
+2007-12-24 Geoff Keating <geoffk@geoffk.org>
+
+ * objs-gcc.sh: Revert previous change.
+
+2007-12-23 Geoff Keating <geoffk@geoffk.org>
+
+ * objs-gcc.sh: Set up the GDB testsuite even if the gdb installed
+ is just called 'gdb'.
+
+2007-12-22 Geoff Keating <geoffk@geoffk.org>
+
+ * btest-gcc.sh (TESTLOGS): Add gfortran, and optionally libgomp.
+
+ * GCC_Regression_Tester.wdgt/widget.html: Mark as HTML 4.01,
+ which it now is, rather than XHTML, which it never was.
+ (<head>): Mark as UTF-8. Add title. Move CSS to top of
+ document, specify type. Mark script as 'defer'.
+ (updateContents): If loaded from HTTP, look for status in same
+ place as widget.
+ (gotContents): Use DOM methods to change text rather than innerHTML.
+ (<body>): Eliminate unnecessary DIV element.
+ * GCC_Regression_Tester.wdgt/Info.plist: Update version, copyright
+ notice.
+
+2006-09-05 Geoffrey Keating <geoffk@apple.com>
+
+ * btest-gcc.sh: .bad_compare has moved to toplevel from gcc/.
+
+2006-01-18 Andrew Pinski <pinskia@physics.uc.edu>
+
+ * btest-gcc.sh: gcc.sum has moved to gcc/testsuite/gcc/gcc.sum.
+ g++.sum has moved to gcc/testsuite/g++/g++.sum.
+ objc.sum has moved to gcc/testsuite/objc/objc.sum.
+
+2005-12-20 Geoffrey Keating <geoffk@apple.com>
+
+ * btest-gcc.sh: Support -j option.
+
+2005-11-28 Geoffrey Keating <geoffk@geoffk.org>
+
+ * GCC Regression Tester.wdgt: Rename to GCC_Regression_Tester.wdgt.
+ * GCC_Regression_Tester.wdgt/Info.plist
+ (CFBundleDisplayName): Update for rename.
+ (CFBundleName): Use shorter name.
+ (CFBundleVersion): Update to 1.0.1.
+ (CFBundleShortVersionString): Update to 1.0.1.
+ (LSHasLocalizedDisplayName): New.
+ (NSHumanReadableCopyright): New.
+ * GCC_Regression_Tester.wdgt/de.lproj/InfoPlist.strings: New.
+ * GCC_Regression_Tester.wdgt/en.lproj/InfoPlist.strings: New.
+ * GCC_Regression_Tester.wdgt/ja.lproj/InfoPlist.strings: New.
+
+ * GCC_Regression_Tester.wdgt/widget.html: Use max-age=30
+ to help out intermediate caches.
+
+2005-11-23 Geoffrey Keating <geoffk@geoffk.org>
+
+ * GCC Regression Tester.wdgt/widget.html: New file.
+ * GCC Regression Tester.wdgt/Info.plist: New file.
+ * GCC Regression Tester.wdgt/Icon.png: New file.
+ * GCC Regression Tester.wdgt/Default.png: New file.
+ * README: Describe new files.
+
+2005-06-07 Hans-Peter Nilsson <hp@bitrange.com>
+
+ * btest-gcc.sh: Add support for option
+ --add-passes-despite-regression.
+
+2005-06-06 Hans-Peter Nilsson <hp@axis.com>
+
+ * btest-gcc.sh <Build>: Don't pass --with-newlib when target is
+ "*-linux*".
+
+ * btest-gcc.sh (TESTLOGS): Make libstdc++.sum optional.
+
+2004-11-07 James A. Morrison <phython@gcc.gnu.org>
+
+ * README: Update website URL.
+
+2004-10-27 Geoffrey Keating <geoffk@apple.com>
+
+ * btest-gcc.sh (TESTLOGS): The libstdc++-v3 test log is called
+	libstdc++.sum not libstdc++-v3.sum.
+
+2004-10-26 Geoffrey Keating <geoffk@apple.com>
+
+ * btest-gcc.sh (TESTLOGS): Examine regressions in libstdc++,
+ libffi, and libjava.
+
+2004-05-13 Andrew Pinski <pinskia@physics.uc.edu>
+
+ * btest-gcc.sh: Remove g77.sum from TESTLOGS.
+
+2003-09-18 Andreas Tobler <a.tobler@schweiz.ch>
+
+ * btest-gcc.sh: Add make check-target-libffi.
+
+2003-07-31 Andreas Tobler <a.tobler@schweiz.ch>
+
+ * btest-gcc.sh: Add make check-target-libjava.
+
+2002-10-22 Geoffrey Keating <geoffk@apple.com>
+
+ * btest-gcc.sh: Add gdb.sum to TESTLOGS only when GDB testsuite is run.
+
+2002-10-11 Geoffrey Keating <geoffk@apple.com>
+
+ * objs-gcc.sh: Don't install GDB testsuite if GDB was not built.
+ * btest-gcc.sh: Don't run GDB testsuite if it doesn't exist.
+
+2002-10-09 Phil Edwards <pme@gcc.gnu.org>
+
+ * btest-gcc.sh, objs-gcc.sh: Update TARGET comments.
+
+2002-10-09 Geoffrey Keating <geoffk@apple.com>
+
+ * objs-gcc.sh: On (non-linux) native hosts, use 'make bootstrap'.
+
+2002-03-09 Geoffrey Keating <geoffk@redhat.com>
+
+ * btest-gcc.sh: For crosses, assume newlib and GNU binutils.
+ * site.exp: Correct mips-elf triplet.
+
+2002-01-31 Geoffrey Keating <geoffk@redhat.com>
+
+ * btest-gcc.sh: New file.
+ * objs-gcc.sh: New file.
+ * site.exp: New file.
+ * ChangeLog: New file.
+ * README: New file.
diff --git a/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Default.png b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Default.png
new file mode 100644
index 000000000..a6dfd4655
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Default.png
Binary files differ
diff --git a/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Icon.png b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Icon.png
new file mode 100644
index 000000000..662b5169d
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Icon.png
Binary files differ
diff --git a/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Info.plist b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Info.plist
new file mode 100644
index 000000000..a011e69a3
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/Info.plist
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>AllowNetworkAccess</key>
+ <true/>
+ <key>CFBundleDisplayName</key>
+ <string>GCC_Regression_Tester</string>
+ <key>CFBundleIdentifier</key>
+ <string>org.geoffk.widget.regress</string>
+ <key>CFBundleName</key>
+ <string>GCC Regr. Tester</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0.2</string>
+ <key>CFBundleVersion</key>
+ <string>1.0.2</string>
+ <key>CloseBoxInsetX</key>
+ <integer>0</integer>
+ <key>CloseBoxInsetY</key>
+ <integer>0</integer>
+ <key>Height</key>
+ <integer>192</integer>
+ <key>LSHasLocalizedDisplayName</key>
+ <true/>
+ <key>MainHTML</key>
+ <string>widget.html</string>
+ <key>NSHumanReadableCopyright</key>
+ <string>Copyright © 2005, 2007 Free Software Foundation, Inc.</string>
+ <key>Width</key>
+ <integer>261</integer>
+</dict>
+</plist>
diff --git a/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/de.lproj/InfoPlist.strings b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/de.lproj/InfoPlist.strings
new file mode 100644
index 000000000..c1455c46f
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/de.lproj/InfoPlist.strings
Binary files differ
diff --git a/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/en.lproj/InfoPlist.strings b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/en.lproj/InfoPlist.strings
new file mode 100644
index 000000000..e44971359
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/en.lproj/InfoPlist.strings
Binary files differ
diff --git a/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/ja.lproj/InfoPlist.strings b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/ja.lproj/InfoPlist.strings
new file mode 100644
index 000000000..9a7361f07
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/ja.lproj/InfoPlist.strings
Binary files differ
diff --git a/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/widget.html b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/widget.html
new file mode 100644
index 000000000..94f44ad3f
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/GCC_Regression_Tester.wdgt/widget.html
@@ -0,0 +1,153 @@
+<!-- Get and update the GCC regression tester's web page.
+ Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. -->
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<head>
+<meta http-equiv="Content-Script-Type" content="text/javascript">
+<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+<title>Regression Tester Status</title>
+<style type='text/css'>
+body {
+ margin: 0px;
+ padding: 0px;
+}
+pre {
+ font-family: Monaco;
+ font-size: 9px;
+ margin: 0px;
+ padding: 1px 2px 1px 2px;
+ color: black;
+ background-color: white;
+ opacity: 0.8;
+}
+</style>
+<script type='text/javascript' defer>
+// A string representing NUM, with a leading zero if it would be 1 digit long
+function dig2 (num)
+{
+ var result = num.toString();
+ if (result.length == 1)
+ return '0' + result;
+ else
+ return result;
+}
+
+// Get DATE as a string in standard ISO format in UTC
+function getISO (date)
+{
+ return (date.getUTCFullYear().toString() + '-'
+ + dig2 (date.getUTCMonth() + 1) + '-'
+ + dig2 (date.getUTCDate()) + 'T'
+ + dig2 (date.getUTCHours()) + ':'
+ + dig2 (date.getUTCMinutes()) + 'Z');
+}
+
+// STR is a bunch of lines of the form '<key>: <date>' where <date> is in
+// standard ISO UTC format. Return a Date object corresponding to KEY, or null
+// if none is found.
+function fromISO (str, key)
+{
+ var rx = new RegExp (key + ": (\\d+)-(\\d+)-(\\d+)T(\\d+):(\\d+):(\\d+)Z");
+ var match = rx.exec (str);
+ if (match == null || match.length != 7)
+ return null;
+ var date = new Date(0);
+ date.setUTCFullYear (match[1], match[2] - 1, match[3]);
+ date.setUTCHours (match[4], match[5], match[6], 0);
+ return date;
+}
+
+// Update the data
+function updateContents () {
+ var url = 'http://gcc.gnu.org/regtest/HEAD/status.txt';
+ if (document.URL && document.URL.substring (0,5) == 'http:') {
+ url = document.URL.replace ('widget.html','status.txt');
+ }
+ var xml_request = new XMLHttpRequest();
+
+ xml_request.onload = function(e)
+ {
+ gotContents(e, xml_request);
+ }
+ xml_request.open("GET", url);
+ xml_request.setRequestHeader("Cache-Control", "max-age=30");
+ xml_request.send(null);
+}
+
+function gotContents (event, request) {
+ if (request.status != 200)
+ return;
+
+ if (! request.responseText)
+ return;
+
+ var txt = request.responseText;
+ var today = new Date();
+ var date_r = fromISO (txt, "Date");
+ var completed_r = fromISO (txt, "Test-Completed");
+ var now_test_r = fromISO (txt, "Now-Testing");
+ var eta = "";
+
+ if (date_r != null && completed_r != null && now_test_r != null)
+ {
+ var eta_r = new Date (now_test_r.getTime()
+ + completed_r.getTime() - date_r.getTime());
+ eta = "ETA: " + getISO (eta_r) + '\n';
+ }
+
+ var val = txt + "Now: " + getISO (today) + '\n' + eta;
+ var contEl = document.getElementById ("contents");
+ contEl.removeChild(contEl.firstChild);
+ contEl.appendChild (document.createTextNode (val));
+}
+
+var mainTimer = null;
+
+function myOnShow ()
+{
+ if (! mainTimer) {
+ mainTimer = setInterval (updateContents, 60000);
+ }
+ updateContents();
+}
+
+function myOnHide ()
+{
+ if (mainTimer) {
+ clearInterval (mainTimer);
+ mainTimer = null;
+ }
+}
+
+function myOnLoad ()
+{
+ if ( window.widget ) {
+ widget.onshow = myOnShow;
+ widget.onhide = myOnHide;
+ }
+ myOnShow();
+}
+</script>
+</head>
+
+<body onLoad='myOnLoad();'>
+<pre id="contents">Loading...</pre>
+</body>
+</html>
diff --git a/gcc-4.3.1/contrib/regression/README b/gcc-4.3.1/contrib/regression/README
new file mode 100644
index 000000000..9e94e5c39
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/README
@@ -0,0 +1,24 @@
+This directory contains scripts that are used by the regression
+tester, <http://gcc.gnu.org/regtest/>
+
+The primary script is 'btest-gcc.sh'. This is the script that is run
+to actually test the compiler.
+
+'objs-gcc.sh' takes a combined tree and builds (but does not test) the
+tools required for 'btest-gcc.sh'. It is run periodically to update
+the tools. This script is followed by running 'btest-gcc.sh' using
+the newly-built tools to check that they will not cause regressions.
+
+'site.exp' is what $DEJAGNU points to when the regression tester runs
+these scripts.
+
+'GCC_Regression_Tester.wdgt' is a Dashboard widget that displays the
+current state of the tester using Javascript. You can use it without
+needing Dashboard by pointing your web browser at
+'GCC_Regression_Tester.wdgt/widget.html', if your browser supports
+and permits it.
+
+Note that any changes made here need to be approved by the regression
+tester's maintainer (see MAINTAINERS). The changes will be used on
+the tester's next run, so `experimental' changes are very strongly
+discouraged :-).
diff --git a/gcc-4.3.1/contrib/regression/btest-gcc.sh b/gcc-4.3.1/contrib/regression/btest-gcc.sh
new file mode 100755
index 000000000..b08f357b1
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/btest-gcc.sh
@@ -0,0 +1,232 @@
+#!/bin/sh
+
+# Test GCC.
+# Copyright (C) 1999, 2000, 2001, 2002, 2005, 2006 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# INPUT:
+# btest <options> <target> <source> <prefix> <state> <build>
+
+add_passes_despite_regression=0
+dashj=''
+
+# <options> can be
+# --add-passes-despite-regression:
+# Add new "PASSes" despite there being some regressions.
+# -j<n>:
+# Pass '-j<n>' to make.
+
+case "$1" in
+ --add-passes-despite-regression)
+ add_passes_despite_regression=1; shift;;
+ -j*)
+ dashj=$1; shift;;
+ -*) echo "Invalid option: $1"; exit 2;;
+esac
+
+# TARGET is the target triplet. It should be the same one as used in
+# constructing PREFIX. Or it can be the keyword 'native', indicating
+# a target of whatever platform the script is running on.
+TARGET=$1
+# SOURCE is the directory containing the toplevel configure.
+SOURCE=$2
+
+# PREFIX is the directory for the --prefix option to configure.
+# For cross compilers, it needs to contain header files,
+# libraries, and binutils. PATH should probably include
+# $PREFIX/bin.
+PREFIX=$3
+# This script also needs to include the GDB testsuite in
+# $PREFIX/share/gdb-testsuite.
+GDB_TESTSUITE=$PREFIX/share/gdb-testsuite
+
+# STATE is where the tester maintains its internal state,
+# described below.
+STATE=$4
+
+# BUILD is a temporary directory that this script will
+# delete and recreate, containing the build tree.
+BUILD=$5
+
+# you also probably need to set these variables:
+# PATH: should contain a native gcc, and a cross gdb.
+# DEJAGNU: should point to a site.exp suitable for testing
+# the compiler and debugger.
+
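+# Example invocation for a native test run (paths are illustrative):
+#
+#   DEJAGNU=$HOME/regress/site.exp \
+#     contrib/regression/btest-gcc.sh -j4 native $HOME/gcc/trunk \
+#     $HOME/regress/install $HOME/regress/state $HOME/regress/build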
+
+# OUTPUT: in $RESULT, one of the following keywords:
+# error the script failed due to
+# a misconfiguration or resource limitation
+# build the build failed
+# regress-<n> the build succeeded, but there were <n>
+# testsuite regressions, listed in $REGRESS
+# pass build succeeded and there were no regressions
+RESULT=$STATE/RESULT
+# in BUILD_LOG, the output of the build
+BUILD_LOG=$STATE/build_log
+# in FAILED, a list of failing testcases
+FAILED=$STATE/failed
+# in PASSES, the list of testcases we expect to pass
+PASSES=$STATE/passes
+# in REGRESS, a list of testcases we expected to pass but that failed
+REGRESS=$STATE/regress
+
+# Make sure various files exist.
+[ -d $STATE ] || mkdir $STATE
+[ -f $PASSES ] || touch $PASSES
+
+# These lines should stay in this order, because
+# that way if something is badly wrong and $RESULT can't
+# be modified then cron will mail the error message.
+# The reverse order could lead to the testsuite claiming that
+# everything always passes, without running any tests.
+echo error > $RESULT || exit 1
+exec > $BUILD_LOG 2>&1 || exit 1
+
+set -x
+
+# Nuke $BUILD and recreate it.
+rm -rf $BUILD $REGRESS $FAILED
+mkdir $BUILD || exit 1
+cd $BUILD || exit 1
+
+H_BUILD=`$SOURCE/config.guess || exit 1`
+H_HOST=$H_BUILD
+if [ $TARGET = native ] ; then
+ H_TARGET=$H_HOST
+else
+ H_TARGET=$TARGET
+fi
+H_REAL_TARGET=`$SOURCE/config.sub $H_TARGET || exit 1`
+
+# TESTLOGS is the list of dejagnu .sum files that the tester should
+# look at.
+TESTLOGS="gcc/testsuite/gcc/gcc.sum
+gcc/testsuite/g++/g++.sum
+gcc/testsuite/gfortran/gfortran.sum
+gcc/testsuite/objc/objc.sum"
+
+# Build.
+echo build > $RESULT
+if [ $H_HOST = $H_TARGET ] ; then
+ $SOURCE/configure --prefix=$PREFIX --target=$H_TARGET || exit 1
+ if ! make $dashj bootstrap ; then
+ [ -s .bad_compare ] || exit 1
+ cat .bad_compare >> $REGRESS || exit 1
+ make $dashj all || exit 1
+ fi
+else
+ withopt="--with-gnu-ld --with-gnu-as"
+ case "$H_TARGET" in
+ *-linux*) ;;
+ *) withopt="$withopt --with-newlib";;
+ esac
+ $SOURCE/configure --prefix=$PREFIX --target=$H_TARGET $withopt || exit 1
+ make $dashj || exit 1
+fi
+echo error > $RESULT || exit 1
+
+# Test GCC against its internal testsuite.
+make $dashj -k check
+
+if [ -f $BUILD/$H_TARGET/libstdc++-v3/testsuite/libstdc++.sum ] ; then
+ TESTLOGS="$TESTLOGS $H_TARGET/libstdc++-v3/testsuite/libstdc++.sum"
+fi
+
+if [ -f $BUILD/$H_TARGET/libffi/testsuite/libffi.sum ] ; then
+ TESTLOGS="$TESTLOGS $H_TARGET/libffi/testsuite/libffi.sum"
+fi
+
+if [ -f $BUILD/$H_TARGET/libjava/testsuite/libjava.sum ] ; then
+ TESTLOGS="$TESTLOGS $H_TARGET/libjava/testsuite/libjava.sum"
+fi
+
+if [ -f $BUILD/$H_TARGET/libgomp/testsuite/libgomp.sum ] ; then
+ TESTLOGS="$TESTLOGS $H_TARGET/libgomp/testsuite/libgomp.sum"
+fi
+
+# Test the just-built GCC with the GDB testsuite.
+if [ -d $GDB_TESTSUITE ] ; then
+ mkdir test-gdb || exit 1
+ cd $GDB_TESTSUITE || exit 1
+ for i in gdb.* ; do
+ if [ -d $i ] ; then
+ mkdir $BUILD/test-gdb/$i
+ fi
+ done
+ cd $BUILD/test-gdb || exit 1
+ echo "set host_alias $H_HOST" > site.exp
+ echo "set host_triplet $H_HOST" >> site.exp
+ echo "set target_alias $H_TARGET" >> site.exp
+ echo "set target_triplet $H_REAL_TARGET" >> site.exp
+ echo "set build_alias $H_BUILD" >> site.exp
+ echo "set build_triplet $H_BUILD" >> site.exp
+ echo "set srcdir $GDB_TESTSUITE" >> site.exp
+ runtest --tool gdb
+ TESTLOGS="$TESTLOGS test-gdb/gdb.sum"
+fi
+
+# Sanity-check the testlogs. They should contain at least one PASS.
+cd $BUILD || exit 1
+for LOG in $TESTLOGS ; do
+ if ! grep ^PASS: $LOG > /dev/null ; then
+ echo build > $RESULT
+ exit 1
+ fi
+done
+
+# Work out what failed
+for LOG in $TESTLOGS ; do
+ L=`basename $LOG`
+ awk '/^FAIL: / { print "'$L'",$2; }' $LOG || exit 1
+done | sort | uniq > $FAILED || exit 1
+comm -12 $FAILED $PASSES >> $REGRESS || exit 1
+NUMREGRESS=`wc -l < $REGRESS | tr -d ' '`
+
+if [ $NUMREGRESS -eq 0 ] || [ $add_passes_despite_regression -ne 0 ] ; then
+ # Update the state.
+ for LOG in $TESTLOGS ; do
+ L=`basename $LOG`
+ awk '/^PASS: / { print "'$L'",$2; }' $LOG || exit 1
+ done | sort | uniq | comm -23 - $FAILED > ${PASSES}~ || exit 1
+ [ -s ${PASSES}~ ] || exit 1
+ if [ $NUMREGRESS -ne 0 ] ; then
+ # The way we keep track of new PASSes when in "regress-N" for
+ # --add-passes-despite-regression, is to *add* them to previous
+ # PASSes. Just as without this option, we don't forget *any* PASS
+ # lines, because besides the ones in $REGRESS that we definitely
+ # don't want to lose, their removal or rename may have been a
+ # mistake (as in, the cause of the "regress-N" state). If they
+ # come back, we then know they're regressions.
+ cat ${PASSES}~ ${PASSES} | sort -u > ${PASSES}~~
+ mv ${PASSES}~~ ${PASSES} || exit 1
+ rm ${PASSES}~ || exit 1
+ else
+ # In contrast to the merging for "regress-N", we just overwrite
+ # the known PASSes when in the "pass" state, so we get rid of
+ # stale PASS lines for removed, moved or otherwise changed tests
+ # which may be added back with a different meaning later on.
+ mv ${PASSES}~ ${PASSES} || exit 1
+ fi
+fi
+
+if [ $NUMREGRESS -ne 0 ] ; then
+ echo regress-$NUMREGRESS > $RESULT
+ exit 1
+fi
+
+echo pass > $RESULT
+exit 0
diff --git a/gcc-4.3.1/contrib/regression/objs-gcc.sh b/gcc-4.3.1/contrib/regression/objs-gcc.sh
new file mode 100755
index 000000000..d926f8b85
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/objs-gcc.sh
@@ -0,0 +1,127 @@
+#!/bin/sh
+
+# Build tools for testing GCC.
+# Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# INPUT:
+# btest <target> <source> <prefix> <state> <build>
+# TARGET is the target triplet. It should be the same one as used in
+# constructing PREFIX. Or it can be the keyword 'native', indicating
+# a target of whatever platform the script is running on.
+TARGET=$1
+# SOURCE is the directory containing the toplevel configure.
+SOURCE=$2
+
+# PREFIX is the directory for the --prefix option to configure.
+PREFIX=$3
+
+# STATE is where the tester maintains its internal state,
+# described below.
+STATE=$4
+
+# BUILD is a temporary directory that this script will
+# delete and recreate, containing the build tree.
+BUILD=$5
+
+# you also probably need to set these variables:
+# DEJAGNU: should point to a site.exp suitable for testing
+# the compiler and debugger.
+
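+# Example invocation for a native build (paths are illustrative):
+#
+#   DEJAGNU=$HOME/regress/site.exp \
+#     contrib/regression/objs-gcc.sh native $HOME/gcc/trunk \
+#     $HOME/regress/install $HOME/regress/state $HOME/regress/build
+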
+# OUTPUT: in $RESULT, one of the following keywords:
+# error the script failed due to
+# a misconfiguration or resource limitation
+# build the build failed
+# regress-<n> the build succeeded, but there were <n>
+# testsuite regressions, listed in $REGRESS
+# pass build succeeded and there were no regressions
+RESULT=$STATE/RESULT
+# in BUILD_LOG, the output of the build
+BUILD_LOG=$STATE/build_log
+# in FAILED, a list of failing testcases
+FAILED=$STATE/failed
+# in PASSES, the list of testcases we expect to pass
+PASSES=$STATE/passes
+# in REGRESS, a list of testcases we expected to pass but that failed
+REGRESS=$STATE/regress
+
+# Make sure various files exist.
+[ -d $STATE ] || mkdir $STATE
+[ -f $PASSES ] || touch $PASSES
+
+# These lines should stay in this order, because
+# that way if something is badly wrong and $RESULT can't
+# be modified then cron will mail the error message.
+# The reverse order could lead to the testsuite claiming that
+# everything always passes, without running any tests.
+echo error > $RESULT || exit 1
+exec > $BUILD_LOG 2>&1 || exit 1
+
+set -x
+
+# TESTLOGS is the list of dejagnu .sum files that the tester should
+# look at.
+TESTLOGS="test/gcc/gcc.sum
+test/g++/g++.sum"
+
+# Nuke $BUILD and recreate it.
+rm -rf $BUILD $REGRESS $FAILED
+mkdir $BUILD $BUILD/build $BUILD/objs || exit 1
+cd $BUILD || exit 1
+
+# This script used to use config.guess, but that is not how releng
+# determines hostnames.
+H_BUILD=`$SOURCE/config.guess || exit 1`
+H_HOST=$H_BUILD
+if [ $TARGET = native ] ; then
+ H_TARGET=$H_HOST
+else
+ H_TARGET=$TARGET
+fi
+H_REAL_TARGET=`$SOURCE/config.sub $H_TARGET || exit 1`
+H_REAL_BUILD=`$SOURCE/config.sub $H_BUILD || exit 1`
+H_REAL_HOST=`$SOURCE/config.sub $H_HOST || exit 1`
+
+# Build.
+echo build > $RESULT
+
+cd $BUILD/build || exit 1
+TMP_PREFIX=$BUILD/install
+$SOURCE/configure --prefix=$PREFIX --target=$H_TARGET || exit 1
+if [ $H_REAL_TARGET = $H_REAL_HOST -a $H_REAL_TARGET = i686-pc-linux-gnu ]
+ then
+ make all-gdb all-dejagnu all-ld || exit 1
+ make install-gdb install-dejagnu install-ld || exit 1
+elif [ $H_REAL_TARGET = $H_REAL_HOST ] ; then
+ make bootstrap || exit 1
+ make install || exit 1
+else
+ make || exit 1
+ make install || exit 1
+fi
+
+if [ -x $PREFIX/bin/$TARGET-gdb ] ; then
+ mkdir -p $PREFIX/share/gdb-testsuite || exit 1
+ cd $SOURCE/gdb/testsuite || exit 1
+ find . -print | cpio -pdmu $PREFIX/share/gdb-testsuite || exit 1
+ # selftest.exp requires keeping old sources around, which is impractical
+ rm $PREFIX/share/gdb-testsuite/gdb.base/selftest.exp
+ # these tests seem to be broken and randomly failing
+ rm -r $PREFIX/share/gdb-testsuite/gdb.mi
+fi
+
+echo pass > $RESULT
+exit 0
diff --git a/gcc-4.3.1/contrib/regression/site.exp b/gcc-4.3.1/contrib/regression/site.exp
new file mode 100644
index 000000000..0892afa3a
--- /dev/null
+++ b/gcc-4.3.1/contrib/regression/site.exp
@@ -0,0 +1,18 @@
+global target_list
+
+case "$target_triplet" in {
+ { "powerpc*-*eabi*" } {
+# if { [info exists tool] && $tool == "gcc" } {
+# set target_list { "powerpc-sim{,-fpic}" }
+# } else {
+ set target_list { "powerpc-sim" }
+# }
+ }
+ { "mips*-*-elf" } {
+ set target_list { "mips-sim" }
+ }
+
+ default {
+ set target_list { "unix" }
+ }
+}
diff --git a/gcc-4.3.1/contrib/test_installed b/gcc-4.3.1/contrib/test_installed
new file mode 100755
index 000000000..e518cbcf7
--- /dev/null
+++ b/gcc-4.3.1/contrib/test_installed
@@ -0,0 +1,121 @@
+#! /bin/sh
+
+# (C) 1998, 2000, 2002, 2003, 2007 Free Software Foundation
+# Originally by Alexandre Oliva <oliva@dcc.unicamp.br>
+
+# This script is Free Software, and it can be copied, distributed and
+# modified as defined in the GNU General Public License. A copy of
+# its license can be downloaded from http://www.gnu.org/copyleft/gpl.html
+
+# This script assumes it lives in the contrib directory of the GCC
+# source tree, so it will find the testsuite tree from its location.
+# If you move it elsewhere, or want to use another testsuite tree, you
+# can override the defaults with --srcdir=/some/dir/GCC or
+# --with-testsuite=/some/dir/GCC/gcc/testsuite.  If you specify
+# --with-testsuite, --srcdir will be ignored; otherwise, `/gcc/testsuite'
+# will be appended to the srcdir.
+
+# You may specify where the binaries to be tested should be picked up
+# from. If you specify --prefix=/some/dir, gcc, g++ and gfortran will be
+# looked for at /some/dir/bin. Each one may be overridden by
+# specifying --with-gcc=/pathname/to/gcc, --with-g++=/pathname/to/g++
+# and --with-gfortran=/pathname/to/gfortran. If you specify --without-gcc,
+# --without-g++ or --without-gfortran, the test for the specified program
+# will be skipped. By default, gcc, g++ and gfortran will be searched in
+# the PATH.
+
+# An additional argument may specify --tmpdir=/some/dir; by default,
+# temporaries will be stored in the current directory, alongside the
+# log files.
+
+# The script will interpret arguments until it finds one it does not
+# understand. The remaining ones will be passed to `runtest'. A
+# double-dash can be used to explicitly separate the arguments to
+# `test_installed' from the ones to `runtest'.
+
+# This script should be run in an empty directory; it will refuse to
+# run if it finds a file named site.exp in the current directory.
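+
+# For example (the paths are illustrative only), an installed compiler
+# could be tested from a scratch directory with:
+#   mkdir /tmp/test-gcc && cd /tmp/test-gcc
+#   /path/to/gcc/contrib/test_installed --prefix=/opt/gcc-4.3.1 \
+#     --without-gfortran --without-objc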
+
+
+if test -f site.exp; then
+ echo site.exp already exists >&2
+ exit 1
+fi
+
+while true; do
+ case "$1" in
+ --with-testsuite=*) testsuite=`echo "$1" | sed 's/[^=]*=//'`; shift;;
+ --srcdir=*) srcdir=`echo "$1" | sed 's/[^=]*=//'`; shift;;
+
+ --prefix=*) prefix=`echo "$1" | sed 's/[^=]*=//'`; shift;;
+ --with-gcc=*) GCC_UNDER_TEST=`echo "$1" | sed 's/[^=]*=//'`; shift;;
+ --with-g++=*) GXX_UNDER_TEST=`echo "$1" | sed 's/[^=]*=//'`; shift;;
+ --with-gfortran=*) GFORTRAN_UNDER_TEST=`echo "$1" | sed 's/[^=]*=//'`; shift;;
+ --without-gcc) GCC_UNDER_TEST=no; shift;;
+ --without-g++) GXX_UNDER_TEST=no; shift;;
+ --without-gfortran) GFORTRAN_UNDER_TEST=no; shift;;
+ --without-objc) OBJC_UNDER_TEST=no; shift;;
+
+ --tmpdir=*) tmpdir=`echo "$1" | sed 's/[^=]*=//'`; shift;;
+
+ --help) cat <<\EOF
+Runs the testsuite for an installed version of gcc/g++/gfortran/objc
+Copyright (C) 1998 Free Software Foundation
+by Alexandre Oliva <oliva@dcc.unicamp.br>
+
+Supported arguments:
+
+--help prints this page
+
+--with-testsuite=/some/dir/gcc/testsuite specify the testsuite directory
+--srcdir=/some/dir same as --with-testsuite=/some/dir/gcc/testsuite
+ [deduced from shell-script pathname]
+
+--prefix=/some/dir use gcc, g++ and gfortran from /some/dir/bin [PATH]
+--with-gcc=/some/dir/bin/gcc use specified gcc program [gcc]
+--with-g++=/some/dir/bin/g++ use specified g++ program [g++]
+--with-gfortran=/some/dir/bin/gfortran use specified gfortran program [gfortran]
+--without-gcc do not run gcc testsuite
+--without-g++ do not run g++ testsuite
+--without-gfortran do not run gfortran testsuite
+--without-objc do not run objc testsuite
+
+--tmpdir=/some/dir create temporaries and leave failed programs
+ at specified directory [.]
+
+-- end of argument list; following arguments are passed to runtest
+EOF
+ exit
+ ;;
+
+ --) shift; break;;
+ *) break;;
+ esac
+done
+
+if test x"${testsuite+set}" != x"set" && test x"${srcdir+set}" != x"set"; then
+ file=$0
+ while [ -h $file ]; do
+ file=`ls -l $file | sed s/'.* -> '//`
+ done
+ srcdir=`CDPATH=. && cd \`echo "$file" | sed 's,/*[^/]*$,,;s,^$,.,'\`/.. >/dev/null && ${PWDCMD-pwd}`
+fi
+
+cat >site.exp <<EOF
+set rootme "."
+set tmpdir "${tmpdir-`${PWDCMD-pwd}`}"
+set srcdir "${testsuite-${srcdir}/gcc/testsuite}"
+set CFLAGS ""
+set CXXFLAGS ""
+set GCC_UNDER_TEST "${GCC_UNDER_TEST-${prefix}${prefix+/bin/}gcc}"
+set GXX_UNDER_TEST "${GXX_UNDER_TEST-${prefix}${prefix+/bin/}g++}"
+set GFORTRAN_UNDER_TEST "${GFORTRAN_UNDER_TEST-${prefix}${prefix+/bin/}gfortran}"
+set OBJC_UNDER_TEST "${OBJC_UNDER_TEST-${prefix}${prefix+/bin/}gcc}"
+EOF
+
+test x"${GCC_UNDER_TEST}" = x"no" || runtest --tool gcc ${1+"$@"}
+test x"${GXX_UNDER_TEST}" = x"no" || runtest --tool g++ ${1+"$@"}
+test x"${GFORTRAN_UNDER_TEST}" = x"no" || runtest --tool gfortran ${1+"$@"}
+test x"${OBJC_UNDER_TEST}" = x"no" || runtest --tool objc ${1+"$@"}
+
+exit 0
diff --git a/gcc-4.3.1/contrib/test_summary b/gcc-4.3.1/contrib/test_summary
new file mode 100755
index 000000000..98664359b
--- /dev/null
+++ b/gcc-4.3.1/contrib/test_summary
@@ -0,0 +1,156 @@
+#! /bin/sh
+
+# (C) 1998, 1999, 2000, 2002 Free Software Foundation
+# Originally by Alexandre Oliva <oliva@dcc.unicamp.br>
+
+# This script is Free Software, and it can be copied, distributed and
+# modified as defined in the GNU General Public License. A copy of
+# its license can be downloaded from http://www.gnu.org/copyleft/gpl.html
+
+# This script processes *.{sum,log} files, producing a shell-script
+# that sends e-mail to the appropriate lists and renames files to
+# *.sent. It currently handles only gcc, but it should be quite easy
+# to modify it to handle other packages and their mailing lists.
+
+# The script assumes it is run in the root directory of the build
+# tree, and it will include all .sum files it finds in the mail
+# report.
+
+# configure flags are extracted from ./config.status
+
+# if the BOOT_CFLAGS environment variable is set, it will be included
+# in the mail report too.
+
+# The usage pattern of this script is as follows:
+
+# test_summary | more # so as to observe what should be done
+
+# test_summary | sh # so as to actually send e-mail and move log files
+
+# It accepts a few command line arguments. For example:
+if test x"$1" = "x-h"; then
+ cat <<_EOF
+ -o: re-reads logs that have been mailed already (.sum.sent)
+ -t: prevents logs from being renamed
+ -p: prepend specified file (or list of files: -p "a b") to the report
+ -i: append specified file (or list of files: -i "a b") to the report
+ -m: specify the e-mail address to send notes to. An appropriate default
+ should be selected from the log files.
+ -f: force reports to be mailed; if omitted, only reports that differ
+ from the sent.* version are sent.
+_EOF
+ exit 0
+fi
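+
+# For instance (the address below is illustrative only), one might
+# preview a report without renaming any logs, then actually mail it:
+#   test_summary -t -m you@example.org | more
+#   test_summary -m you@example.org | sh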
+
+# Find a good awk.
+if test -z "$AWK" ; then
+ for AWK in gawk nawk awk ; do
+ if type $AWK 2>&1 | grep 'not found' > /dev/null 2>&1 ; then
+ :
+ else
+ break
+ fi
+ done
+fi
+
+: ${filesuffix=}; export filesuffix
+: ${move=true}; export move
+: ${forcemail=false}; export forcemail
+while true; do
+ case "$1" in
+ -o) filesuffix=.sent; move=false; : ${mailto=nobody}; shift;;
+ -t) move=false; shift;;
+ -p) prepend_logs=${prepend_logs+"$prepend_logs "}"$2"; shift 2;;
+ -i) append_logs=${append_logs+"$append_logs "}"$2"; shift 2;;
+ -m) mailto=$2; forcemail=true; shift 2;;
+ -f) unset mailto; forcemail=true; shift;;
+ *) break;;
+ esac
+done
+: ${mailto="\" address \""}; export mailto
+files=`find . -name \*.sum$filesuffix -print | sort`
+anyfile=false anychange=$forcemail &&
+for file in $files; do
+ [ -f $file ] &&
+ anyfile=true &&
+ { $anychange ||
+ anychange=`diff -u $file.sent $file 2>/dev/null |
+ if test ! -f $file.sent ||
+ egrep '^[-+](XPASS|FAIL)' >/dev/null; then
+ echo true
+ else
+ echo false
+ fi
+ `
+ }
+ true
+done &&
+$anyfile &&
+if $forcemail || $anychange; then :; else mailto=nobody; fi &&
+# We use cat instead of listing the files as arguments to AWK because
+# GNU awk 3.0.0 would break if any of the filenames contained `=' and
+# was preceded by an invalid ``variable'' name.
+cat ./config.status $files |
+$AWK '
+BEGIN {
+ lang=""; configflags = "";
+ address="gcc-testresults@gcc.gnu.org";
+ version="gcc";
+ print "cat <<'"'"'EOF'"'"' |";
+'${prepend_logs+" system(\"cat $prepend_logs\"); "}'
+}
+($0 ~ /^[^ ]*\/configure / || $0 ~ /^# [^ ]*\/configure /) &&
+configflags == "" {
+ configflags = $0 " ";
+ sub(/^# /, "", configflags);
+ sub(/^s,@TOPLEVEL_CONFIGURE_ARGUMENTS@,/, "", configflags);
+ srcdir = configflags;
+ sub(/\/configure .*/, "", srcdir);
+ printf "LAST_UPDATED: ";
+ system("tail -1 " srcdir "/LAST_UPDATED");
+ print "";
+
+ sub(/^[^ ]*\/configure */, " ", configflags);
+ sub(/,;t t $/, " ", configflags);
+ sub(/ --with-gcc-version-trigger=[^ ]* /, " ", configflags);
+ sub(/ --norecursion /, " ", configflags);
+ sub(/ $/, "", configflags);
+ sub(/^ *$/, " none", configflags);
+ configflags = "configure flags:" configflags;
+}
+/^Running target / { print ""; print; }
+/^Target / { if (host != "") next; else host = $3; }
+/^Host / && host ~ /^unix\{.*\}$/ { host = $3 " " substr(host, 5); }
+/^Native / { if (host != "") next; else host = $4; }
+/^[ ]*=== [^ ]+ tests ===/ {
+ if (lang == "") lang = " "$2" "; else lang = " ";
+}
+$2 == "version" { save = $0; $1 = ""; $2 = ""; version = $0; gsub(/^ */, "", version); gsub(/\r$/, "", version); $0 = save; }
+/\===.*Summary/ { print ""; print; blanks=1; }
+/tests ===/ || /^(Target|Host|Native)/ || $2 == "version" { print; blanks=1; }
+/^(XPASS|FAIL|UNRESOLVED|WARNING|ERROR|# of )/ { print; }
+/^using:/ { print ""; print; print ""; }
+# dumpall != 0 && /^X?(PASS|FAIL|UNTESTED)|^testcase/ { dumpall=0; }
+# dumpall != 0 { print; }
+# /^FAIL/ { dumpall=1; }
+/^$/ && blanks>0 { print; --blanks; }
+END { if (lang != "") {
+ print "";
+ print "Compiler version: " prefix version lang;
+ print "Platform: " host;
+ print configflags;
+ '${BOOT_CFLAGS+'print "BOOT_CFLAGS='"${BOOT_CFLAGS}"'";'}'
+ if (boot_cflags != 0) print boot_cflags;
+'${append_logs+" system(\"cat $append_logs\"); "}'
+ print "EOF";
+ print "Mail -s \"Results for " prefix version lang "testsuite on " host "\" '"${mailto}"' &&";
+}}
+{ next; }
+' | sed "s/\([\`\$\\\\]\)/\\\\\\1/g" &&
+if $move; then
+ for file in $files `ls -1 $files | sed s/sum$/log/`; do
+ [ -f $file ] && echo "mv `${PWDCMD-pwd}`/$file `${PWDCMD-pwd}`/$file.sent &&"
+ done
+fi &&
+echo true
+exit 0
diff --git a/gcc-4.3.1/contrib/texi2pod.pl b/gcc-4.3.1/contrib/texi2pod.pl
new file mode 100755
index 000000000..fcdc14550
--- /dev/null
+++ b/gcc-4.3.1/contrib/texi2pod.pl
@@ -0,0 +1,495 @@
+#! /usr/bin/perl -w
+
+# Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
+
+# This file is part of GCC.
+
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING. If not, write to
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston MA 02110-1301, USA.
+
+# This does trivial (and I mean _trivial_) conversion of Texinfo
+# markup to Perl POD format. It's intended to be used to extract
+# something suitable for a manpage from a Texinfo document.
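+#
+# A typical invocation might look like this (the file names and the
+# "-D gcc" toggle are illustrative only; pod2man is the standard Perl
+# tool for the final manpage step):
+#   perl texi2pod.pl -D gcc gcc.texi gcc.pod
+#   pod2man --section=1 gcc.pod > gcc.1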
+
+$output = 0;
+$skipping = 0;
+%sects = ();
+$section = "";
+@icstack = ();
+@endwstack = ();
+@skstack = ();
+@instack = ();
+$shift = "";
+%defs = ();
+$fnno = 1;
+$inf = "";
+$ibase = "";
+@ipath = ();
+
+while ($_ = shift) {
+ if (/^-D(.*)$/) {
+ if ($1 ne "") {
+ $flag = $1;
+ } else {
+ $flag = shift;
+ }
+ $value = "";
+ ($flag, $value) = ($flag =~ /^([^=]+)(?:=(.+))?/);
+ die "no flag specified for -D\n"
+ unless $flag ne "";
+ die "flags may only contain letters, digits, hyphens, dashes and underscores\n"
+ unless $flag =~ /^[a-zA-Z0-9_-]+$/;
+ $defs{$flag} = $value;
+ } elsif (/^-I(.*)$/) {
+ if ($1 ne "") {
+ $flag = $1;
+ } else {
+ $flag = shift;
+ }
+ push (@ipath, $flag);
+ } elsif (/^-/) {
+ usage();
+ } else {
+ $in = $_, next unless defined $in;
+ $out = $_, next unless defined $out;
+ usage();
+ }
+}
+
+if (defined $in) {
+ $inf = gensym();
+ open($inf, "<$in") or die "opening \"$in\": $!\n";
+ $ibase = $1 if $in =~ m|^(.+)/[^/]+$|;
+} else {
+ $inf = \*STDIN;
+}
+
+if (defined $out) {
+ open(STDOUT, ">$out") or die "opening \"$out\": $!\n";
+}
+
+while(defined $inf) {
+while(<$inf>) {
+ # Certain commands are discarded without further processing.
+ /^\@(?:
+ [a-z]+index # @*index: useful only in complete manual
+ |need # @need: useful only in printed manual
+ |(?:end\s+)?group # @group .. @end group: ditto
+ |page # @page: ditto
+ |node # @node: useful only in .info file
+ |(?:end\s+)?ifnottex # @ifnottex .. @end ifnottex: use contents
+ )\b/x and next;
+
+ chomp;
+
+ # Look for filename and title markers.
+ /^\@setfilename\s+([^.]+)/ and $fn = $1, next;
+ /^\@settitle\s+([^.]+)/ and $tl = postprocess($1), next;
+
+ # Identify a man title but keep only the one we are interested in.
+ /^\@c\s+man\s+title\s+([A-Za-z0-9-]+)\s+(.+)/ and do {
+ if (exists $defs{$1}) {
+ $fn = $1;
+ $tl = postprocess($2);
+ }
+ next;
+ };
+
+ # Look for blocks surrounded by @c man begin SECTION ... @c man end.
+ # This really oughta be @ifman ... @end ifman and the like, but such
+ # would require rev'ing all other Texinfo translators.
+ /^\@c\s+man\s+begin\s+([A-Z]+)\s+([A-Za-z0-9-]+)/ and do {
+ $output = 1 if exists $defs{$2};
+ $sect = $1;
+ next;
+ };
+ /^\@c\s+man\s+begin\s+([A-Z]+)/ and $sect = $1, $output = 1, next;
+ /^\@c\s+man\s+end/ and do {
+ $sects{$sect} = "" unless exists $sects{$sect};
+ $sects{$sect} .= postprocess($section);
+ $section = "";
+ $output = 0;
+ next;
+ };
+
+ # handle variables
+ /^\@set\s+([a-zA-Z0-9_-]+)\s*(.*)$/ and do {
+ $defs{$1} = $2;
+ next;
+ };
+ /^\@clear\s+([a-zA-Z0-9_-]+)/ and do {
+ delete $defs{$1};
+ next;
+ };
+
+ next unless $output;
+
+ # Discard comments. (Can't do it above, because then we'd never see
+ # @c man lines.)
+ /^\@c\b/ and next;
+
+ # End-block handler goes up here because it needs to operate even
+ # if we are skipping.
+ /^\@end\s+([a-z]+)/ and do {
+ # Ignore @end foo, where foo is not an operation which may
+ # cause us to skip, if we are presently skipping.
+ my $ended = $1;
+ next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex|copying)$/;
+
+ die "\@end $ended without \@$ended at line $.\n" unless defined $endw;
+ die "\@$endw ended by \@end $ended at line $.\n" unless $ended eq $endw;
+
+ $endw = pop @endwstack;
+
+ if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex)$/) {
+ $skipping = pop @skstack;
+ next;
+ } elsif ($ended =~ /^(?:example|smallexample|display)$/) {
+ $shift = "";
+ $_ = ""; # need a paragraph break
+ } elsif ($ended =~ /^(?:itemize|enumerate|[fv]?table)$/) {
+ $_ = "\n=back\n";
+ $ic = pop @icstack;
+ } elsif ($ended eq "multitable") {
+ $_ = "\n=back\n";
+ } else {
+ die "unknown command \@end $ended at line $.\n";
+ }
+ };
+
+ # We must handle commands which can cause skipping even while we
+ # are skipping, otherwise we will not process nested conditionals
+ # correctly.
+ /^\@ifset\s+([a-zA-Z0-9_-]+)/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = "ifset";
+ $skipping = 1 unless exists $defs{$1};
+ next;
+ };
+
+ /^\@ifclear\s+([a-zA-Z0-9_-]+)/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = "ifclear";
+ $skipping = 1 if exists $defs{$1};
+ next;
+ };
+
+ /^\@(ignore|menu|iftex|copying)\b/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = $1;
+ $skipping = 1;
+ next;
+ };
+
+ next if $skipping;
+
+ # Character entities. First the ones that can be replaced by raw text
+ # or discarded outright:
+ s/\@copyright\{\}/(c)/g;
+ s/\@dots\{\}/.../g;
+ s/\@enddots\{\}/..../g;
+ s/\@([.!? ])/$1/g;
+ s/\@[:-]//g;
+ s/\@bullet(?:\{\})?/*/g;
+ s/\@TeX\{\}/TeX/g;
+ s/\@pounds\{\}/\#/g;
+ s/\@minus(?:\{\})?/-/g;
+ s/\\,/,/g;
+
+ # Now the ones that have to be replaced by special escapes
+ # (which will be turned back into text by unmunge())
+ s/&/&amp;/g;
+ s/\@\{/&lbrace;/g;
+ s/\@\}/&rbrace;/g;
+ s/\@\@/&at;/g;
+
+ # Inside a verbatim block, handle @var, @samp and @url specially.
+ if ($shift ne "") {
+ s/\@var\{([^\}]*)\}/<$1>/g;
+ s/\@samp\{([^\}]*)\}/"$1"/g;
+ s/\@url\{([^\}]*)\}/<$1>/g;
+ }
+
+ # POD doesn't interpret E<> inside a verbatim block.
+ if ($shift eq "") {
+ s/</&lt;/g;
+ s/>/&gt;/g;
+ } else {
+ s/</&LT;/g;
+ s/>/&GT;/g;
+ }
+
+ # Single line command handlers.
+
+ /^\@include\s+(.+)$/ and do {
+ push @instack, $inf;
+ $inf = gensym();
+ $file = postprocess($1);
+
+ # Try cwd and $ibase, then explicit -I paths.
+ $done = 0;
+ foreach $path ("", $ibase, @ipath) {
+ $mypath = $file;
+ $mypath = $path . "/" . $mypath if ($path ne "");
+ open($inf, "<" . $mypath) and ($done = 1, last);
+ }
+ die "cannot find $file" if !$done;
+ next;
+ };
+
+ /^\@(?:section|unnumbered|unnumberedsec|center|heading)\s+(.+)$/
+ and $_ = "\n=head2 $1\n";
+ /^\@subsection\s+(.+)$/
+ and $_ = "\n=head3 $1\n";
+ /^\@subsubsection\s+(.+)$/
+ and $_ = "\n=head4 $1\n";
+
+ # Block command handlers:
+ /^\@itemize(?:\s+(\@[a-z]+|\*|-))?/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ if (defined $1) {
+ $ic = $1;
+ } else {
+ $ic = '*';
+ }
+ $_ = "\n=over 4\n";
+ $endw = "itemize";
+ };
+
+ /^\@enumerate(?:\s+([a-zA-Z0-9]+))?/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ if (defined $1) {
+ $ic = $1 . ".";
+ } else {
+ $ic = "1.";
+ }
+ $_ = "\n=over 4\n";
+ $endw = "enumerate";
+ };
+
+ /^\@multitable\s.*/ and do {
+ push @endwstack, $endw;
+ $endw = "multitable";
+ $_ = "\n=over 4\n";
+ };
+
+ /^\@([fv]?table)\s+(\@[a-z]+)/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ $endw = $1;
+ $ic = $2;
+ $ic =~ s/\@(?:samp|strong|key|gcctabopt|env)/B/;
+ $ic =~ s/\@(?:code|kbd)/C/;
+ $ic =~ s/\@(?:dfn|var|emph|cite|i)/I/;
+ $ic =~ s/\@(?:file)/F/;
+ $ic =~ s/\@(?:asis)//;
+ $_ = "\n=over 4\n";
+ };
+
+ /^\@((?:small)?example|display)/ and do {
+ push @endwstack, $endw;
+ $endw = $1;
+ $shift = "\t";
+ $_ = ""; # need a paragraph break
+ };
+
+ /^\@item\s+(.*\S)\s*$/ and $endw eq "multitable" and do {
+ @columns = ();
+ for $column (split (/\s*\@tab\s*/, $1)) {
+	    # @strong{...} is used as a @headitem work-alike
+ $column =~ s/^\@strong{(.*)}$/$1/;
+ push @columns, $column;
+ }
+ $_ = "\n=item ".join (" : ", @columns)."\n";
+ };
+
+ /^\@itemx?\s*(.+)?$/ and do {
+ if (defined $1) {
+ if ($ic) {
+ if ($endw eq "enumerate") {
+ $_ = "\n=item $ic $1\n";
+ $ic =~ s/(\d+)/$1 + 1/eg;
+ } else {
+ # Entity escapes prevent munging by the <>
+ # processing below.
+ $_ = "\n=item $ic\&LT;$1\&GT;\n";
+ }
+ } else {
+ $_ = "\n=item $1\n";
+ }
+ } else {
+ $_ = "\n=item $ic\n";
+ $ic =~ y/A-Ya-y/B-Zb-z/;
+ $ic =~ s/(\d+)/$1 + 1/eg;
+ }
+ };
+
+ $section .= $shift.$_."\n";
+}
+# End of current file.
+close($inf);
+$inf = pop @instack;
+}
+
+die "No filename or title\n" unless defined $fn && defined $tl;
+
+$sects{NAME} = "$fn \- $tl\n";
+$sects{FOOTNOTES} .= "=back\n" if exists $sects{FOOTNOTES};
+
+for $sect (qw(NAME SYNOPSIS DESCRIPTION OPTIONS ENVIRONMENT FILES
+ BUGS NOTES FOOTNOTES SEEALSO AUTHOR COPYRIGHT)) {
+ if(exists $sects{$sect}) {
+ $head = $sect;
+ $head =~ s/SEEALSO/SEE ALSO/;
+ print "=head1 $head\n\n";
+ print scalar unmunge ($sects{$sect});
+ print "\n";
+ }
+}
+
+sub usage
+{
+ die "usage: $0 [-D toggle...] [infile [outfile]]\n";
+}
+
+sub postprocess
+{
+ local $_ = $_[0];
+
+ # @value{foo} is replaced by whatever 'foo' is defined as.
+ while (m/(\@value\{([a-zA-Z0-9_-]+)\})/g) {
+ if (! exists $defs{$2}) {
+ print STDERR "Option $2 not defined\n";
+ s/\Q$1\E//;
+ } else {
+ $value = $defs{$2};
+ s/\Q$1\E/$value/;
+ }
+ }
+
+ # Formatting commands.
+ # Temporary escape for @r.
+ s/\@r\{([^\}]*)\}/R<$1>/g;
+ s/\@(?:dfn|var|emph|cite|i)\{([^\}]*)\}/I<$1>/g;
+ s/\@(?:code|kbd)\{([^\}]*)\}/C<$1>/g;
+ s/\@(?:samp|strong|key|option|env|command|b)\{([^\}]*)\}/B<$1>/g;
+ s/\@sc\{([^\}]*)\}/\U$1/g;
+ s/\@file\{([^\}]*)\}/F<$1>/g;
+ s/\@w\{([^\}]*)\}/S<$1>/g;
+ s/\@(?:dmn|math)\{([^\}]*)\}/$1/g;
+
+ # keep references of the form @ref{...}, print them bold
+ s/\@(?:ref)\{([^\}]*)\}/B<$1>/g;
+
+ # Change double single quotes to double quotes.
+ s/''/"/g;
+ s/``/"/g;
+
+ # Cross references are thrown away, as are @noindent and @refill.
+ # (@noindent is impossible in .pod, and @refill is unnecessary.)
+ # @* is also impossible in .pod; we discard it and any newline that
+ # follows it. Similarly, our macro @gol must be discarded.
+
+ s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g;
+ s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g;
+ s/;\s+\@pxref\{(?:[^\}]*)\}//g;
+ s/\@noindent\s*//g;
+ s/\@refill//g;
+ s/\@gol//g;
+ s/\@\*\s*\n?//g;
+
+ # Anchors are thrown away
+ s/\@anchor\{(?:[^\}]*)\}//g;
+
+ # @uref can take one, two, or three arguments, with different
+ # semantics each time. @url and @email are just like @uref with
+ # one argument, for our purposes.
+ s/\@(?:uref|url|email)\{([^\},]*)\}/&lt;B<$1>&gt;/g;
+ s/\@uref\{([^\},]*),([^\},]*)\}/$2 (C<$1>)/g;
+ s/\@uref\{([^\},]*),([^\},]*),([^\},]*)\}/$3/g;
+
+ # Handle gccoptlist here, so it can contain the above formatting
+ # commands.
+ s/\@gccoptlist\{([^\}]*)\}/B<$1>/g;
+
+ # Un-escape <> at this point.
+ s/&LT;/</g;
+ s/&GT;/>/g;
+
+ # Now un-nest all B<>, I<>, R<>. Theoretically we could have
+ # indefinitely deep nesting; in practice, one level suffices.
+ 1 while s/([BIR])<([^<>]*)([BIR])<([^<>]*)>/$1<$2>$3<$4>$1</g;
+
+ # Replace R<...> with bare ...; eliminate empty markup, B<>;
+ # shift white space at the ends of [BI]<...> expressions outside
+ # the expression.
+ s/R<([^<>]*)>/$1/g;
+ s/[BI]<>//g;
+ s/([BI])<(\s+)([^>]+)>/$2$1<$3>/g;
+ s/([BI])<([^>]+?)(\s+)>/$1<$2>$3/g;
+
+ # Extract footnotes. This has to be done after all other
+ # processing because otherwise the regexp will choke on formatting
+ # inside @footnote.
+ while (/\@footnote/g) {
+ s/\@footnote\{([^\}]+)\}/[$fnno]/;
+ add_footnote($1, $fnno);
+ $fnno++;
+ }
+
+ return $_;
+}
+
+sub unmunge
+{
+ # Replace escaped symbols with their equivalents.
+ local $_ = $_[0];
+
+ s/&lt;/E<lt>/g;
+ s/&gt;/E<gt>/g;
+ s/&lbrace;/\{/g;
+ s/&rbrace;/\}/g;
+ s/&at;/\@/g;
+ s/&amp;/&/g;
+ return $_;
+}
+
+sub add_footnote
+{
+ unless (exists $sects{FOOTNOTES}) {
+ $sects{FOOTNOTES} = "\n=over 4\n\n";
+ }
+
+ $sects{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++;
+ $sects{FOOTNOTES} .= $_[0];
+ $sects{FOOTNOTES} .= "\n\n";
+}
+
+# stolen from Symbol.pm
+{
+ my $genseq = 0;
+ sub gensym
+ {
+ my $name = "GEN" . $genseq++;
+ my $ref = \*{$name};
+ delete $::{$name};
+ return $ref;
+ }
+}
diff --git a/gcc-4.3.1/contrib/uninclude b/gcc-4.3.1/contrib/uninclude
new file mode 100755
index 000000000..8ba28e5cf
--- /dev/null
+++ b/gcc-4.3.1/contrib/uninclude
@@ -0,0 +1,52 @@
+#! /bin/sh
+
+# (C) 1998, 2007 Free Software Foundation
+# Originally by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
+
+# This gawk/shell script is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as published
+# by the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+
+# Given a preprocessed C/C++ code snippet, this script will replace the
+# expanded contents of any standard header file with the corresponding
+# #include <...> directive.
+
+# Example:
+# # 1 "test.c"
+# # 1 "/usr/include/stdio.h" 1 3
+# <snip>
+# # 1 "test.c" 2
+#
+# main() { printf("Hello world!\n"); }
+
+# is replaced with
+# # 1 "test.c"
+# #include <stdio.h>
+# main() { printf("Hello world!\n"); }
+
+
+# Header files whose pathnames contain any of the following patterns
+# are considered standard headers: usr/include, g++-include,
+# include/g++, include/c++/<version>, gcc-lib/<anything>/include,
+# gcc/include.
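+
+# The EXCLUDEPATT and INCLUDEPATT environment variables, if set, are
+# passed to gawk as regular expressions that further restrict which
+# headers are un-included.  A hypothetical example, folding only
+# stdio.h back into an #include directive:
+#   INCLUDEPATT='stdio\.h' ./uninclude test.i > test-clean.c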
+
+gawk ${EXCLUDEPATT+-vexclude="$EXCLUDEPATT"} \
+ ${INCLUDEPATT+-vinclude="$INCLUDEPATT"} '
+BEGIN {
+ skipping = 0;
+ cppline = "^# [0-9]+ \"[^\"]*/(usr/include|g\\+\\+-include|include/g\\+\\+|include/c\\+\\+/[^/]+|gcc-lib/[^\"]+/include|gcc/include)/([^\"]+)\"( [1-4])*$"
+}
+!skipping && $0 ~ cppline &&
+(exclude == "" || $3 !~ exclude) && (include == "" || $3 ~ include) {
+ skipping = 1;
+ printf "%s\n", "#include <" gensub(cppline, "\\2", "", $0) ">"
+ next;
+}
+skipping && /^# [0-9]+ / && $3 == lastincluded {
+ skipping = 0;
+ next;
+}
+!skipping && /^# [0-9]+ / {
+ lastincluded = $3;
+}
+!skipping { print }
+' ${1+"$@"}
diff --git a/gcc-4.3.1/contrib/warn_summary b/gcc-4.3.1/contrib/warn_summary
new file mode 100755
index 000000000..38e2c01ef
--- /dev/null
+++ b/gcc-4.3.1/contrib/warn_summary
@@ -0,0 +1,214 @@
+#!/bin/sh
+#
+# This script parses the output of a gcc bootstrap when using warning
+# flags and determines various statistics.
+#
+# usage: warn_summary [-llf] [-s stage] [-nosub|-ch|-cp|-f|-fortran|-java|-ada|-intl|-fixinc]
+# [-pass|-wpass] [file(s)]
+#
+# -llf
+# Filter out long lines from the bootstrap output before any other
+# action. This is useful for systems with broken awks/greps which choke
+# on long lines. It is not done by default as it sometimes slows things
+# down.
+#
+# -s number
+#	Take warnings from stage "number".  Stage 0 means show warnings from
+#	before and after the bootstrap of the compiler itself, e.g. from the
+#	runtime libraries and other top-level directories.  This presupposes
+#	using "gcc -W*" for the stage1 compiler.
+#
+# -nosub
+# Only show warnings from the gcc top level directory.
+# -ch|-cp|-f|-fortran|-java|-ada|-intl|-fixinc
+# Only show warnings from the specified gcc subdirectory.
+# These override each other so only the last one passed takes effect.
+#
+# -pass
+#	Pass through the bootstrap output after filtering stage and subdir
+#	(useful for manual inspection); this passes all lines, not just
+#	warnings.
+# -wpass
+# Pass through only warnings from the bootstrap output after filtering
+# stage and subdir.
+#
+# By Kaveh Ghazi (ghazi@caip.rutgers.edu) 12/13/97.
+
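+# For example (the log file name is hypothetical), to count the stage3
+# warnings coming from the C++ front end in a saved bootstrap log:
+#   warn_summary -s 3 -cp bootstrap.log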
+
+# Some awks choke on long lines; sed seems to do a better job.
+# Truncate lines > 255 characters. RE '.\{255,\}' doesn't seem to work. :-(
+# Only do this if -llf was specified, because it can really slow things down.
+longLineFilter()
+{
+ if test -z "$llf" ; then
+ cat
+ else
+ sed 's/^\(...............................................................................................................................................................................................................................................................\).*/\1/'
+ fi
+}
+
+# This function does one of three things. It either passes through
+# all warning data, or passes through gcc toplevel warnings, or passes
+# through a particular subdirectory set of warnings.
+subdirectoryFilter()
+{
+ longLineFilter | (
+ if test -z "$filter" ; then
+ # Pass through all lines.
+ cat
+ else
+ if test "$filter" = nosub ; then
+ # Omit all subdirectories.
+ egrep -v '/gcc/(ch|cp|f|fortran|java|ada|intl|fixinc)/'
+ else
+ # Pass through only subdir $filter.
+ grep "/gcc/$filter/"
+ fi
+ fi )
+}
+
+# This function displays all lines from stageN of the bootstrap. If
+# stage==0, it shows lines prior to stage1 and lines from after the last
+# stage, i.e. utilities, libraries, etc.
+stageNfilter()
+{
+ if test "$stageN" -lt 1 ; then
+ # stage "0" means check everything *but* gcc.
+ $AWK "BEGIN{t=1} ; /^Bootstrapping the compiler/{t=0} ; /^Building runtime libraries/{t=1} ; {if(t==1)print}"
+ else
+ if test "$stageN" -eq 1 ; then
+ $AWK "/^Bootstrapping the compiler|^Building the C and C\+\+ compiler/{t=1} ; /stage$stageN/{t=0} ; {if(t==1)print}"
+ else
+ stageNminus1=`expr $stageN - 1`
+ $AWK "/stage${stageNminus1}\//{t=1} ; /stage$stageN/{t=0} ; {if(t==1)print}"
+ fi
+ fi
+}
+
+# This function displays lines containing warnings.
+warningFilter()
+{
+ grep ' warning: '
+}
+
+# This function replaces `xxx' with `???', where xxx is usually some
+# variable or function name. This allows similar warnings to be
+# counted together when summarizing.  However, it avoids replacing
+# certain C keywords which are known to appear in various messages.
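+# For instance, two (made-up) warnings such as
+#   `foo' defined but not used
+#   `bar' defined but not used
+# both collapse to `???' defined but not used, and are therefore
+# counted as a single warning type.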
+
+keywordFilter() {
+ sed 's/.*warning: //;
+ s/`\(int\)'"'"'/"\1"/g;
+ s/`\(long\)'"'"'/"\1"/g;
+ s/`\(char\)'"'"'/"\1"/g;
+ s/`\(inline\)'"'"'/"\1"/g;
+ s/`\(else\)'"'"'/"\1"/g;
+ s/`\(return\)'"'"'/"\1"/g;
+ s/`\(static\)'"'"'/"\1"/g;
+ s/`\(extern\)'"'"'/"\1"/g;
+ s/`\(const\)'"'"'/"\1"/g;
+ s/`\(noreturn\)'"'"'/"\1"/g;
+ s/`\(longjmp\)'"'"' or `\(vfork\)'"'"'/"\1" or "\2"/g;
+ s/'"[\`'][^']*'/"'"???"/g;
+ s/.*format, .* arg (arg [0-9][0-9]*)/??? format, ??? arg (arg ???)/;
+ s/\([( ]\)arg [0-9][0-9]*\([) ]\)/\1arg ???\2/;
+ s/"\([^"]*\)"/`\1'"'"'/g'
+}
+
+# This function strips out relative pathnames for source files printed
+# by the warningFilter function. This is done so that as the snapshot
+# directory name changes every week, the output of this program can be
+# compared to previous runs without spurious diffs caused by source
+# directory name changes.
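+# E.g. a warning line starting with /home/me/gcc-20080101/gcc/fold-const.c
+# (a made-up path) is reduced to start with gcc/fold-const.c.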
+
+srcdirFilter()
+{
+ sed '
+s%^[^ ]*/\(gcc/\)%\1%;
+s%^[^ ]*/\(include/\)%\1%;
+s%^[^ ]*/\(texinfo/\)%\1%;
+s%^[^ ]*/\(fastjar/\)%\1%;
+s%^[^ ]*/\(zlib/\)%\1%;
+s%^[^ ]*/\(fixincludes/\)%\1%;
+s%^[^ ]*/\(sim/\)%\1%;
+s%^[^ ]*/\(newlib/\)%\1%;
+s%^[^ ]*/\(mpfr/\)%\1%;
+s%^[^ ]*/\(lib[a-z23+-]*/\)%\1%;'
+}
+
+# Start the main section.
+
+usage="usage: `basename $0` [-llf] [-s stage] [-nosub|-ch|-cp|-f|-fortran|-java|-ada|-intl|-fixinc] [-pass|-wpass] [file(s)]"
+stageN=3
+tmpfile=/tmp/tmp-warn.$$
+
+# Remove $tmpfile on exit and various signals.
+trap "rm -f $tmpfile" 0
+trap "rm -f $tmpfile ; exit 1" 1 2 3 5 9 13 15
+
+# Find a good awk.
+if test -z "$AWK" ; then
+ for AWK in gawk nawk awk ; do
+ if type $AWK 2>&1 | grep 'not found' > /dev/null 2>&1 ; then
+ :
+ else
+ break
+ fi
+ done
+fi
+
+# Parse command line arguments.
+while test -n "$1" ; do
+ case "$1" in
+ -llf) llf=1 ; shift ;;
+ -s) if test -z "$2"; then echo $usage 1>&2; exit 1; fi
+ stageN="$2"; shift 2 ;;
+ -s*) stageN="`expr $1 : '-s\(.*\)'`" ; shift ;;
+ -nosub|-ch|-cp|-f|-fortran|-java|-ada|-intl|-fixinc) filter="`expr $1 : '-\(.*\)'`" ; shift ;;
+ -pass) pass=1 ; shift ;;
+ -wpass) pass=w ; shift ;;
+ -*) echo $usage 1>&2 ; exit 1 ;;
+ *) break ;;
+ esac
+done
+
+# Check for a valid value of $stageN.
+case "$stageN" in
+ [0-9]) ;;
+ *) echo "Stage <$stageN> must be in the range [0..9]." 1>&2 ; exit 1 ;;
+esac
+
+for file in "$@" ; do
+
+ stageNfilter < $file | subdirectoryFilter > $tmpfile
+
+ # (Just) show me the warnings.
+ if test "$pass" != '' ; then
+ if test "$pass" = w ; then
+ warningFilter < $tmpfile
+ else
+ cat $tmpfile
+ fi
+ continue
+ fi
+
+ if test -z "$filter" ; then
+ echo "Counting all warnings,"
+ else
+ if test "$filter" = nosub ; then
+ echo "Counting non-subdirectory warnings,"
+ else
+ echo "Counting warnings in the gcc/$filter subdirectory,"
+ fi
+ fi
+ count=`warningFilter < $tmpfile | wc -l`
+ echo there are $count warnings in stage$stageN of this bootstrap.
+
+ echo
+ echo Number of warnings per file:
+ warningFilter < $tmpfile | srcdirFilter | $AWK -F: '{print$1}' | sort | \
+ uniq -c | sort -nr
+
+ echo
+ echo Number of warning types:
+ warningFilter < $tmpfile | keywordFilter | sort | uniq -c | sort -nr
+
+done