Diffstat (limited to 'gcc-4.9/gcc/ada/gcc-interface')
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/Make-lang.in     1012
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/Makefile.in      3103
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/ada-tree.def       74
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/ada-tree.h        513
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/ada.h              73
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/config-lang.in     41
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/cuintp.c          183
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/decl.c           8924
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/gadaint.h          43
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/gigi.h           1087
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/lang-specs.h       74
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/lang.opt           91
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/misc.c            928
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/targtyps.c        266
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/trans.c          9469
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/utils.c          6579
-rw-r--r--  gcc-4.9/gcc/ada/gcc-interface/utils2.c         2852
17 files changed, 35312 insertions, 0 deletions
diff --git a/gcc-4.9/gcc/ada/gcc-interface/Make-lang.in b/gcc-4.9/gcc/ada/gcc-interface/Make-lang.in
new file mode 100644
index 000000000..321c0d688
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/Make-lang.in
@@ -0,0 +1,1012 @@
+# Top level -*- makefile -*- fragment for GNU Ada (GNAT).
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
+# Free Software Foundation, Inc.
+
+#This file is part of GCC.
+
+#GCC is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 3, or (at your option)
+#any later version.
+
+#GCC is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with GCC; see the file COPYING3. If not see
+#<http://www.gnu.org/licenses/>.
+
+# This file provides the language dependent support in the main Makefile.
+# Each language makefile fragment must provide the following targets:
+#
+# foo.all.cross, foo.start.encap, foo.rest.encap,
+# foo.install-common, foo.install-man, foo.install-info, foo.install-pdf,
+# foo.install-html, foo.info, foo.dvi, foo.pdf, foo.html, foo.uninstall,
+# foo.mostlyclean, foo.clean, foo.distclean,
+# foo.maintainer-clean, foo.stage1, foo.stage2, foo.stage3, foo.stage4
+#
+# where `foo' is the name of the language.
+#
+# It should also provide rules for:
+#
+# - making any compiler driver (e.g. g++)
+# - making the compiler proper (e.g. cc1plus)
+# - defining the names for selecting the language in LANGUAGES.
+
+# Tool definitions.
+CP = cp -p
+ECHO = echo
+MV = mv
+MKDIR = mkdir -p
+RM = rm -f
+RMDIR = rm -rf
+
+
+# Extra flags to pass to recursive makes.
+COMMON_ADAFLAGS= -gnatpg
+ifeq ($(TREECHECKING),)
+CHECKING_ADAFLAGS=
+else
+CHECKING_ADAFLAGS= -gnata
+endif
+WARN_ADAFLAGS= -W -Wall
+
+# For native builds, the base compiler might be old and we need to arrange for
+# style warnings not to be flagged as errors during stage1. Cross compilers
+# need to be built by a recent/matching native so we might as well leave the
+# checks fully active.
+
+ifeq ($(CROSS),)
+ADAFLAGS= $(COMMON_ADAFLAGS) -gnatwns
+else
+ADAFLAGS= $(COMMON_ADAFLAGS)
+endif
+
+ALL_ADAFLAGS = \
+ $(CFLAGS) $(ADA_CFLAGS) $(ADAFLAGS) $(CHECKING_ADAFLAGS) $(WARN_ADAFLAGS)
+FORCE_DEBUG_ADAFLAGS = -g
+ADA_CFLAGS =
+ADA_INCLUDES = -nostdinc -I- -I. -Iada -I$(srcdir)/ada -I$(srcdir)/ada/gcc-interface
+GNATLIBFLAGS= -W -Wall -gnatpg -nostdinc
+GNATLIBCFLAGS= -g -O2 $(TCFLAGS)
+ADA_INCLUDE_DIR = $(libsubdir)/adainclude
+ADA_RTL_OBJ_DIR = $(libsubdir)/adalib
+THREAD_KIND=native
+TRACE=no
+# We do not want the compiler's WARN_CFLAGS in Ada, as they are meant for C/C++.
+COMMON_FLAGS_TO_PASS = $(filter-out $(WARN_CFLAGS), $(FLAGS_TO_PASS))
+ADA_FLAGS_TO_PASS = \
+ "ADA_FOR_BUILD=$(ADA_FOR_BUILD)" \
+ "ADA_INCLUDE_DIR=$(ADA_INCLUDE_DIR)" \
+ "ADA_RTL_OBJ_DIR=$(ADA_RTL_OBJ_DIR)" \
+ "ADAFLAGS=$(ADAFLAGS) $(WARN_ADAFLAGS)" \
+ "ADA_FOR_TARGET=$(ADA_FOR_TARGET)" \
+ "INSTALL=$(INSTALL)" \
+ "INSTALL_DATA=$(INSTALL_DATA)" \
+ "INSTALL_PROGRAM=$(INSTALL_PROGRAM)"
+
+# List of Ada tools to build and install
+ADA_TOOLS=gnatbind gnatchop gnat gnatkr gnatlink gnatls gnatmake \
+ gnatname gnatprep gnatxref gnatfind gnatclean gnatsym
+
+# Say how to compile Ada programs.
+.SUFFIXES: .ada .adb .ads
+
+# FIXME: need to add $(ADA_CFLAGS) to .c.o suffix rule
+# Use mildly strict warnings for this front end and add special flags.
+ada-warn = $(ADA_CFLAGS) $(filter-out -pedantic, $(STRICT_WARN))
+# Unresolved warnings in specific files.
+ada/adaint.o-warn = -Wno-error
+
+ada/%.o: ada/gcc-interface/%.c
+ $(COMPILE) $<
+ $(POSTCOMPILE)
+
+# Function that dumps the dependencies of an Ada object file by parsing the
+# associated ALI file. We match the lines starting with D to achieve that.
+ADA_DEPS=case $@ in \
+ *sdefault.o);; \
+ *)a="`echo $@ | sed -e 's/.o$$/.ali/'`"; \
+ echo "$@: `cat $$a | \
+ sed -ne 's;^D \([a-z0-9_\.-]*\).*;ada/\1;gp' | \
+ tr -d '\015' | tr '\n' ' '`" > $(dir $@)/$(DEPDIR)/$(patsubst %.o,%.Po,$(notdir $@));; \
+ esac;
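+# For illustration only (hypothetical unit names): if ada/foo.ali contains
+# the dependency lines
+#   D atree.ads 20130101000000 12345678
+#   D atree.adb 20130101000000 9abcdef0
+# this writes "ada/foo.o: ada/atree.ads ada/atree.adb" into
+# ada/$(DEPDIR)/foo.Po, which the -include of ADA_DEPFILES at the end of
+# this fragment picks up on subsequent builds.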
+
+.adb.o:
+ $(CC) -c $(ALL_ADAFLAGS) $(ADA_INCLUDES) $< $(OUTPUT_OPTION)
+ @$(ADA_DEPS)
+
+.ads.o:
+ $(CC) -c $(ALL_ADAFLAGS) $(ADA_INCLUDES) $< $(OUTPUT_OPTION)
+ @$(ADA_DEPS)
+
+# Define the names for selecting Ada in LANGUAGES.
+ada: gnat1$(exeext) gnatbind$(exeext)
+
+# Tell GNU Make to ignore these, if they exist.
+.PHONY: ada
+
+CXX_LFLAGS = \
+ -B../../../$(target_noncanonical)/libstdc++-v3/src/.libs \
+ -B../../../$(target_noncanonical)/libstdc++-v3/libsupc++/.libs \
+ -L../../../$(target_noncanonical)/libstdc++-v3/src/.libs \
+ -L../../../$(target_noncanonical)/libstdc++-v3/libsupc++/.libs
+
+# There are too many Ada sources to check against here. Let's
+# always force the recursive make.
+ifeq ($(build), $(host))
+ ifeq ($(host), $(target))
+    # This is a regular native build, so use the compiler from our current
+    # build tree.
+ ADA_TOOLS_FLAGS_TO_PASS=\
+ CC="../../xgcc -B../../" \
+ CXX="../../xg++ -B../../ $(CXX_LFLAGS)" \
+ $(COMMON_FLAGS_TO_PASS) $(ADA_FLAGS_TO_PASS) \
+ ADA_INCLUDES="-I- -I../rts" \
+ GNATMAKE="../../gnatmake" \
+ GNATLINK="../../gnatlink" \
+ GNATBIND="../../gnatbind"
+ else
+ # This is a regular cross compiler. Use the native compiler to compile
+ # the tools.
+
+ # put the host RTS dir first in the PATH to hide the default runtime
+ # files that are among the sources
+ ifneq ($(findstring ada,$(LANGUAGES)),)
+ RTS_DIR:=$(strip $(subst \,/,$(shell gnatls -v | grep adalib )))
+ endif
+
+ ADA_TOOLS_FLAGS_TO_PASS=\
+ CC="$(CC)" \
+ CXX="$(CXX)" \
+ $(COMMON_FLAGS_TO_PASS) $(ADA_FLAGS_TO_PASS) \
+ ADA_INCLUDES="-I$(RTS_DIR)/../adainclude -I$(RTS_DIR)" \
+ GNATMAKE="gnatmake" \
+ GNATBIND="gnatbind" \
+ GNATLINK="gnatlink" \
+ LIBGNAT=""
+ endif
+else
+  # Build is different from host, so we are either building a Canadian cross
+  # or a cross-native compiler. We provide defaults for tools targeting the
+  # host platform, but they can be overridden by just setting <tool>_FOR_HOST
+  # variables.
+ GNATMAKE_FOR_HOST=$(host)-gnatmake
+ GNATBIND_FOR_HOST=$(host)-gnatbind
+ GNATLINK_FOR_HOST=$(host)-gnatlink
+ GNATLS_FOR_HOST=$(host)-gnatls
+
+ ifeq ($(host), $(target))
+ # This is a cross native. All the sources are taken from the currently
+ # built runtime.
+ ADA_TOOLS_FLAGS_TO_PASS=\
+ CC="$(CC)" \
+ CXX="$(CXX)" \
+ $(COMMON_FLAGS_TO_PASS) $(ADA_FLAGS_TO_PASS) \
+ ADA_INCLUDES="-I../rts" \
+ GNATMAKE="$(GNATMAKE_FOR_HOST)" \
+ GNATBIND="$(GNATBIND_FOR_HOST)" \
+ GNATLINK="$(GNATLINK_FOR_HOST)" \
+ LIBGNAT=""
+ else
+    # This is a Canadian cross. We should use a toolchain running on the
+    # build platform and targeting the host platform.
+ ifneq ($(findstring ada,$(LANGUAGES)),)
+ RTS_DIR:=$(strip $(subst \,/,$(shell $(GNATLS_FOR_HOST) -v | grep adalib )))
+ endif
+ ADA_TOOLS_FLAGS_TO_PASS=\
+ CC="$(CC)" \
+ CXX="$(CXX)" \
+ $(COMMON_FLAGS_TO_PASS) $(ADA_FLAGS_TO_PASS) \
+ ADA_INCLUDES="-I$(RTS_DIR)/../adainclude -I$(RTS_DIR)" \
+ GNATMAKE="$(GNATMAKE_FOR_HOST)" \
+ GNATBIND="$(GNATBIND_FOR_HOST)" \
+ GNATLINK="$(GNATLINK_FOR_HOST)" \
+ LIBGNAT=""
+ endif
+endif
+
+# Strip -Werror during linking for the LTO bootstrap
+GCC_LINKERFLAGS = $(filter-out -Werror, $(ALL_LINKERFLAGS))
+
+GCC_LINK=$(LINKER) $(GCC_LINKERFLAGS) $(LDFLAGS)
+GCC_LLINK=$(LLINKER) $(GCC_LINKERFLAGS) $(LDFLAGS)
+
+# Lists of files for various purposes.
+
+# Language-specific object files for Ada.
+
+# Object files for gnat1 from C sources.
+GNAT1_C_OBJS = ada/adadecode.o ada/adaint.o ada/argv.o ada/cio.o \
+ ada/cstreams.o ada/env.o ada/init.o ada/initialize.o ada/raise.o \
+ ada/seh_init.o ada/targext.o ada/cuintp.o ada/decl.o \
+ ada/misc.o ada/utils.o ada/utils2.o ada/trans.o ada/targtyps.o
+
+# Object files from Ada sources that are used by gnat1
+GNAT_ADA_OBJS = \
+ ada/a-charac.o \
+ ada/a-chlat1.o \
+ ada/a-elchha.o \
+ ada/a-except.o \
+ ada/a-ioexce.o \
+ ada/ada.o \
+ ada/spark_xrefs.o \
+ ada/ali.o \
+ ada/alloc.o \
+ ada/aspects.o \
+ ada/atree.o \
+ ada/butil.o \
+ ada/casing.o \
+ ada/checks.o \
+ ada/comperr.o \
+ ada/csets.o \
+ ada/cstand.o \
+ ada/debug.o \
+ ada/debug_a.o \
+ ada/einfo.o \
+ ada/elists.o \
+ ada/err_vars.o \
+ ada/errout.o \
+ ada/erroutc.o \
+ ada/eval_fat.o \
+ ada/exp_aggr.o \
+ ada/exp_spark.o \
+ ada/exp_atag.o \
+ ada/exp_attr.o \
+ ada/exp_cg.o \
+ ada/exp_ch11.o \
+ ada/exp_ch12.o \
+ ada/exp_ch13.o \
+ ada/exp_ch2.o \
+ ada/exp_ch3.o \
+ ada/exp_ch4.o \
+ ada/exp_ch5.o \
+ ada/exp_ch6.o \
+ ada/exp_ch7.o \
+ ada/exp_ch8.o \
+ ada/exp_ch9.o \
+ ada/exp_code.o \
+ ada/exp_dbug.o \
+ ada/exp_disp.o \
+ ada/exp_dist.o \
+ ada/exp_fixd.o \
+ ada/exp_imgv.o \
+ ada/exp_intr.o \
+ ada/exp_pakd.o \
+ ada/exp_prag.o \
+ ada/exp_sel.o \
+ ada/exp_smem.o \
+ ada/exp_strm.o \
+ ada/exp_tss.o \
+ ada/exp_util.o \
+ ada/exp_vfpt.o \
+ ada/expander.o \
+ ada/fmap.o \
+ ada/fname-uf.o \
+ ada/fname.o \
+ ada/freeze.o \
+ ada/frontend.o \
+ ada/g-byorma.o \
+ ada/g-hesora.o \
+ ada/g-htable.o \
+ ada/g-spchge.o \
+ ada/g-speche.o \
+ ada/g-u3spch.o \
+ ada/get_spark_xrefs.o \
+ ada/get_targ.o \
+ ada/gnat.o \
+ ada/gnatvsn.o \
+ ada/hostparm.o \
+ ada/impunit.o \
+ ada/inline.o \
+ ada/interfac.o \
+ ada/itypes.o \
+ ada/krunch.o \
+ ada/layout.o \
+ ada/lib-load.o \
+ ada/lib-util.o \
+ ada/lib-writ.o \
+ ada/lib-xref.o \
+ ada/lib.o \
+ ada/live.o \
+ ada/namet-sp.o \
+ ada/namet.o \
+ ada/nlists.o \
+ ada/nmake.o \
+ ada/opt.o \
+ ada/osint-c.o \
+ ada/osint.o \
+ ada/output.o \
+ ada/par.o \
+ ada/par_sco.o \
+ ada/prep.o \
+ ada/prepcomp.o \
+ ada/put_spark_xrefs.o \
+ ada/put_scos.o \
+ ada/repinfo.o \
+ ada/restrict.o \
+ ada/rident.o \
+ ada/rtsfind.o \
+ ada/s-addope.o \
+ ada/s-assert.o \
+ ada/s-bitops.o \
+ ada/s-carun8.o \
+ ada/s-casuti.o \
+ ada/s-conca2.o \
+ ada/s-conca3.o \
+ ada/s-conca4.o \
+ ada/s-conca5.o \
+ ada/s-conca6.o \
+ ada/s-conca7.o \
+ ada/s-conca8.o \
+ ada/s-conca9.o \
+ ada/s-crc32.o \
+ ada/s-crtl.o \
+ ada/s-excdeb.o \
+ ada/s-except.o \
+ ada/s-exctab.o \
+ ada/s-htable.o \
+ ada/s-imenne.o \
+ ada/s-imgenu.o \
+ ada/s-mastop.o \
+ ada/s-memory.o \
+ ada/s-os_lib.o \
+ ada/s-parame.o \
+ ada/s-purexc.o \
+ ada/s-restri.o \
+ ada/s-secsta.o \
+ ada/s-soflin.o \
+ ada/s-sopco3.o \
+ ada/s-sopco4.o \
+ ada/s-sopco5.o \
+ ada/s-stache.o \
+ ada/s-stalib.o \
+ ada/s-stoele.o \
+ ada/s-strcom.o \
+ ada/s-strhas.o \
+ ada/s-string.o \
+ ada/s-strops.o \
+ ada/s-traent.o \
+ ada/s-unstyp.o \
+ ada/s-utf_32.o \
+ ada/s-valint.o \
+ ada/s-valuns.o \
+ ada/s-valuti.o \
+ ada/s-wchcnv.o \
+ ada/s-wchcon.o \
+ ada/s-wchjis.o \
+ ada/scans.o \
+ ada/scil_ll.o \
+ ada/scn.o \
+ ada/scng.o \
+ ada/scos.o \
+ ada/sdefault.o \
+ ada/sem.o \
+ ada/sem_aggr.o \
+ ada/sem_attr.o \
+ ada/sem_aux.o \
+ ada/sem_case.o \
+ ada/sem_cat.o \
+ ada/sem_ch10.o \
+ ada/sem_ch11.o \
+ ada/sem_ch12.o \
+ ada/sem_ch13.o \
+ ada/sem_ch2.o \
+ ada/sem_ch3.o \
+ ada/sem_ch4.o \
+ ada/sem_ch5.o \
+ ada/sem_ch6.o \
+ ada/sem_ch7.o \
+ ada/sem_ch8.o \
+ ada/sem_ch9.o \
+ ada/sem_dim.o \
+ ada/sem_disp.o \
+ ada/sem_dist.o \
+ ada/sem_elab.o \
+ ada/sem_elim.o \
+ ada/sem_eval.o \
+ ada/sem_intr.o \
+ ada/sem_mech.o \
+ ada/sem_prag.o \
+ ada/sem_res.o \
+ ada/sem_scil.o \
+ ada/sem_smem.o \
+ ada/sem_type.o \
+ ada/sem_util.o \
+ ada/sem_vfpt.o \
+ ada/sem_warn.o \
+ ada/set_targ.o \
+ ada/sinfo-cn.o \
+ ada/sinfo.o \
+ ada/sinput-d.o \
+ ada/sinput-l.o \
+ ada/sinput.o \
+ ada/snames.o \
+ ada/sprint.o \
+ ada/stand.o \
+ ada/stringt.o \
+ ada/style.o \
+ ada/styleg.o \
+ ada/stylesw.o \
+ ada/switch-c.o \
+ ada/switch.o \
+ ada/system.o \
+ ada/table.o \
+ ada/targparm.o \
+ ada/tbuild.o \
+ ada/tree_gen.o \
+ ada/tree_in.o \
+ ada/tree_io.o \
+ ada/treepr.o \
+ ada/treeprs.o \
+ ada/ttypes.o \
+ ada/types.o \
+ ada/uintp.o \
+ ada/uname.o \
+ ada/urealp.o \
+ ada/usage.o \
+ ada/validsw.o \
+ ada/warnsw.o \
+ ada/widechar.o
+
+# Object files for gnat executables
+GNAT1_ADA_OBJS = $(GNAT_ADA_OBJS) ada/back_end.o ada/gnat1drv.o
+
+GNAT1_OBJS = $(GNAT1_C_OBJS) $(GNAT1_ADA_OBJS) ada/b_gnat1.o
+
+GNATBIND_OBJS = \
+ ada/a-clrefi.o \
+ ada/a-comlin.o \
+ ada/a-elchha.o \
+ ada/a-except.o \
+ ada/ada.o \
+ ada/adaint.o \
+ ada/ali-util.o \
+ ada/ali.o \
+ ada/alloc.o \
+ ada/argv.o \
+ ada/aspects.o \
+ ada/atree.o \
+ ada/bcheck.o \
+ ada/binde.o \
+ ada/binderr.o \
+ ada/bindgen.o \
+ ada/bindusg.o \
+ ada/butil.o \
+ ada/casing.o \
+ ada/cio.o \
+ ada/csets.o \
+ ada/cstreams.o \
+ ada/debug.o \
+ ada/einfo.o \
+ ada/elists.o \
+ ada/env.o \
+ ada/err_vars.o \
+ ada/errout.o \
+ ada/erroutc.o \
+ ada/exit.o \
+ ada/final.o \
+ ada/fmap.o \
+ ada/fname-uf.o \
+ ada/fname.o \
+ ada/g-byorma.o \
+ ada/g-hesora.o \
+ ada/g-htable.o \
+ ada/gnat.o \
+ ada/gnatbind.o \
+ ada/gnatvsn.o \
+ ada/hostparm.o \
+ ada/init.o \
+ ada/initialize.o \
+ ada/interfac.o \
+ ada/krunch.o \
+ ada/lib.o \
+ ada/link.o \
+ ada/namet.o \
+ ada/nlists.o \
+ ada/opt.o \
+ ada/osint-b.o \
+ ada/osint.o \
+ ada/output.o \
+ ada/raise.o \
+ ada/restrict.o \
+ ada/rident.o \
+ ada/s-addope.o \
+ ada/s-assert.o \
+ ada/s-carun8.o \
+ ada/s-casuti.o \
+ ada/s-conca2.o \
+ ada/s-conca3.o \
+ ada/s-conca4.o \
+ ada/s-conca5.o \
+ ada/s-conca6.o \
+ ada/s-conca7.o \
+ ada/s-conca8.o \
+ ada/s-conca9.o \
+ ada/s-crc32.o \
+ ada/s-crtl.o \
+ ada/s-excdeb.o \
+ ada/s-except.o \
+ ada/s-exctab.o \
+ ada/s-htable.o \
+ ada/s-imenne.o \
+ ada/s-imgenu.o \
+ ada/s-mastop.o \
+ ada/s-memory.o \
+ ada/s-os_lib.o \
+ ada/s-parame.o \
+ ada/s-restri.o \
+ ada/s-secsta.o \
+ ada/s-soflin.o \
+ ada/s-sopco3.o \
+ ada/s-sopco4.o \
+ ada/s-sopco5.o \
+ ada/s-stache.o \
+ ada/s-stalib.o \
+ ada/s-stoele.o \
+ ada/s-strhas.o \
+ ada/s-string.o \
+ ada/s-strops.o \
+ ada/s-traent.o \
+ ada/s-unstyp.o \
+ ada/s-utf_32.o \
+ ada/s-wchcnv.o \
+ ada/s-wchcon.o \
+ ada/s-wchjis.o \
+ ada/scans.o \
+ ada/scil_ll.o \
+ ada/scng.o \
+ ada/sdefault.o \
+ ada/seh_init.o \
+ ada/sem_aux.o \
+ ada/sinfo.o \
+ ada/sinput-c.o \
+ ada/sinput.o \
+ ada/snames.o \
+ ada/stand.o \
+ ada/stringt.o \
+ ada/style.o \
+ ada/styleg.o \
+ ada/stylesw.o \
+ ada/switch-b.o \
+ ada/switch.o \
+ ada/system.o \
+ ada/table.o \
+ ada/targext.o \
+ ada/targparm.o \
+ ada/tree_io.o \
+ ada/types.o \
+ ada/uintp.o \
+ ada/uname.o \
+ ada/urealp.o \
+ ada/widechar.o
+
+# Language-independent object files.
+ADA_BACKEND = $(BACKEND) attribs.o
+
+# List of target dependent sources, overridden below as necessary
+TARGET_ADA_SRCS =
+
+# Needs to be built with CC=gcc
+# Since the RTL should be built with the latest compiler, remove the
+# stamp target in the parent directory whenever gnat1 is rebuilt
+gnat1$(exeext): $(TARGET_ADA_SRCS) $(GNAT1_OBJS) $(ADA_BACKEND) libcommon-target.a $(LIBDEPS)
+ +$(GCC_LLINK) -o $@ $(GNAT1_OBJS) $(ADA_BACKEND) \
+ libcommon-target.a $(LIBS) $(SYSLIBS) $(BACKENDLIBS) $(CFLAGS)
+ $(RM) stamp-gnatlib2-rts stamp-tools
+
+gnatbind$(exeext): ada/b_gnatb.o $(CONFIG_H) $(GNATBIND_OBJS) ggc-none.o libcommon-target.a $(LIBDEPS)
+ +$(GCC_LINK) -o $@ ada/b_gnatb.o $(GNATBIND_OBJS) ggc-none.o libcommon-target.a $(LIBS) $(SYSLIBS) $(CFLAGS)
+
+# use target-gcc target-gnatmake target-gnatbind target-gnatlink
+gnattools: $(GCC_PARTS) $(CONFIG_H) prefix.o force
+ $(MAKE) -C ada $(ADA_TOOLS_FLAGS_TO_PASS) gnattools1
+ $(MAKE) -C ada $(ADA_TOOLS_FLAGS_TO_PASS) gnattools2
+
+regnattools:
+ $(MAKE) -C ada $(ADA_TOOLS_FLAGS_TO_PASS) gnattools1-re
+ $(MAKE) -C ada $(ADA_TOOLS_FLAGS_TO_PASS) gnattools2
+
+cross-gnattools: force
+ $(MAKE) -C ada $(ADA_TOOLS_FLAGS_TO_PASS) gnattools1-re
+ $(MAKE) -C ada $(ADA_TOOLS_FLAGS_TO_PASS) gnattools2
+ $(MAKE) -C ada $(ADA_TOOLS_FLAGS_TO_PASS) gnattools4
+
+canadian-gnattools: force
+ $(MAKE) -C ada $(ADA_TOOLS_FLAGS_TO_PASS) gnattools1-re
+ $(MAKE) -C ada $(ADA_TOOLS_FLAGS_TO_PASS) gnattools2
+ $(MAKE) -C ada $(ADA_TOOLS_FLAGS_TO_PASS) gnattools4
+
+gnatlib gnatlib-sjlj gnatlib-zcx gnatlib-shared: force
+ $(MAKE) -C ada $(COMMON_FLAGS_TO_PASS) \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS)" \
+ TARGET_LIBGCC2_CFLAGS="$(TARGET_LIBGCC2_CFLAGS)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ TRACE="$(TRACE)" \
+ FORCE_DEBUG_ADAFLAGS="$(FORCE_DEBUG_ADAFLAGS)" \
+ $@
+
+# use only for native compiler
+gnatlib_and_tools: gnatlib gnattools
+
+# Build hooks:
+
+ada.all.cross:
+ for tool in $(ADA_TOOLS) ; do \
+ if [ -f $$tool$(exeext) ] ; \
+ then \
+ $(MV) $$tool$(exeext) $$tool-cross$(exeext); \
+ fi; \
+ done
+
+ada.start.encap:
+ada.rest.encap:
+ada.man:
+ada.srcextra:
+ada.srcman:
+
+ada.tags: force
+ cd $(srcdir)/ada && etags -o TAGS.sub *.c *.h *.ads *.adb && \
+ etags --include TAGS.sub --include ../TAGS.sub
+
+
+# Generate documentation.
+
+ada/doctools/xgnatugn$(build_exeext): ada/xgnatugn.adb
+ -$(MKDIR) ada/doctools
+ $(CP) $^ ada/doctools
+ cd ada/doctools && gnatmake -q xgnatugn
+
+# Note that doc/gnat_ugn.texi and doc/projects.texi do not depend on
+# xgnatugn being built, so we can distribute a pregenerated doc/gnat_ugn.info.
+
+doc/gnat_ugn.texi: $(srcdir)/ada/gnat_ugn.texi $(srcdir)/ada/ug_words \
+ doc/projects.texi $(gcc_docdir)/include/gcc-common.texi gcc-vers.texi
+ $(MAKE) ada/doctools/xgnatugn$(build_exeext)
+ ada/doctools/xgnatugn unw $(srcdir)/ada/gnat_ugn.texi \
+ $(srcdir)/ada/ug_words doc/gnat_ugn.texi
+
+doc/projects.texi: $(srcdir)/ada/projects.texi
+ $(MAKE) ada/doctools/xgnatugn$(build_exeext)
+ ada/doctools/xgnatugn unw $(srcdir)/ada/projects.texi \
+ $(srcdir)/ada/ug_words doc/projects.texi
+
+doc/gnat_ugn.info: doc/gnat_ugn.texi \
+ $(gcc_docdir)/include/fdl.texi $(gcc_docdir)/include/gcc-common.texi \
+ gcc-vers.texi
+ if [ x$(BUILD_INFO) = xinfo ]; then \
+ rm -f $(@)*; \
+ $(MAKEINFO) $(MAKEINFOFLAGS) -I$(gcc_docdir)/include \
+ -I$(srcdir)/ada -o $@ $<; \
+ else true; fi
+
+doc/gnat_rm.info: ada/gnat_rm.texi $(gcc_docdir)/include/fdl.texi \
+ $(gcc_docdir)/include/gcc-common.texi gcc-vers.texi
+ if [ x$(BUILD_INFO) = xinfo ]; then \
+ rm -f $(@)*; \
+ $(MAKEINFO) $(MAKEINFOFLAGS) -I$(gcc_docdir)/include \
+ -I$(srcdir)/ada -o $@ $<; \
+ else true; fi
+
+doc/gnat-style.info: ada/gnat-style.texi $(gcc_docdir)/include/fdl.texi \
+ $(gcc_docdir)/include/gcc-common.texi gcc-vers.texi
+ if [ x$(BUILD_INFO) = xinfo ]; then \
+ rm -f $(@)*; \
+ $(MAKEINFO) $(MAKEINFOFLAGS) -I$(gcc_docdir)/include \
+ -I$(srcdir)/ada -o $@ $<; \
+ else true; fi
+
+ADA_INFOFILES = doc/gnat_ugn.info doc/gnat_ugn.texi \
+ doc/gnat_rm.info doc/gnat-style.info
+
+ada.info: $(ADA_INFOFILES)
+
+ada.srcinfo: $(ADA_INFOFILES)
+ -$(CP) $^ $(srcdir)/doc
+
+ada.install-info: $(DESTDIR)$(infodir)/gnat_ugn.info \
+ $(DESTDIR)$(infodir)/gnat_rm.info \
+ $(DESTDIR)$(infodir)/gnat-style.info
+
+ada.dvi: doc/gnat_ugn.dvi \
+ doc/gnat_rm.dvi doc/gnat-style.dvi
+
+ADA_PDFFILES = doc/gnat_ugn.pdf \
+ doc/gnat_rm.pdf doc/gnat-style.pdf
+
+ada.pdf: $(ADA_PDFFILES)
+
+ada.install-pdf: $(ADA_PDFFILES)
+ @$(NORMAL_INSTALL)
+ test -z "$(pdfdir)/gcc" || $(mkinstalldirs) "$(DESTDIR)$(pdfdir)/gcc"
+ @list='$(ADA_PDFFILES)'; for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ f=$(pdf__strip_dir) \
+ echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(pdfdir)/gcc/$$f'"; \
+ $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(pdfdir)/gcc/$$f"; \
+ done
+
+ada.html:
+
+ada.install-html:
+
+doc/gnat_ugn.dvi: doc/gnat_ugn.texi $(gcc_docdir)/include/fdl.texi \
+ $(gcc_docdir)/include/gcc-common.texi gcc-vers.texi
+ $(TEXI2DVI) -c -I $(abs_docdir)/include -o $@ $<
+
+doc/gnat_rm.dvi: ada/gnat_rm.texi $(gcc_docdir)/include/fdl.texi \
+ $(gcc_docdir)/include/gcc-common.texi gcc-vers.texi
+ $(TEXI2DVI) -c -I $(abs_docdir)/include -o $@ $<
+
+doc/gnat-style.dvi: ada/gnat-style.texi $(gcc_docdir)/include/fdl.texi
+ $(TEXI2DVI) -c -I $(abs_docdir)/include -o $@ $<
+
+doc/gnat_ugn.pdf: doc/gnat_ugn.texi $(gcc_docdir)/include/fdl.texi \
+ $(gcc_docdir)/include/gcc-common.texi gcc-vers.texi
+ $(TEXI2PDF) -c -I $(abs_docdir)/include -o $@ $<
+
+doc/gnat_rm.pdf: ada/gnat_rm.texi $(gcc_docdir)/include/fdl.texi \
+ $(gcc_docdir)/include/gcc-common.texi gcc-vers.texi
+ $(TEXI2PDF) -c -I $(abs_docdir)/include -o $@ $<
+
+doc/gnat-style.pdf: ada/gnat-style.texi $(gcc_docdir)/include/fdl.texi
+ $(TEXI2PDF) -c -I $(abs_docdir)/include -o $@ $<
+
+
+# Install hooks:
+# gnat1 is installed elsewhere as part of $(COMPILERS).
+
+# Install the binder program as gnatbind (native) or $(prefix)gnatbind
+# (cross). $(prefix) comes from the --program-prefix configure option,
+# or from the --target option if the former is not specified.
+# Do the same for the rest of the Ada tools (gnatchop, gnat, gnatkr,
+# gnatlink, gnatls, gnatmake, gnatname, gnatprep, gnatxref, gnatfind,
+# gnatclean, gnatsym).
+# gnatsym is only built on some platforms, including VMS.
+# gnatdll is only used on Windows.
+# vxaddr2line is only used for cross VxWorks ports (it calls the underlying
+# cross addr2line).
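+# For example (hypothetical configuration): configuring with
+# --target=powerpc-wrs-vxworks and no --program-prefix makes
+# program_transform_name rewrite "gnatmake" to "powerpc-wrs-vxworks-gnatmake"
+# before it is copied into $(bindir).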
+ada.install-common:
+ $(MKDIR) $(DESTDIR)$(bindir)
+ -if [ -f gnat1$(exeext) ] ; \
+ then \
+ for tool in $(ADA_TOOLS) ; do \
+ install_name=`echo $$tool|sed '$(program_transform_name)'`$(exeext); \
+ $(RM) $(DESTDIR)$(bindir)/$$install_name; \
+ if [ -f $$tool-cross$(exeext) ] ; \
+ then \
+ $(INSTALL_PROGRAM) $$tool-cross$(exeext) $(DESTDIR)$(bindir)/$$install_name; \
+ else \
+ $(INSTALL_PROGRAM) $$tool$(exeext) $(DESTDIR)$(bindir)/$$install_name; \
+ fi ; \
+ done; \
+ $(RM) $(DESTDIR)$(bindir)/gnatdll$(exeext); \
+ $(INSTALL_PROGRAM) gnatdll$(exeext) $(DESTDIR)$(bindir)/gnatdll$(exeext); \
+ if [ -f vxaddr2line$(exeext) ] ; \
+ then \
+ $(RM) $(DESTDIR)$(bindir)/vxaddr2line$(exeext); \
+ $(INSTALL_PROGRAM) vxaddr2line$(exeext) $(DESTDIR)$(bindir)/vxaddr2line$(exeext); \
+ fi ; \
+ fi
+
+#
+# Finally, install the library
+#
+ -if [ -f gnat1$(exeext) ] ; \
+ then \
+ $(MAKE) $(COMMON_FLAGS_TO_PASS) $(ADA_FLAGS_TO_PASS) install-gnatlib; \
+ fi
+
+install-gnatlib:
+ $(MAKE) -C ada $(COMMON_FLAGS_TO_PASS) $(ADA_FLAGS_TO_PASS) install-gnatlib$(LIBGNAT_TARGET)
+
+install-gnatlib-obj:
+ $(MAKE) -C ada $(COMMON_FLAGS_TO_PASS) $(ADA_FLAGS_TO_PASS) install-gnatlib-obj
+
+ada.install-man:
+ada.install-plugin:
+
+ada.uninstall:
+ for tool in $(ADA_TOOLS) ; do \
+ install_name=`echo $$tool|sed '$(program_transform_name)'`$(exeext); \
+	  $(RM) $(DESTDIR)$(bindir)/$$install_name; \
+ done
+ -$(RM) $(DESTDIR)$(tooldir)/bin/gnatdll$(exeext)
+ -$(RM) $(DESTDIR)$(tooldir)/bin/vxaddr2line$(exeext)
+
+# Clean hooks:
+# A lot of the ancillary files are deleted by the main makefile.
+# We just have to delete files specific to us.
+
+ada.mostlyclean:
+ -$(RM) ada/*$(objext) ada/*.ali ada/b_gnat*.ads ada/b_gnat*.adb
+ -$(RM) ada/*$(coverageexts)
+ -$(RM) ada/sdefault.adb ada/stamp-sdefault ada/stamp-snames
+ -$(RMDIR) ada/tools
+ada.clean:
+ada.distclean:
+ -$(RM) ada/Makefile
+ -$(RM) gnatchop$(exeext)
+ -$(RM) gnat$(exeext)
+ -$(RM) gnatdll$(exeext)
+ -$(RM) gnatkr$(exeext)
+ -$(RM) gnatlink$(exeext)
+ -$(RM) gnatls$(exeext)
+ -$(RM) gnatmake$(exeext)
+ -$(RM) gnatname$(exeext)
+ -$(RM) gnatprep$(exeext)
+ -$(RM) gnatfind$(exeext)
+ -$(RM) gnatxref$(exeext)
+ -$(RM) gnatclean$(exeext)
+ -$(RM) gnatsym$(exeext)
+ -$(RM) ada/rts/*
+ -$(RMDIR) ada/rts
+ -$(RM) ada/tools/*
+ -$(RMDIR) ada/tools
+ada.maintainer-clean:
+ -$(RM) ada/sinfo.h
+ -$(RM) ada/einfo.h
+ -$(RM) ada/nmake.adb
+ -$(RM) ada/nmake.ads
+ -$(RM) ada/treeprs.ads
+ -$(RM) ada/snames.ads ada/snames.adb ada/snames.h
+
+# Stage hooks:
+# The main makefile has already created stage?/ada
+
+ada.stage1: stage1-start
+ -$(MV) ada/*$(objext) ada/*.ali ada/b_gnat*.ad* stage1/ada
+ -$(MV) ada/stamp-* stage1/ada
+ada.stage2: stage2-start
+ -$(MV) ada/*$(objext) ada/*.ali ada/b_gnat*.ad* stage2/ada
+ -$(MV) ada/stamp-* stage2/ada
+ada.stage3: stage3-start
+ -$(MV) ada/*$(objext) ada/*.ali ada/b_gnat*.ad* stage3/ada
+ -$(MV) ada/stamp-* stage3/ada
+ada.stage4: stage4-start
+ -$(MV) ada/*$(objext) ada/*.ali ada/b_gnat*.ad* stage4/ada
+ -$(MV) ada/stamp-* stage4/ada
+ada.stageprofile: stageprofile-start
+ -$(MV) ada/*$(objext) ada/*.ali ada/b_gnat*.ad* stageprofile/ada
+ -$(MV) ada/stamp-* stageprofile/ada
+ada.stagefeedback: stagefeedback-start
+ -$(MV) ada/*$(objext) ada/*.ali ada/b_gnat*.ad* stagefeedback/ada
+ -$(MV) ada/stamp-* stagefeedback/ada
+
+lang_checks += check-gnat
+
+check-ada: check-acats check-gnat
+check-ada-subtargets: check-acats-subtargets check-gnat-subtargets
+
+ACATSDIR = $(TESTSUITEDIR)/ada/acats
+
+check_acats_targets = $(patsubst %,check-acats%, 0 1 2)
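+# i.e. check_acats_targets expands to "check-acats0 check-acats1 check-acats2".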
+
+check-acats:
+ @test -d $(ACATSDIR) || mkdir -p $(ACATSDIR); \
+ rootme=`${PWD_COMMAND}`; export rootme; \
+ EXPECT=$(EXPECT); export EXPECT; \
+ if [ -z "$(CHAPTERS)" ] && [ "$(filter -j, $(MFLAGS))" = "-j" ]; \
+ then \
+ $(MAKE) $(check_acats_targets); \
+ for idx in 0 1 2; do \
+ mv -f $(ACATSDIR)$$idx/acats.sum $(ACATSDIR)$$idx/acats.sum.sep; \
+ mv -f $(ACATSDIR)$$idx/acats.log $(ACATSDIR)$$idx/acats.log.sep; \
+ done; \
+ $(SHELL) $(srcdir)/../contrib/dg-extract-results.sh \
+ $(ACATSDIR)0/acats.sum.sep $(ACATSDIR)1/acats.sum.sep \
+ $(ACATSDIR)2/acats.sum.sep > $(ACATSDIR)/acats.sum; \
+ $(SHELL) $(srcdir)/../contrib/dg-extract-results.sh -L \
+ $(ACATSDIR)0/acats.log.sep $(ACATSDIR)1/acats.log.sep \
+ $(ACATSDIR)2/acats.log.sep > $(ACATSDIR)/acats.log; \
+ exit 0; \
+ fi; \
+ testdir=`cd ${srcdir}/${ACATSDIR} && ${PWD_COMMAND}`; \
+ export testdir; cd $(ACATSDIR) && $(SHELL) $${testdir}/run_acats $(CHAPTERS)
+
+check-acats-subtargets:
+ @echo $(check_acats_targets)
+
+# Parallelized check-acats
+$(check_acats_targets): check-acats%:
+ test -d $(ACATSDIR)$* || mkdir -p $(ACATSDIR)$*; \
+ testdir=`cd ${srcdir}/${ACATSDIR} && ${PWD_COMMAND}`; \
+ case "$*" in \
+ 0) chapters="`cd $$testdir/tests; echo [a-b]* c[0-4]*`";; \
+ 1) chapters="`cd $$testdir/tests; echo c[5-9ab]*`";; \
+ 2) chapters="`cd $$testdir/tests; echo c[c-z]* [d-z]*`";; \
+ esac; \
+ export testdir; cd $(ACATSDIR)$* && $(SHELL) $${testdir}/run_acats $$chapters
+
+.PHONY: check-acats $(check_acats_targets)
+
+# Compiling object files from source files.
+
+# Ada language specific files.
+
+ada/b_gnat1.adb : $(GNAT1_ADA_OBJS)
+	# Old versions of gnatbind do not allow a path for -o.
+ $(GNATBIND) $(ADA_INCLUDES) -o b_gnat1.adb -n ada/gnat1drv.ali
+ $(MV) b_gnat1.adb b_gnat1.ads ada/
+
+ada/b_gnat1.o : ada/b_gnat1.adb
+ # Do not use ADAFLAGS to get rid of -gnatg which generates a lot
+ # of style messages.
+ $(CC) -c $(CFLAGS) $(ADA_CFLAGS) -gnatp -gnatws $(ADA_INCLUDES) \
+ $< $(OUTPUT_OPTION)
+
+ada/b_gnatb.adb : $(GNATBIND_OBJS) ada/gnatbind.o ada/interfac.o
+	# Old versions of gnatbind do not allow a path for -o.
+ $(GNATBIND) $(ADA_INCLUDES) -o b_gnatb.adb ada/gnatbind.ali
+ $(MV) b_gnatb.adb b_gnatb.ads ada/
+
+ada/b_gnatb.o : ada/b_gnatb.adb
+ $(CC) -c $(CFLAGS) $(ADA_CFLAGS) -gnatp -gnatws $(ADA_INCLUDES) \
+ $< $(OUTPUT_OPTION)
+
+include $(srcdir)/ada/Make-generated.in
+
+update-sources : ada/treeprs.ads ada/einfo.h ada/sinfo.h ada/nmake.adb \
+ ada/nmake.ads
+ $(RM) $(addprefix $(srcdir)/ada/,$(notdir $^))
+ $(CP) $^ $(srcdir)/ada
+
+ada/sdefault.o : ada/ada.ads ada/a-except.ads ada/a-unccon.ads \
+ ada/a-uncdea.ads ada/alloc.ads ada/debug.ads ada/hostparm.ads ada/namet.ads \
+ ada/opt.ads ada/osint.ads ada/output.ads ada/sdefault.ads ada/sdefault.adb \
+ ada/s-exctab.ads ada/s-memory.ads ada/s-os_lib.ads ada/s-parame.ads \
+ ada/s-stalib.ads ada/s-strops.ads ada/s-sopco3.ads ada/s-sopco4.ads \
+ ada/s-sopco5.ads ada/s-string.ads ada/s-traent.ads ada/s-unstyp.ads \
+ ada/s-wchcon.ads ada/system.ads ada/table.adb ada/table.ads ada/tree_io.ads \
+ ada/types.ads ada/unchdeal.ads ada/unchconv.ads
+
+# Special flags - see gcc-interface/Makefile.in for the template.
+
+ada/a-except.o : ada/a-except.adb ada/a-except.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(FORCE_DEBUG_ADAFLAGS) -O1 -fno-inline \
+ $(ADA_INCLUDES) $< $(OUTPUT_OPTION)
+ @$(ADA_DEPS)
+
+ada/s-excdeb.o : ada/s-excdeb.adb ada/s-excdeb.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(FORCE_DEBUG_ADAFLAGS) -O0 \
+ $(ADA_INCLUDES) $< $(OUTPUT_OPTION)
+ @$(ADA_DEPS)
+
+ada/s-assert.o : ada/s-assert.adb ada/s-assert.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(FORCE_DEBUG_ADAFLAGS) $(ADA_INCLUDES) \
+ $< $(OUTPUT_OPTION)
+ @$(ADA_DEPS)
+
+ada/a-tags.o : ada/a-tags.adb ada/a-tags.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(FORCE_DEBUG_ADAFLAGS) $(ADA_INCLUDES) \
+ $< $(OUTPUT_OPTION)
+ @$(ADA_DEPS)
+
+# Dependencies for the Windows-specific tool (mdll)
+
+ada/mdll.o : ada/mdll.adb ada/mdll.ads ada/mdll-fil.ads ada/mdll-utl.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(ADA_INCLUDES) $< $(OUTPUT_OPTION)
+
+ada/mdll-fil.o : ada/mdll-fil.adb ada/mdll.ads ada/mdll-fil.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(ADA_INCLUDES) $< $(OUTPUT_OPTION)
+
+ada/mdll-utl.o : ada/mdll-utl.adb ada/mdll.ads ada/mdll-utl.ads ada/sdefault.ads ada/types.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(ADA_INCLUDES) $< $(OUTPUT_OPTION)
+
+ada_generated_files = ada/sinfo.h ada/einfo.h ada/nmake.adb ada/nmake.ads \
+ ada/treeprs.ads ada/snames.ads ada/snames.adb ada/snames.h
+
+# When building from scratch we don't have dependency files; the only thing
+# we need to ensure is that the generated files are created first.
+$(GNAT1_ADA_OBJS) $(GNATBIND_OBJS): | $(ada_generated_files)
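+# (The '|' makes these order-only prerequisites: the generated files must
+# exist before the objects are compiled, but a newer generated file does not
+# by itself force a recompilation.)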
+
+# Manually include the auto-generated dependencies for the Ada host objects.
+ADA_DEPFILES = $(foreach obj,$(GNAT1_ADA_OBJS) $(GNATBIND_OBJS),\
+ $(dir $(obj))/$(DEPDIR)/$(patsubst %.o,%.Po,$(notdir $(obj))))
+-include $(ADA_DEPFILES)
+
+# Automatically include the auto-generated dependencies for the C host objects.
+ada_OBJS = $(GNAT1_C_OBJS)
diff --git a/gcc-4.9/gcc/ada/gcc-interface/Makefile.in b/gcc-4.9/gcc/ada/gcc-interface/Makefile.in
new file mode 100644
index 000000000..352d6550c
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/Makefile.in
@@ -0,0 +1,3103 @@
+# Makefile for GNU Ada Compiler (GNAT).
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+#This file is part of GCC.
+
+#GCC is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 3, or (at your option)
+#any later version.
+
+#GCC is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with GCC; see the file COPYING3. If not see
+#<http://www.gnu.org/licenses/>.
+
+# The makefile built from this file lives in the language subdirectory.
+# Its purpose is to provide support for:
+#
+# 1) recursion where necessary, and only then (building .o's), and
+# 2) building and debugging cc1 from the language subdirectory, and
+# 3) nothing else.
+#
+# The parent makefile handles all other chores, with help from the
+# language makefile fragment, of course.
+#
+# The targets for external use are:
+# all, TAGS, ???mostlyclean, ???clean.
+
+# This makefile will only work with GNU make.
+# The rules are written assuming a minimum subset of tools are available:
+#
+# Required:
+#   MAKE:  Only GNU make will work.
+#   MV:    Must accept (at least) one, maybe wildcard, source argument,
+#          a file or directory destination, and support creation/
+#          modification date preservation.  GNU mv -f works.
+#   RM:    Must accept an arbitrary number of space separated file
+#          arguments, or one wildcard argument.  GNU rm works.
+#   RMDIR: Must delete a directory and all its contents.  GNU rm -rf works.
+#   ECHO:  Must support command line redirection.  Any Unix-like
+#          shell will typically provide this, otherwise a custom version
+#          is trivial to write.
+#   AR:    GNU ar works.
+#   MKDIR: GNU mkdir works.
+#   CHMOD: GNU chmod works.
+#   true:  Does nothing and returns a normal successful return code.
+#   pwd:   Prints the current directory on stdout.
+#   cd:    Change directory.
+#
+# Optional:
+#   BISON: GNU bison works.
+#   FLEX:  GNU flex works.
+#   Other miscellaneous tools for obscure targets.
+
+# Suppress smart makes who think they know how to automake Yacc files
+.y.c:
+
+# Variables that exist for you to override.
+# See below for how to change them for certain systems.
+
+# Various ways of specifying flags for compilations:
+# CFLAGS is for the user to override to, e.g., do a bootstrap with -O2.
+# BOOT_CFLAGS is the value of CFLAGS to pass
+# to the stage2 and stage3 compilations
+CFLAGS = -g
+BOOT_CFLAGS = -O $(CFLAGS)
+# This exists to be overridden by the t-* files.
+T_CFLAGS =
+
+CC = cc
+BISON = bison
+BISONFLAGS =
+ECHO = echo
+LEX = flex
+LEXFLAGS =
+CHMOD = chmod
+LN = ln
+LN_S = ln -s
+CP = cp -p
+MV = mv -f
+RM = rm -f
+RMDIR = rm -rf
+MKDIR = mkdir -p
+AR = ar
+AR_FLAGS = rc
+LS = ls
+RANLIB = @RANLIB@
+RANLIB_FLAGS = @ranlib_flags@
+AWK = @AWK@
+
+COMPILER = $(CC)
+COMPILER_FLAGS = $(CFLAGS)
+
+SHELL = @SHELL@
+PWD_COMMAND = $${PWDCMD-pwd}
+# How to copy preserving the date
+INSTALL_DATA_DATE = cp -p
+MAKEINFO = makeinfo
+TEXI2DVI = texi2dvi
+TEXI2PDF = texi2pdf
+GNATBIND_FLAGS = -static -x
+ADA_CFLAGS =
+ADAFLAGS = -W -Wall -gnatpg -gnata
+FORCE_DEBUG_ADAFLAGS = -g
+NO_SIBLING_ADAFLAGS = -fno-optimize-sibling-calls
+NO_REORDER_ADAFLAGS = -fno-toplevel-reorder
+GNATLIBFLAGS = -W -Wall -gnatpg -nostdinc
+GNATLIBCFLAGS = -g -O2
+PICFLAG_FOR_TARGET = @PICFLAG_FOR_TARGET@
+
+# Pretend that _Unwind_GetIPInfo is available for the target by default. This
+# should be autodetected during the configuration of libada and passed down to
+# here, but we need something for --disable-libada and hope for the best.
+GNATLIBCFLAGS_FOR_C = -W -Wall $(GNATLIBCFLAGS) \
+ -fexceptions -DIN_RTS -DHAVE_GETIPINFO
+ALL_ADAFLAGS = $(CFLAGS) $(ADA_CFLAGS) $(ADAFLAGS)
+THREAD_KIND = native
+THREADSLIB =
+GMEM_LIB =
+MISCLIB =
+OUTPUT_OPTION = -o $@
+
+objext = .o
+exeext =
+arext = .a
+soext = .so
+shext =
+hyphen = -
+
+# Define this as & to perform parallel make on a Sequent.
+# Note that this has some bugs, and it seems currently necessary
+# to compile all the gen* files first by hand to avoid erroneous results.
+P =
+
+# This is used instead of ALL_CFLAGS when compiling with GCC_FOR_TARGET.
+# It specifies -B./.
+# It also specifies -B$(tooldir)/ to find as and ld for a cross compiler.
+GCC_CFLAGS = $(INTERNAL_CFLAGS) $(T_CFLAGS) $(CFLAGS)
+
+# Tools to use when building a cross-compiler.
+# These are used because `configure' appends `cross-make'
+# to the makefile when making a cross-compiler.
+
+# We don't use cross-make. Instead we use the tools from the build tree,
+# if they are available.
+# program_transform_name and objdir are set by configure.in.
+program_transform_name =
+objdir = .
+
+target_alias=@target_alias@
+target=@target@
+target_cpu=@target_cpu@
+target_vendor=@target_vendor@
+target_os=@target_os@
+host_cpu=@host_cpu@
+host_vendor=@host_vendor@
+host_os=@host_os@
+target_cpu_default = @target_cpu_default@
+xmake_file = @xmake_file@
+tmake_file = @tmake_file@
+#version=`sed -e 's/.*\"\([^ \"]*\)[ \"].*/\1/' < $(srcdir)/version.c`
+#mainversion=`sed -e 's/.*\"\([0-9]*\.[0-9]*\).*/\1/' < $(srcdir)/version.c`
+
+# Directory where sources are, from where we are.
+VPATH = $(srcdir)/ada
+
+# Full path to top source directory
+# In particular this is used to access libgcc headers, so that references to
+# these headers from GNAT runtime objects have path names in debugging info
+# that are consistent with libgcc objects. Also used for other references to
+# the top source directory for consistency.
+ftop_srcdir := $(shell cd $(srcdir)/..;${PWD_COMMAND})
+
+fsrcdir := $(shell cd $(srcdir);${PWD_COMMAND})
+fsrcpfx := $(shell cd $(srcdir);${PWD_COMMAND})/
+fcurdir := $(shell ${PWD_COMMAND})
+fcurpfx := $(shell ${PWD_COMMAND})/
+
+# Top build directory, relative to here.
+top_builddir = ../..
+
+# Internationalization library.
+LIBINTL = @LIBINTL@
+LIBINTL_DEP = @LIBINTL_DEP@
+
+# Character encoding conversion library.
+LIBICONV = @LIBICONV@
+LIBICONV_DEP = @LIBICONV_DEP@
+
+# Any system libraries needed just for GNAT.
+SYSLIBS = @GNAT_LIBEXC@
+
+# List extra gnattools
+EXTRA_GNATTOOLS =
+
+# List of target dependent sources, overridden below as necessary
+TARGET_ADA_SRCS =
+
+# Type of tools build we are doing; default is not compiling tools.
+TOOLSCASE =
+
+# Multilib handling
+MULTISUBDIR =
+RTSDIR = rts$(subst /,_,$(MULTISUBDIR))
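+# e.g. MULTISUBDIR=/32 yields RTSDIR=rts_32; an empty MULTISUBDIR yields
+# plain "rts".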
+
+# Link flags used to build gnat tools. By default we prefer to statically
+# link with libgcc to avoid a dependency on shared libgcc (which is tricky
+# to deal with as it may conflict with the libgcc provided by the system).
+GCC_LINK_FLAGS=-static-libstdc++ -static-libgcc
+
+# End of variables for you to override.
+
+all: all.indirect
+
+# This tells GNU Make version 3 not to put all variables in the environment.
+.NOEXPORT:
+
+# target overrides
+ifneq ($(tmake_file),)
+include $(tmake_file)
+endif
+
+# host overrides
+ifneq ($(xmake_file),)
+include $(xmake_file)
+endif
+
+# Now figure out from those variables how to compile and link.
+
+all.indirect: Makefile ../gnat1$(exeext)
+
+# IN_GCC is meant to distinguish between code compiled into GCC itself, i.e.
+# for the host, and the rest. But we also use it for the tools (link.c) and
+# even break the host/target wall by using it for the library (targext.c).
+# autoconf inserts -DCROSS_DIRECTORY_STRUCTURE if we are building a cross
+# compiler which does not use the native libraries and headers.
+INTERNAL_CFLAGS = @CROSS@ -DIN_GCC
+
+# This is the variable actually used when we compile.
+ALL_CFLAGS = $(INTERNAL_CFLAGS) $(T_CFLAGS) $(CFLAGS)
+
+# Likewise.
+ALL_CPPFLAGS = $(CPPFLAGS)
+
+# Used with $(COMPILER).
+ALL_COMPILERFLAGS = $(ALL_CFLAGS)
+
+# This is where we get libiberty.a from.
+LIBIBERTY = ../../libiberty/libiberty.a
+
+# We need to link against libbacktrace because diagnostic.c in
+# libcommon.a uses it.
+LIBBACKTRACE = ../../libbacktrace/.libs/libbacktrace.a
+
+# How to link with both our special library facilities
+# and the system's installed libraries.
+LIBS = $(LIBINTL) $(LIBICONV) $(LIBBACKTRACE) $(LIBIBERTY) $(SYSLIBS)
+LIBDEPS = $(LIBINTL_DEP) $(LIBICONV_DEP) $(LIBBACKTRACE) $(LIBIBERTY)
+# Default is no TGT_LIB; one might be passed down or something
+TGT_LIB =
+TOOLS_LIBS = ../link.o ../targext.o ../../ggc-none.o ../../libcommon-target.a \
+ ../../libcommon.a ../../../libcpp/libcpp.a $(LIBGNAT) $(LIBINTL) $(LIBICONV) \
+ ../$(LIBBACKTRACE) ../$(LIBIBERTY) $(SYSLIBS) $(TGT_LIB)
+
+# Specify the directories to be searched for header files.
+# Both . and srcdir are used, in that order,
+# so that tm.h and config.h will be found in the compilation
+# subdirectory rather than in the source directory.
+INCLUDES = -I- -I. -I.. -I$(srcdir)/ada -I$(srcdir) -I$(ftop_srcdir)/include $(GMPINC)
+
+ADA_INCLUDES = -I- -I. -I$(srcdir)/ada
+
+# Likewise, but valid for subdirectories of the current dir.
+# FIXME: for VxWorks, we cannot add $(fsrcdir) because the regs.h file in
+# that directory conflicts with a system header file.
+ifneq ($(findstring vxworks,$(target_os)),)
+ INCLUDES_FOR_SUBDIR = -iquote . -iquote .. -iquote ../.. \
+ -iquote $(fsrcdir)/ada \
+ -I$(ftop_srcdir)/include $(GMPINC)
+else
+ INCLUDES_FOR_SUBDIR = -iquote . -iquote .. -iquote ../.. \
+ -iquote $(fsrcdir)/ada -iquote $(fsrcdir) \
+ -I$(ftop_srcdir)/include $(GMPINC)
+endif
+
+ADA_INCLUDES_FOR_SUBDIR = -I. -I$(fsrcdir)/ada
+
+# Avoid a lot of time thinking about remaking Makefile.in and *.def.
+.SUFFIXES: .in .def
+
+# Say how to compile Ada programs.
+.SUFFIXES: .ada .adb .ads .asm
+
+# Always use -I$(srcdir)/config when compiling.
+.asm.o:
+ $(CC) -c -x assembler $< $(OUTPUT_OPTION)
+
+.c.o:
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ADA_CFLAGS) $(ALL_CPPFLAGS) \
+ $(INCLUDES) $< $(OUTPUT_OPTION)
+
+.adb.o:
+ $(CC) -c $(ALL_ADAFLAGS) $(ADA_INCLUDES) $< $(OUTPUT_OPTION)
+
+.ads.o:
+ $(CC) -c $(ALL_ADAFLAGS) $(ADA_INCLUDES) $< $(OUTPUT_OPTION)
+
+# how to regenerate this file
+Makefile: ../config.status $(srcdir)/ada/gcc-interface/Makefile.in $(srcdir)/ada/Makefile.in $(srcdir)/version.c
+ cd ..; \
+ LANGUAGES="$(CONFIG_LANGUAGES)" \
+ CONFIG_HEADERS= \
+ CONFIG_FILES="ada/gcc-interface/Makefile ada/Makefile" $(SHELL) config.status
+
+# This tells GNU make version 3 not to export all the variables
+# defined in this file into the environment.
+.NOEXPORT:
+
+# Lists of files for various purposes.
+
+GNATLINK_OBJS = gnatlink.o \
+ a-except.o ali.o alloc.o butil.o casing.o csets.o debug.o fmap.o fname.o \
+ gnatvsn.o hostparm.o indepsw.o interfac.o i-c.o i-cstrin.o namet.o opt.o \
+ osint.o output.o rident.o s-exctab.o s-secsta.o s-stalib.o s-stoele.o \
+ sdefault.o snames.o stylesw.o switch.o system.o table.o targparm.o tree_io.o \
+ types.o validsw.o widechar.o
+
+GNATMAKE_OBJS = a-except.o ali.o ali-util.o aspects.o s-casuti.o alloc.o \
+ atree.o binderr.o butil.o casing.o csets.o debug.o elists.o einfo.o errout.o \
+ erroutc.o errutil.o err_vars.o fmap.o fname.o fname-uf.o fname-sf.o \
+ gnatmake.o gnatvsn.o hostparm.o interfac.o i-c.o i-cstrin.o krunch.o lib.o \
+ make.o makeusg.o makeutl.o mlib.o mlib-fil.o mlib-prj.o mlib-tgt.o \
+ mlib-tgt-specific.o mlib-utl.o namet.o nlists.o opt.o osint.o osint-m.o \
+ output.o prj.o prj-attr.o prj-attr-pm.o prj-com.o prj-dect.o prj-env.o \
+ prj-conf.o prj-pp.o prj-err.o prj-ext.o prj-nmsc.o prj-pars.o prj-part.o \
+ prj-proc.o prj-strt.o prj-tree.o prj-util.o restrict.o rident.o s-exctab.o \
+ s-secsta.o s-stalib.o s-stoele.o scans.o scng.o sdefault.o sfn_scan.o \
+ s-purexc.o s-htable.o scil_ll.o sem_aux.o sinfo.o sinput.o sinput-c.o \
+ sinput-p.o snames.o stand.o stringt.o styleg.o stylesw.o system.o validsw.o \
+ switch.o switch-m.o table.o targparm.o tempdir.o tree_io.o types.o uintp.o \
+ uname.o urealp.o usage.o widechar.o \
+ $(EXTRA_GNATMAKE_OBJS)
+
+# Make arch match the current multilib so that the RTS selection code
+# picks up the right files. For a given target this must be coherent
+# with MULTILIB_DIRNAMES defined in gcc/config/target/t-*.
+
+ifeq ($(strip $(filter-out %x86_64, $(target_cpu))),)
+ ifeq ($(strip $(MULTISUBDIR)),/32)
+ target_cpu:=i686
+ else
+ ifeq ($(strip $(MULTISUBDIR)),/x32)
+ target_cpu:=x32
+ endif
+ endif
+endif
+
+# ???: handle more multilib targets
+
+# LIBGNAT_TARGET_PAIRS is a list of pairs of filenames.
+# The members of each pair must be separated by a '<' and no whitespace.
+# Each pair must be separated by some amount of whitespace from the following
+# pair.
+
+# Non-tasking case:
+
+LIBGNAT_TARGET_PAIRS = \
+a-intnam.ads<a-intnam-dummy.ads \
+s-inmaop.adb<s-inmaop-dummy.adb \
+s-intman.adb<s-intman-dummy.adb \
+s-osinte.ads<s-osinte-dummy.ads \
+s-osprim.adb<s-osprim-posix.adb \
+s-taprop.adb<s-taprop-dummy.adb \
+s-taspri.ads<s-taspri-dummy.ads
+
+# When using the GCC exception handling mechanism, we need to use an
+# alternate body for a-exexpr.adb (a-exexpr-gcc.adb)
+
+EH_MECHANISM=
+
+# Default shared object option. Note that we rely on the fact that the "soname"
+# option will always be present and last in this flag, so that we can have
+# $(SO_OPTS)libgnat-x.xx
+
+SO_OPTS = -Wl,-soname,
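+# e.g. for a library version x.xx, $(SO_OPTS)libgnat-x.xx expands to
+# -Wl,-soname,libgnat-x.xx on the link line.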
+
+# Default gnatlib-shared target.
+# By default, equivalent to gnatlib.
+# Set to gnatlib-shared-default, gnatlib-shared-dual, or a platform specific
+# target when supported.
+GNATLIB_SHARED = gnatlib
+
+# default value for gnatmake's target dependent file
+MLIB_TGT = mlib-tgt
+
+# By default, build socket support units. On platforms that do not support
+# sockets, reset this variable to empty and add DUMMY_SOCKETS_TARGET_PAIRS
+# to LIBGNAT_TARGET_PAIRS.
+
+GNATRTL_SOCKETS_OBJS = g-soccon$(objext) g-socket$(objext) g-socthi$(objext) \
+ g-soliop$(objext) g-sothco$(objext)
+
+DUMMY_SOCKETS_TARGET_PAIRS = \
+ g-socket.adb<g-socket-dummy.adb \
+ g-socket.ads<g-socket-dummy.ads \
+ g-socthi.adb<g-socthi-dummy.adb \
+ g-socthi.ads<g-socthi-dummy.ads \
+ g-sothco.adb<g-sothco-dummy.adb \
+ g-sothco.ads<g-sothco-dummy.ads
+
+# On platforms where atomic increment/decrement operations are supported, a
+# special version of the Ada.Strings.Unbounded package can be used.
+
+ATOMICS_TARGET_PAIRS = \
+ a-coinho.adb<a-coinho-shared.adb \
+ a-coinho.ads<a-coinho-shared.ads \
+ a-stunau.adb<a-stunau-shared.adb \
+ a-suteio.adb<a-suteio-shared.adb \
+ a-strunb.ads<a-strunb-shared.ads \
+ a-strunb.adb<a-strunb-shared.adb \
+ a-stwiun.adb<a-stwiun-shared.adb \
+ a-stwiun.ads<a-stwiun-shared.ads \
+ a-swunau.adb<a-swunau-shared.adb \
+ a-swuwti.adb<a-swuwti-shared.adb \
+ a-stzunb.adb<a-stzunb-shared.adb \
+ a-stzunb.ads<a-stzunb-shared.ads \
+ a-szunau.adb<a-szunau-shared.adb \
+ a-szuzti.adb<a-szuzti-shared.adb
+
+ATOMICS_BUILTINS_TARGET_PAIRS = \
+ s-atocou.adb<s-atocou-builtin.adb
+
+# Special version of units for x86 and x86-64 platforms.
+
+X86_TARGET_PAIRS = \
+ a-numaux.ads<a-numaux-x86.ads \
+ a-numaux.adb<a-numaux-x86.adb \
+ s-atocou.adb<s-atocou-x86.adb
+
+X86_64_TARGET_PAIRS = \
+ a-numaux.ads<a-numaux-x86.ads \
+ a-numaux.adb<a-numaux-x86.adb \
+ s-atocou.adb<s-atocou-builtin.adb
+
+LIB_VERSION = $(strip $(shell grep ' Library_Version :' $(fsrcpfx)ada/gnatvsn.ads | sed -e 's/.*"\(.*\)".*/\1/'))
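+# For illustration (hypothetical value): a gnatvsn.ads declaration such as
+#   Library_Version : constant String := "4.9";
+# makes LIB_VERSION expand to 4.9.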
+
+# Additional object files from C sources to be added to libgnat.
+EXTRA_LIBGNAT_OBJS=
+# Additional C source files to be added to libgnat without a corresponding
+# object file (included files).
+EXTRA_LIBGNAT_SRCS=
+
+# $(filter-out PATTERN...,TEXT) removes all PATTERN words from TEXT.
+# $(strip STRING) removes leading and trailing spaces from STRING.
+# If what's left is null then it's a match.
+
+# m68k VxWorks
+ifeq ($(strip $(filter-out m68k% wrs vx%,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-vxworks.ads \
+ a-numaux.ads<a-numaux-vxworks.ads \
+ s-inmaop.adb<s-inmaop-vxworks.adb \
+ s-interr.adb<s-interr-hwint.adb \
+ s-intman.ads<s-intman-vxworks.ads \
+ s-intman.adb<s-intman-vxworks.adb \
+ s-osinte.adb<s-osinte-vxworks.adb \
+ s-osinte.ads<s-osinte-vxworks.ads \
+ s-osprim.adb<s-osprim-vxworks.adb \
+ s-parame.ads<s-parame-vxworks.ads \
+ s-parame.adb<s-parame-vxworks.adb \
+ s-stchop.ads<s-stchop-limit.ads \
+ s-stchop.adb<s-stchop-vxworks.adb \
+ s-taprop.adb<s-taprop-vxworks.adb \
+ s-tasinf.ads<s-tasinf-vxworks.ads \
+ s-taspri.ads<s-taspri-vxworks.ads \
+ s-tpopsp.adb<s-tpopsp-vxworks.adb \
+ s-vxwork.ads<s-vxwork-m68k.ads \
+ g-socthi.ads<g-socthi-vxworks.ads \
+ g-socthi.adb<g-socthi-vxworks.adb \
+ g-stsifd.adb<g-stsifd-sockets.adb \
+ system.ads<system-vxworks-m68k.ads
+
+ TOOLS_TARGET_PAIRS=mlib-tgt-specific.adb<mlib-tgt-specific-vxworks.adb
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=i-vxwork.o i-vxwoio.o
+ EXTRA_GNATRTL_TASKING_OBJS=s-vxwork.o s-vxwext.o
+
+ EXTRA_LIBGNAT_OBJS+=vx_stack_info.o
+
+ ifeq ($(strip $(filter-out yes,$(TRACE))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-traces.adb<s-traces-default.adb \
+ s-tratas.adb<s-tratas-default.adb \
+ s-trafor.adb<s-trafor-default.adb \
+ s-trafor.ads<s-trafor-default.ads \
+ s-tfsetr.adb<s-tfsetr-vxworks.adb
+ endif
+endif
+
+# PowerPC and e500v2 VxWorks
+ifeq ($(strip $(filter-out powerpc% wrs vxworks,$(target_cpu) $(target_vendor) $(target_os))),)
+
+ ifeq ($(strip $(filter-out e500%, $(target_alias))),)
+ ARCH_STR=e500
+ else
+ ARCH_STR=ppc
+ endif
+
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-vxworks.ads \
+ a-numaux.ads<a-numaux-vxworks.ads \
+ s-inmaop.adb<s-inmaop-vxworks.adb \
+ s-intman.ads<s-intman-vxworks.ads \
+ s-intman.adb<s-intman-vxworks.adb \
+ s-osinte.ads<s-osinte-vxworks.ads \
+ s-osinte.adb<s-osinte-vxworks.adb \
+ s-osprim.adb<s-osprim-vxworks.adb \
+ s-parame.ads<s-parame-vxworks.ads \
+ s-parame.adb<s-parame-vxworks.adb \
+ s-taprop.adb<s-taprop-vxworks.adb \
+ s-tasinf.ads<s-tasinf-vxworks.ads \
+ s-taspri.ads<s-taspri-vxworks.ads \
+ s-vxwork.ads<s-vxwork-ppc.ads \
+ g-socthi.ads<g-socthi-vxworks.ads \
+ g-socthi.adb<g-socthi-vxworks.adb \
+ g-stsifd.adb<g-stsifd-sockets.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS)
+
+  # VxWorks 5 and 6 both use the same target triplet, making them
+  # indistinguishable in the context of this makefile. Package
+  # System.Stack_Checking.Operations is not needed on VxWorks 6, as it leads
+  # to an undefined symbol when building a dynamic shared library. To
+  # alleviate this problem and distinguish this case, we use THREAD_KIND and
+  # include the package only in kernel mode.
+
+ ifeq ($(strip $(filter-out default,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-stchop.ads<s-stchop-limit.ads \
+ s-stchop.adb<s-stchop-vxworks.adb
+ endif
+
+ TOOLS_TARGET_PAIRS=\
+ mlib-tgt-specific.adb<mlib-tgt-specific-vxworks.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ ifeq ($(strip $(filter-out yes,$(TRACE))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-traces.adb<s-traces-default.adb \
+ s-trafor.adb<s-trafor-default.adb \
+ s-trafor.ads<s-trafor-default.ads \
+ s-tratas.adb<s-tratas-default.adb \
+ s-tfsetr.adb<s-tfsetr-vxworks.adb
+ endif
+
+ ifeq ($(strip $(filter-out rtp,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-vxwext.ads<s-vxwext-rtp.ads \
+ s-vxwext.adb<s-vxwext-rtp.adb \
+ s-tpopsp.adb<s-tpopsp-vxworks-rtp.adb \
+ system.ads<system-vxworks-$(ARCH_STR)-rtp.ads
+ else
+ ifeq ($(strip $(filter-out rtp-smp,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-vxwext.ads<s-vxwext-rtp.ads \
+ s-vxwext.adb<s-vxwext-rtp-smp.adb \
+ s-tpopsp.adb<s-tpopsp-vxworks-tls.adb \
+ system.ads<system-vxworks-$(ARCH_STR)-rtp.ads
+
+ EXTRA_LIBGNAT_OBJS+=affinity.o
+ else
+ ifeq ($(strip $(filter-out kernel-smp,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-interr.adb<s-interr-hwint.adb \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-tpopsp.adb<s-tpopsp-vxworks-tls.adb \
+ s-vxwext.ads<s-vxwext-kernel.ads \
+ s-vxwext.adb<s-vxwext-kernel-smp.adb \
+ system.ads<system-vxworks-$(ARCH_STR)-kernel.ads
+
+ EH_MECHANISM=-gcc
+ EXTRA_LIBGNAT_OBJS+=affinity.o
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ s-interr.adb<s-interr-hwint.adb \
+ s-tpopsp.adb<s-tpopsp-vxworks.adb
+
+ ifeq ($(strip $(filter-out kernel,$(THREAD_KIND))),)
+ EH_MECHANISM=-gcc
+ LIBGNAT_TARGET_PAIRS += \
+ s-vxwext.ads<s-vxwext-kernel.ads \
+ s-vxwext.adb<s-vxwext-kernel.adb \
+ system.ads<system-vxworks-$(ARCH_STR)-kernel.ads
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ system.ads<system-vxworks-ppc.ads
+ endif
+ endif
+ EXTRA_GNATRTL_NONTASKING_OBJS=i-vxwork.o i-vxwoio.o
+ EXTRA_LIBGNAT_OBJS+=sigtramp-ppcvxw.o
+ endif
+ endif
+
+ EXTRA_GNATRTL_TASKING_OBJS += s-vxwork.o s-vxwext.o
+
+ EXTRA_LIBGNAT_OBJS+=vx_stack_info.o
+endif
+
+# PowerPC and e500v2 VxWorks 653
+ifeq ($(strip $(filter-out powerpc% wrs vxworksae,$(target_cpu) $(target_vendor) $(target_os))),)
+ # target pairs for vthreads runtime
+ LIBGNAT_TARGET_PAIRS = \
+ a-elchha.adb<a-elchha-vxworks-ppc-full.adb \
+ a-intnam.ads<a-intnam-vxworks.ads \
+ a-numaux.ads<a-numaux-vxworks.ads \
+ g-io.adb<g-io-vxworks-ppc-cert.adb \
+ s-inmaop.adb<s-inmaop-vxworks.adb \
+ s-interr.adb<s-interr-hwint.adb \
+ s-intman.ads<s-intman-vxworks.ads \
+ s-intman.adb<s-intman-vxworks.adb \
+ s-osinte.adb<s-osinte-vxworks.adb \
+ s-osinte.ads<s-osinte-vxworks.ads \
+ s-osprim.adb<s-osprim-vxworks.adb \
+ s-parame.ads<s-parame-ae653.ads \
+ s-parame.adb<s-parame-vxworks.adb \
+ s-taprop.adb<s-taprop-vxworks.adb \
+ s-tasinf.ads<s-tasinf-vxworks.ads \
+ s-taspri.ads<s-taspri-vxworks.ads \
+ s-tpopsp.adb<s-tpopsp-vxworks.adb \
+ s-vxwext.adb<s-vxwext-noints.adb \
+ s-vxwext.ads<s-vxwext-vthreads.ads \
+ s-vxwork.ads<s-vxwork-ppc.ads \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS)
+
+ ifeq ($(strip $(filter-out e500%, $(target_alias))),)
+ LIBGNAT_TARGET_PAIRS += system.ads<system-vxworks-e500-vthread.ads
+ else
+ LIBGNAT_TARGET_PAIRS += system.ads<system-vxworks-ppc-vthread.ads
+ endif
+
+ TOOLS_TARGET_PAIRS=\
+ mlib-tgt-specific.adb<mlib-tgt-specific-vxworks.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=i-vxwork.o i-vxwoio.o
+ EXTRA_GNATRTL_TASKING_OBJS=s-vxwork.o s-vxwext.o
+
+ EXTRA_LIBGNAT_OBJS+=sigtramp-ppcvxw.o
+
+ # Extra pairs for the vthreads runtime
+ ifeq ($(strip $(filter-out vthreads,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-thread.adb<s-thread-ae653.adb \
+ $(DUMMY_SOCKETS_TARGET_PAIRS)
+
+ GNATRTL_SOCKETS_OBJS =
+ EXTRA_GNATRTL_NONTASKING_OBJS += s-thread.o
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ g-socthi.ads<g-socthi-vxworks.ads \
+ g-socthi.adb<g-socthi-vxworks.adb \
+ g-stsifd.adb<g-stsifd-sockets.adb
+ endif
+
+ ifeq ($(strip $(filter-out yes,$(TRACE))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-traces.adb<s-traces-default.adb \
+ s-trafor.adb<s-trafor-default.adb \
+ s-trafor.ads<s-trafor-default.ads \
+ s-tratas.adb<s-tratas-default.adb \
+ s-tfsetr.adb<s-tfsetr-vxworks.adb
+ endif
+endif
+
+# PowerPC and e500v2 VxWorks MILS
+ifeq ($(strip $(filter-out powerpc% wrs vxworksmils,$(target_cpu) $(target_vendor) $(target_os))),)
+ # target pairs for vthreads runtime
+ LIBGNAT_TARGET_PAIRS = \
+ a-elchha.adb<a-elchha-vx6-raven-cert.adb \
+ a-intnam.ads<a-intnam-vxworks.ads \
+ a-numaux.ads<a-numaux-vxworks.ads \
+ g-io.adb<g-io-vxworks-ppc-cert.adb \
+ s-inmaop.adb<s-inmaop-vxworks.adb \
+ s-interr.adb<s-interr-hwint.adb \
+ s-intman.ads<s-intman-vxworks.ads \
+ s-intman.adb<s-intman-vxworks.adb \
+ s-osinte.adb<s-osinte-vxworks.adb \
+ s-osinte.ads<s-osinte-vxworks.ads \
+ s-osprim.adb<s-osprim-vxworks.adb \
+ s-parame.ads<s-parame-ae653.ads \
+ s-parame.adb<s-parame-vxworks.adb \
+ s-stchop.adb<s-stchop-vxworks.adb \
+ s-stchop.ads<s-stchop-limit.ads \
+ s-taprop.adb<s-taprop-vxworks.adb \
+ s-tasinf.ads<s-tasinf-vxworks.ads \
+ s-taspri.ads<s-taspri-vxworks.ads \
+ s-thread.adb<s-thread-ae653.adb \
+ s-tpopsp.adb<s-tpopsp-vxworks.adb \
+ s-vxwork.ads<s-vxwork-ppc.ads \
+ system.ads<system-vxworks-ppc-mils.ads \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS) \
+ $(DUMMY_SOCKETS_TARGET_PAIRS)
+
+ TOOLS_TARGET_PAIRS=\
+ mlib-tgt-specific.adb<mlib-tgt-specific-vxworks.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=i-vxwork.o i-vxwoio.o s-thread.o
+ EXTRA_GNATRTL_TASKING_OBJS=s-vxwork.o s-vxwext.o
+
+ EXTRA_LIBGNAT_OBJS+=vx_stack_info.o sigtramp-ppcvxw.o
+ GNATRTL_SOCKETS_OBJS =
+
+ ifeq ($(strip $(filter-out yes,$(TRACE))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-traces.adb<s-traces-default.adb \
+ s-trafor.adb<s-trafor-default.adb \
+ s-trafor.ads<s-trafor-default.ads \
+ s-tratas.adb<s-tratas-default.adb \
+ s-tfsetr.adb<s-tfsetr-vxworks.adb
+ endif
+endif
+
+# VxWorksae / VxWorks 653 for x86 (vxsim) - ?? VxWorks mils not implemented
+ifeq ($(strip $(filter-out %86 wrs vxworksae vxworksmils,$(target_cpu) $(target_vendor) $(target_os))),)
+ # target pairs for kernel + vthreads runtime
+ LIBGNAT_TARGET_PAIRS = \
+ a-elchha.adb<a-elchha-vxworks-ppc-full.adb \
+ a-intnam.ads<a-intnam-vxworks.ads \
+ a-numaux.ads<a-numaux-vxworks.ads \
+ g-io.adb<g-io-vxworks-ppc-cert.adb \
+ s-inmaop.adb<s-inmaop-vxworks.adb \
+ s-interr.adb<s-interr-hwint.adb \
+ s-intman.ads<s-intman-vxworks.ads \
+ s-intman.adb<s-intman-vxworks.adb \
+ s-osinte.adb<s-osinte-vxworks.adb \
+ s-osinte.ads<s-osinte-vxworks.ads \
+ s-osprim.adb<s-osprim-vxworks.adb \
+ s-parame.ads<s-parame-ae653.ads \
+ s-parame.adb<s-parame-vxworks.adb \
+ s-taprop.adb<s-taprop-vxworks.adb \
+ s-tasinf.ads<s-tasinf-vxworks.ads \
+ s-taspri.ads<s-taspri-vxworks.ads \
+ s-tpopsp.adb<s-tpopsp-vxworks.adb \
+ s-vxwext.adb<s-vxwext-noints.adb \
+ s-vxwext.ads<s-vxwext-vthreads.ads \
+ s-vxwork.ads<s-vxwork-x86.ads \
+ system.ads<system-vxworks-x86-vthread.ads \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS)
+
+ TOOLS_TARGET_PAIRS=\
+ mlib-tgt-specific.adb<mlib-tgt-specific-vxworks.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=i-vxwork.o i-vxwoio.o s-thread.o
+ EXTRA_GNATRTL_TASKING_OBJS=s-vxwork.o s-vxwext.o
+
+ EXTRA_LIBGNAT_OBJS+=vx_stack_info.o
+ GNATRTL_SOCKETS_OBJS =
+
+ # Extra pairs for the vthreads runtime
+ ifeq ($(strip $(filter-out vthreads,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-thread.adb<s-thread-ae653.adb \
+ $(DUMMY_SOCKETS_TARGET_PAIRS)
+
+ GNATRTL_SOCKETS_OBJS =
+ EXTRA_GNATRTL_NONTASKING_OBJS += s-thread.o
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ g-socthi.ads<g-socthi-vxworks.ads \
+ g-socthi.adb<g-socthi-vxworks.adb \
+ g-stsifd.adb<g-stsifd-sockets.adb
+ endif
+
+ ifeq ($(strip $(filter-out yes,$(TRACE))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-traces.adb<s-traces-default.adb \
+ s-trafor.adb<s-trafor-default.adb \
+ s-trafor.ads<s-trafor-default.ads \
+ s-tratas.adb<s-tratas-default.adb \
+ s-tfsetr.adb<s-tfsetr-vxworks.adb
+ endif
+endif
+
+# Sparc VxWorks
+ifeq ($(strip $(filter-out sparc% wrs vx%,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-vxworks.ads \
+ a-numaux.ads<a-numaux-vxworks.ads \
+ s-inmaop.adb<s-inmaop-vxworks.adb \
+ s-interr.adb<s-interr-hwint.adb \
+ s-intman.ads<s-intman-vxworks.ads \
+ s-intman.adb<s-intman-vxworks.adb \
+ s-osinte.adb<s-osinte-vxworks.adb \
+ s-osinte.ads<s-osinte-vxworks.ads \
+ s-osprim.adb<s-osprim-vxworks.adb \
+ s-parame.ads<s-parame-vxworks.ads \
+ s-parame.adb<s-parame-vxworks.adb \
+ s-stchop.ads<s-stchop-limit.ads \
+ s-stchop.adb<s-stchop-vxworks.adb \
+ s-taprop.adb<s-taprop-vxworks.adb \
+ s-tasinf.ads<s-tasinf-vxworks.ads \
+ s-taspri.ads<s-taspri-vxworks.ads \
+ s-tpopsp.adb<s-tpopsp-vxworks.adb \
+ g-socthi.ads<g-socthi-vxworks.ads \
+ g-socthi.adb<g-socthi-vxworks.adb \
+ g-stsifd.adb<g-stsifd-sockets.adb
+
+ TOOLS_TARGET_PAIRS=\
+ mlib-tgt-specific.adb<mlib-tgt-specific-vxworks.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ ifeq ($(strip $(filter-out sparc64 sparcv9, $(target_cpu))),)
+ # 64-bits
+ LIBGNAT_TARGET_PAIRS += \
+ s-vxwork.ads<s-vxwork-sparcv9.ads \
+ system.ads<system-vxworks-sparcv9.ads
+ else
+ # 32-bits
+ LIBGNAT_TARGET_PAIRS += \
+ s-vxwork.ads<s-vxwork-sparc.ads \
+ system.ads<system-vxworks-sparc-kernel.ads
+ endif
+
+ ifeq ($(strip $(filter-out kernel,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-vxwext.ads<s-vxwext-kernel.ads \
+ s-vxwext.adb<s-vxwext-kernel.adb
+ endif
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=i-vxwork.o i-vxwoio.o
+ EXTRA_GNATRTL_TASKING_OBJS=s-vxwork.o s-vxwext.o
+
+ EXTRA_LIBGNAT_OBJS+=vx_stack_info.o
+endif
+
+# x86 VxWorks
+ifeq ($(strip $(filter-out %86 wrs vxworks,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-vxworks.ads \
+ i-vxwork.ads<i-vxwork-x86.ads \
+ s-osinte.adb<s-osinte-vxworks.adb \
+ s-osinte.ads<s-osinte-vxworks.ads \
+ s-inmaop.adb<s-inmaop-vxworks.adb \
+ s-intman.ads<s-intman-vxworks.ads \
+ s-intman.adb<s-intman-vxworks.adb \
+ s-osprim.adb<s-osprim-vxworks.adb \
+ s-parame.ads<s-parame-vxworks.ads \
+ s-parame.adb<s-parame-vxworks.adb \
+ s-stchop.ads<s-stchop-limit.ads \
+ s-stchop.adb<s-stchop-vxworks.adb \
+ s-taprop.adb<s-taprop-vxworks.adb \
+ s-tasinf.ads<s-tasinf-vxworks.ads \
+ s-taspri.ads<s-taspri-vxworks.ads \
+ s-vxwork.ads<s-vxwork-x86.ads \
+ g-socthi.ads<g-socthi-vxworks.ads \
+ g-socthi.adb<g-socthi-vxworks.adb \
+ g-stsifd.adb<g-stsifd-sockets.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(X86_TARGET_PAIRS)
+
+ TOOLS_TARGET_PAIRS=\
+ mlib-tgt-specific.adb<mlib-tgt-specific-vxworks.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ ifeq ($(strip $(filter-out yes,$(TRACE))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-traces.adb<s-traces-default.adb \
+ s-trafor.adb<s-trafor-default.adb \
+ s-trafor.ads<s-trafor-default.ads \
+ s-tratas.adb<s-tratas-default.adb \
+ s-tfsetr.adb<s-tfsetr-vxworks.adb
+ endif
+
+ ifeq ($(strip $(filter-out rtp,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-vxwext.ads<s-vxwext-rtp.ads \
+ s-vxwext.adb<s-vxwext-rtp.adb \
+ s-tpopsp.adb<s-tpopsp-vxworks-rtp.adb \
+ system.ads<system-vxworks-x86-rtp.ads
+
+ else
+ ifeq ($(strip $(filter-out rtp-smp, $(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-vxwext.ads<s-vxwext-rtp.ads \
+ s-vxwext.adb<s-vxwext-rtp-smp.adb \
+ s-tpopsp.adb<s-tpopsp-vxworks-tls.adb \
+ system.ads<system-vxworks-x86-rtp.ads
+
+ EXTRA_LIBGNAT_OBJS+=affinity.o
+ else
+ ifeq ($(strip $(filter-out kernel-smp, $(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-interr.adb<s-interr-hwint.adb \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-tpopsp.adb<s-tpopsp-vxworks-tls.adb \
+ s-vxwext.ads<s-vxwext-kernel.ads \
+ s-vxwext.adb<s-vxwext-kernel-smp.adb \
+ system.ads<system-vxworks-x86-kernel.ads
+ EXTRA_LIBGNAT_OBJS+=affinity.o
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ s-interr.adb<s-interr-hwint.adb \
+ s-tpopsp.adb<s-tpopsp-vxworks.adb
+
+ ifeq ($(strip $(filter-out kernel,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-vxwext.ads<s-vxwext-kernel.ads \
+ s-vxwext.adb<s-vxwext-kernel.adb \
+ system.ads<system-vxworks-x86-kernel.ads
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ system.ads<system-vxworks-x86.ads
+ endif
+ endif
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=i-vxwork.o i-vxwoio.o
+ endif
+ endif
+ EXTRA_GNATRTL_TASKING_OBJS += s-vxwork.o s-vxwext.o
+
+ EXTRA_LIBGNAT_OBJS+=vx_stack_info.o
+endif
+
+# ARM VxWorks
+ifeq ($(strip $(filter-out arm% coff wrs vx%,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-vxworks.ads \
+ a-numaux.ads<a-numaux-vxworks.ads \
+ s-inmaop.adb<s-inmaop-vxworks.adb \
+ s-interr.adb<s-interr-hwint.adb \
+ s-intman.ads<s-intman-vxworks.ads \
+ s-intman.adb<s-intman-vxworks.adb \
+ s-osinte.adb<s-osinte-vxworks.adb \
+ s-osinte.ads<s-osinte-vxworks.ads \
+ s-osprim.adb<s-osprim-vxworks.adb \
+ s-parame.ads<s-parame-vxworks.ads \
+ s-parame.adb<s-parame-vxworks.adb \
+ s-stchop.ads<s-stchop-limit.ads \
+ s-stchop.adb<s-stchop-vxworks.adb \
+ s-taprop.adb<s-taprop-vxworks.adb \
+ s-tasinf.ads<s-tasinf-vxworks.ads \
+ s-taspri.ads<s-taspri-vxworks.ads \
+ s-vxwork.ads<s-vxwork-arm.ads \
+ g-socthi.ads<g-socthi-vxworks.ads \
+ g-socthi.adb<g-socthi-vxworks.adb \
+ g-stsifd.adb<g-stsifd-sockets.adb
+
+ TOOLS_TARGET_PAIRS=\
+ mlib-tgt-specific.adb<mlib-tgt-specific-vxworks.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ ifeq ($(strip $(filter-out rtp-smp,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-vxwext.ads<s-vxwext-rtp.ads \
+ s-vxwext.adb<s-vxwext-rtp-smp.adb \
+ s-tpopsp.adb<s-tpopsp-vxworks-tls.adb \
+ system.ads<system-vxworks-arm-rtp.ads
+
+ EXTRA_LIBGNAT_OBJS+=affinity.o
+ else
+ ifeq ($(strip $(filter-out kernel-smp,$(THREAD_KIND))),)
+ EH_MECHANISM=-gcc
+
+ LIBGNAT_TARGET_PAIRS += \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-tpopsp.adb<s-tpopsp-vxworks-tls.adb \
+ s-vxwext.ads<s-vxwext-kernel.ads \
+ s-vxwext.adb<s-vxwext-kernel-smp.adb \
+ system.ads<system-vxworks-arm.ads
+
+ EXTRA_LIBGNAT_OBJS+=affinity.o sigtramp-armvxw.o
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ s-tpopsp.adb<s-tpopsp-vxworks.adb \
+ system.ads<system-vxworks-arm.ads
+
+ ifeq ($(strip $(filter-out kernel,$(THREAD_KIND))),)
+ EH_MECHANISM=-gcc
+
+ LIBGNAT_TARGET_PAIRS += \
+ s-vxwext.ads<s-vxwext-kernel.ads \
+ s-vxwext.adb<s-vxwext-kernel.adb
+
+ EXTRA_LIBGNAT_OBJS+=sigtramp-armvxw.o
+ endif
+ endif
+ endif
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=i-vxwork.o i-vxwoio.o
+ EXTRA_GNATRTL_TASKING_OBJS=s-vxwork.o s-vxwext.o
+
+ EXTRA_LIBGNAT_OBJS+=vx_stack_info.o
+endif
+
+# MIPS VxWorks
+ifeq ($(strip $(filter-out mips% wrs vx%,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-vxworks.ads \
+ a-numaux.ads<a-numaux-vxworks.ads \
+ s-inmaop.adb<s-inmaop-vxworks.adb \
+ s-interr.adb<s-interr-hwint.adb \
+ s-intman.ads<s-intman-vxworks.ads \
+ s-intman.adb<s-intman-vxworks.adb \
+ s-osinte.adb<s-osinte-vxworks.adb \
+ s-osinte.ads<s-osinte-vxworks.ads \
+ s-osprim.adb<s-osprim-vxworks.adb \
+ s-parame.ads<s-parame-vxworks.ads \
+ s-parame.adb<s-parame-vxworks.adb \
+ s-stchop.ads<s-stchop-limit.ads \
+ s-stchop.adb<s-stchop-vxworks.adb \
+ s-taprop.adb<s-taprop-vxworks.adb \
+ s-tasinf.ads<s-tasinf-vxworks.ads \
+ s-taspri.ads<s-taspri-vxworks.ads \
+ s-tpopsp.adb<s-tpopsp-vxworks.adb \
+ s-vxwork.ads<s-vxwork-mips.ads \
+ g-socthi.ads<g-socthi-vxworks.ads \
+ g-socthi.adb<g-socthi-vxworks.adb \
+ g-stsifd.adb<g-stsifd-sockets.adb \
+ system.ads<system-vxworks-mips.ads
+
+ TOOLS_TARGET_PAIRS=\
+ mlib-tgt-specific.adb<mlib-tgt-specific-vxworks.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=i-vxwork.o i-vxwoio.o
+ EXTRA_GNATRTL_TASKING_OBJS=s-vxwork.o s-vxwext.o
+
+ EXTRA_LIBGNAT_OBJS+=vx_stack_info.o
+endif
+
+# ARM Android
+ifeq ($(strip $(filter-out arm% linux-androideabi,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-linux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux.ads \
+ s-osinte.adb<s-osinte-android.adb \
+ s-osinte.ads<s-osinte-android.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-posix.adb \
+ s-taspri.ads<s-taspri-posix-noaltstack.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ system.ads<system-linux-armel.ads \
+ $(DUMMY_SOCKETS_TARGET_PAIRS)
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ GNATRTL_SOCKETS_OBJS =
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o
+ EH_MECHANISM=
+ THREADSLIB =
+ GNATLIB_SHARED = gnatlib-shared-dual
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# Sparc Solaris
+ifeq ($(strip $(filter-out sparc% sun solaris%,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS_COMMON = \
+ a-intnam.ads<a-intnam-solaris.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-solaris.adb \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-osinte.adb<s-osinte-solaris.adb \
+ s-osinte.ads<s-osinte-solaris.ads \
+ s-osprim.adb<s-osprim-solaris.adb \
+ s-taprop.adb<s-taprop-solaris.adb \
+ s-tasinf.adb<s-tasinf-solaris.adb \
+ s-tasinf.ads<s-tasinf-solaris.ads \
+ s-taspri.ads<s-taspri-solaris.ads \
+ s-tpopsp.adb<s-tpopsp-solaris.adb \
+ g-soliop.ads<g-soliop-solaris.ads
+
+ LIBGNAT_TARGET_PAIRS_32 = \
+ system.ads<system-solaris-sparc.ads
+
+ LIBGNAT_TARGET_PAIRS_64 = \
+ system.ads<system-solaris-sparcv9.ads \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS)
+
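+  # MULTISUBDIR is set by the multilib build machinery to the
+  # subdirectory of the library variant being built (e.g. /sparcv9
+  # here, empty for the default multilib), which is what selects
+  # between the 32-bit and 64-bit system.ads below.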
+ ifeq ($(strip $(filter-out sparc sun solaris%,$(target_cpu) $(target_vendor) $(target_os))),)
+ ifeq ($(strip $(MULTISUBDIR)),/sparcv9)
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64)
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32)
+ endif
+ else
+ ifeq ($(strip $(MULTISUBDIR)),/sparcv8plus)
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32)
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64)
+ endif
+ endif
+
+ TOOLS_TARGET_PAIRS=mlib-tgt-specific.adb<mlib-tgt-specific-solaris.adb
+
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lposix4 -lthread
+ MISCLIB = -lposix4 -lnsl -lsocket
+ SO_OPTS = -Wl,-h,
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+
+ ifeq ($(strip $(filter-out pthread PTHREAD,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-solaris.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-solaris-posix.ads \
+ s-osprim.adb<s-osprim-solaris.adb \
+ s-taprop.adb<s-taprop-posix.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ g-soliop.ads<g-soliop-solaris.ads \
+ system.ads<system-solaris-sparc.ads
+
+ THREADSLIB = -lposix4 -lpthread
+ endif
+
+ ifeq ($(strip $(filter-out m64,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS = $(LIBGNAT_TARGET_PAIRS_64)
+ endif
+endif
+
+# x86 and x86-64 Solaris
+ifeq ($(strip $(filter-out %86 %x86_64 solaris2%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS_COMMON = \
+ a-intnam.ads<a-intnam-solaris.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-solaris.adb \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-osinte.adb<s-osinte-solaris.adb \
+ s-osinte.ads<s-osinte-solaris.ads \
+ s-osprim.adb<s-osprim-solaris.adb \
+ s-taprop.adb<s-taprop-solaris.adb \
+ s-tasinf.adb<s-tasinf-solaris.adb \
+ s-tasinf.ads<s-tasinf-solaris.ads \
+ s-taspri.ads<s-taspri-solaris.ads \
+ s-tpopsp.adb<s-tpopsp-solaris.adb \
+ g-soliop.ads<g-soliop-solaris.ads \
+ $(ATOMICS_TARGET_PAIRS)
+
+ LIBGNAT_TARGET_PAIRS_32 = \
+ $(X86_TARGET_PAIRS) \
+ system.ads<system-solaris-x86.ads
+
+ LIBGNAT_TARGET_PAIRS_64 = \
+ $(X86_64_TARGET_PAIRS) \
+ system.ads<system-solaris-x86_64.ads
+
+ ifeq ($(strip $(filter-out %86 solaris2%,$(target_cpu) $(target_os))),)
+ ifeq ($(strip $(MULTISUBDIR)),/amd64)
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64)
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32)
+ endif
+ else
+ ifeq ($(strip $(MULTISUBDIR)),/32)
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32)
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64)
+ endif
+ endif
+
+ TOOLS_TARGET_PAIRS=mlib-tgt-specific.adb<mlib-tgt-specific-solaris.adb
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=g-sse.o g-ssvety.o
+
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lposix4 -lthread
+ MISCLIB = -lposix4 -lnsl -lsocket
+ SO_OPTS = -Wl,-h,
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# x86 Linux
+ifeq ($(strip $(filter-out %86 linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-linux.ads \
+ a-synbar.adb<a-synbar-posix.adb \
+ a-synbar.ads<a-synbar-posix.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-tpopsp.adb<s-tpopsp-tls.adb \
+ g-sercom.adb<g-sercom-linux.adb \
+ a-exetim.adb<a-exetim-posix.adb \
+ a-exetim.ads<a-exetim-default.ads \
+ s-linux.ads<s-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ $(ATOMICS_TARGET_PAIRS)
+
+ LIBGNAT_TARGET_PAIRS_32 = \
+ $(X86_TARGET_PAIRS) \
+ system.ads<system-linux-x86.ads
+
+ LIBGNAT_TARGET_PAIRS_64 = \
+ $(X86_64_TARGET_PAIRS) \
+ system.ads<system-linux-x86_64.ads
+
+ ifeq ($(strip $(MULTISUBDIR)),/64)
+ LIBGNAT_TARGET_PAIRS += $(LIBGNAT_TARGET_PAIRS_64)
+ else
+ LIBGNAT_TARGET_PAIRS += $(LIBGNAT_TARGET_PAIRS_32)
+ endif
+
+ ifeq ($(strip $(filter-out xenomai,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-osinte.ads<s-osinte-linux-xenomai.ads \
+ s-osprim.adb<s-osprim-linux-xenomai.adb \
+ s-taprop.adb<s-taprop-linux-xenomai.adb \
+ s-taspri.ads<s-taspri-linux-xenomai.ads
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix.ads
+ endif
+
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lpthread -lrt
+ EXTRA_GNATRTL_NONTASKING_OBJS=g-sse.o g-ssvety.o
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o a-exetim.o
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ GNATLIB_SHARED = gnatlib-shared-dual
+ MISCLIB = -ldl
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# x86 kFreeBSD
+ifeq ($(strip $(filter-out %86 kfreebsd%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-freebsd.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-kfreebsd-gnu.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(X86_TARGET_PAIRS) \
+ system.ads<system-freebsd-x86.ads
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=g-sse.o g-ssvety.o
+
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lpthread
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+ MISCLIB = -lutil
+endif
+
+ifeq ($(strip $(filter-out x86_64 kfreebsd%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-freebsd.ads \
+ a-numaux.adb<a-numaux-x86.adb \
+ a-numaux.ads<a-numaux-x86.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-kfreebsd-gnu.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ system.ads<system-freebsd-x86_64.ads
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lpthread
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# x86 FreeBSD
+ifeq ($(strip $(filter-out %86 freebsd%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-freebsd.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-osinte.adb<s-osinte-freebsd.adb \
+ s-osinte.ads<s-osinte-freebsd.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-posix.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-posix.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(X86_TARGET_PAIRS) \
+ system.ads<system-freebsd-x86.ads
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb
+ GNATLIB_SHARED = gnatlib-shared-dual
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=g-sse.o g-ssvety.o
+
+ EH_MECHANISM=-gcc
+ THREADSLIB= -lpthread
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+ MISCLIB = -lutil
+endif
+
+# x86-64 FreeBSD
+ifeq ($(strip $(filter-out %86_64 freebsd%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-freebsd.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-osinte.adb<s-osinte-freebsd.adb \
+ s-osinte.ads<s-osinte-freebsd.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-posix.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-posix.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(X86_64_TARGET_PAIRS) \
+ system.ads<system-freebsd-x86_64.ads
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb
+ GNATLIB_SHARED = gnatlib-shared-dual
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=g-sse.o g-ssvety.o
+
+ EH_MECHANISM=-gcc
+ THREADSLIB= -lpthread
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+ MISCLIB = -lutil
+endif
+
+# S390 Linux
+ifeq ($(strip $(filter-out s390% linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS_COMMON = \
+ a-intnam.ads<a-intnam-linux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix-noaltstack.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb
+
+ LIBGNAT_TARGET_PAIRS_32 = \
+ system.ads<system-linux-s390.ads
+
+ LIBGNAT_TARGET_PAIRS_64 = \
+ system.ads<system-linux-s390x.ads
+
+ ifeq ($(strip $(filter-out s390x,$(target_cpu))),)
+ ifeq ($(strip $(MULTISUBDIR)),/32)
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32)
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64)
+ endif
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32)
+ endif
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lpthread
+ GNATLIB_SHARED = gnatlib-shared-dual
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# HP/PA HP-UX 10
+ifeq ($(strip $(filter-out hppa% hp hpux10%,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-excpol.adb<a-excpol-abort.adb \
+ a-intnam.ads<a-intnam-hpux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-interr.adb<s-interr-sigaction.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-osinte.adb<s-osinte-hpux-dce.adb \
+ s-osinte.ads<s-osinte-hpux-dce.ads \
+ s-parame.ads<s-parame-hpux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-hpux-dce.adb \
+ s-taspri.ads<s-taspri-hpux-dce.ads \
+ s-tpopsp.adb<s-tpopsp-posix.adb \
+ system.ads<system-hpux.ads
+
+ EH_MECHANISM=-gcc
+endif
+
+# HP/PA HP-UX 11
+ifeq ($(strip $(filter-out hppa% hp hpux11%,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-hpux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-hpux.ads \
+ s-parame.ads<s-parame-hpux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-traceb.adb<s-traceb-hpux.adb \
+ s-taprop.adb<s-taprop-posix.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ system.ads<system-hpux.ads
+
+ TOOLS_TARGET_PAIRS = mlib-tgt-specific.adb<mlib-tgt-specific-hpux.adb
+ EH_MECHANISM=-gcc
+ TGT_LIB = /usr/lib/libcl.a
+ THREADSLIB = -lpthread
+ GMEM_LIB = gmemlib
+ soext = .sl
+ SO_OPTS = -Wl,+h,
+ GNATLIB_SHARED = gnatlib-shared-dual
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# IBM AIX
+ifeq ($(strip $(filter-out ibm aix%,$(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS_COMMON = \
+ a-intnam.ads<a-intnam-aix.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-osinte.adb<s-osinte-aix.adb \
+ s-osinte.ads<s-osinte-aix.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-posix.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-posix.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS)
+
+ LIBGNAT_TARGET_PAIRS_32 = \
+ system.ads<system-aix.ads
+
+ LIBGNAT_TARGET_PAIRS_64 = \
+ system.ads<system-aix64.ads
+
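+  # Ask the compiler for its multilib OS directory to detect a
+  # 64-bit-default configuration: -print-multi-os-directory prints
+  # the OS-specific library subdirectory for the current flags
+  # (expected to contain "ppc64" when the compiler defaults to
+  # 64-bit code on AIX), so matching it selects the 64-bit pairs.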
+ ifeq ($(findstring ppc64, \
+ $(shell $(GCC_FOR_TARGET) $(GNATLIBCFLAGS) \
+ -print-multi-os-directory)), \
+ ppc64)
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64)
+ TOOLS_TARGET_PAIRS = \
+ indepsw.adb<indepsw-aix.adb
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32)
+ TOOLS_TARGET_PAIRS = \
+ indepsw.adb<indepsw-gnu.adb
+ endif
+
+ THREADSLIB = -lpthreads
+ EH_MECHANISM=-gcc
+ TOOLS_TARGET_PAIRS += \
+ mlib-tgt-specific.adb<mlib-tgt-specific-aix.adb
+
+ GMEM_LIB = gmemlib
+endif
+
+# RTEMS
+ifeq ($(strip $(filter-out rtems%,$(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ system.ads<system-rtems.ads \
+ a-intnam.ads<a-intnam-rtems.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-osinte.adb<s-osinte-rtems.adb \
+ s-osinte.ads<s-osinte-rtems.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-parame.adb<s-parame-rtems.adb \
+ s-taprop.adb<s-taprop-posix.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-rtems.adb \
+ s-stchop.adb<s-stchop-rtems.adb \
+ s-interr.adb<s-interr-hwint.adb
+endif
+
+# OpenVMS (host)
+ifeq ($(strip $(filter-out alpha64 ia64 dec hp vms% openvms% alphavms%,$(host_cpu) $(host_vendor) $(host_os))),)
+
+soext = .exe
+hyphen = _
+LN = cp -p
+LN_S = cp -p
+
+endif
+
+# OpenVMS (target)
+ifeq ($(strip $(filter-out alpha64 ia64 dec hp vms% openvms% alphavms%,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-caldel.adb<a-caldel-vms.adb \
+ a-calend.adb<a-calend-vms.adb \
+ a-calend.ads<a-calend-vms.ads \
+ a-dirval.adb<a-dirval-vms.adb \
+ a-excpol.adb<a-excpol-abort.adb \
+ a-intnam.ads<a-intnam-vms.ads \
+ a-numaux.ads<a-numaux-vms.ads \
+ g-expect.adb<g-expect-vms.adb \
+ g-socthi.ads<g-socthi-vms.ads \
+ g-socthi.adb<g-socthi-vms.adb \
+ g-stsifd.adb<g-stsifd-sockets.adb \
+ i-cstrea.adb<i-cstrea-vms.adb \
+ memtrack.adb<memtrack-vms_64.adb \
+ s-auxdec.ads<s-auxdec-vms_64.ads \
+ s-inmaop.adb<s-inmaop-vms.adb \
+ s-interr.adb<s-interr-vms.adb \
+ s-intman.adb<s-intman-vms.adb \
+ s-intman.ads<s-intman-vms.ads \
+ s-memory.adb<s-memory-vms_64.adb \
+ s-memory.ads<s-memory-vms_64.ads \
+ s-ransee.adb<s-ransee-vms.adb \
+ s-osprim.adb<s-osprim-vms.adb \
+ s-osprim.ads<s-osprim-vms.ads \
+ s-osinte.adb<s-osinte-vms.adb \
+ s-osinte.ads<s-osinte-vms.ads \
+ s-taprop.adb<s-taprop-vms.adb \
+ s-tasdeb.adb<s-tasdeb-vms.adb \
+ s-taspri.ads<s-taspri-vms.ads \
+ s-tpopsp.adb<s-tpopsp-vms.adb \
+ s-tpopde.adb<s-tpopde-vms.adb \
+ s-tpopde.ads<s-tpopde-vms.ads
+
+ ifeq ($(strip $(filter-out ia64 hp vms% openvms%,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS += \
+ g-enblsp.adb<g-enblsp-vms-ia64.adb \
+ g-trasym.adb<g-trasym-vms-ia64.adb \
+ s-asthan.adb<s-asthan-vms-ia64.adb \
+ s-auxdec.adb<s-auxdec-vms-ia64.adb \
+ s-vaflop.adb<s-vaflop-vms-ia64.adb \
+ system.ads<system-vms-ia64.ads \
+ s-parame.ads<s-parame-vms-ia64.ads \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS)
+
+ EXTRA_LIBGNAT_SRCS+=tb-ivms.c
+ override GNATRTL_ALTIVEC_OBJS=
+
+ TOOLS_TARGET_PAIRS= \
+ mlib-tgt-specific.adb<mlib-tgt-specific-vms-ia64.adb \
+ symbols.adb<symbols-vms.adb \
+ symbols-processing.adb<symbols-processing-vms-ia64.adb
+ else
+ ifeq ($(strip $(filter-out alpha64 dec vms% openvms% alphavms%,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS += \
+ g-enblsp.adb<g-enblsp-vms-alpha.adb \
+ g-trasym.adb<g-trasym-vms-alpha.adb \
+ s-asthan.adb<s-asthan-vms-alpha.adb \
+ s-auxdec.adb<s-auxdec-vms-alpha.adb \
+ s-traent.adb<s-traent-vms.adb \
+ s-traent.ads<s-traent-vms.ads \
+ s-vaflop.adb<s-vaflop-vms-alpha.adb \
+ system.ads<system-vms_64.ads \
+ s-parame.ads<s-parame-vms-alpha.ads \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS)
+
+ EXTRA_LIBGNAT_SRCS+=tb-alvms.c
+
+ TOOLS_TARGET_PAIRS= \
+ mlib-tgt-specific.adb<mlib-tgt-specific-vms-alpha.adb \
+ symbols.adb<symbols-vms.adb \
+ symbols-processing.adb<symbols-processing-vms-alpha.adb
+ endif
+ endif
+
+ EXTRA_GNATMAKE_OBJS = mlib-tgt-vms_common.o
+
+ GMEM_LIB = gmemlib
+ EH_MECHANISM=-gcc
+ GNATLIB_SHARED=gnatlib-shared-vms
+ EXTRA_GNATRTL_NONTASKING_OBJS+=s-po32gl.o
+ EXTRA_GNATRTL_TASKING_OBJS=s-tpopde.o
+ EXTRA_GNATTOOLS = \
+ ../../gnatsym$(exeext)
+ # This command transforms (YYYYMMDD) into YY,MMDD
+ GSMATCH_VERSION := $(shell grep "^ *Gnat_Static_Version_String" $(fsrcpfx)ada/gnatvsn.ads | sed -e 's/.*(\(.*\)).*/\1/' -e 's/\(..\)\(..\)\(....\).*/\2,\3/')
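+  # For instance (hypothetical date), a gnatvsn.ads line containing
+  # "(20130321)" is reduced to 20130321 by the first sed expression;
+  # the second splits it as (20)(13)(0321) and emits \2,\3,
+  # i.e. 13,0321.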
+ TOOLS_LIBS_LO := --for-linker=sys\\$$\$$library:trace.exe
+ LIBRARY_VERSION := $(subst .,_,$(LIB_VERSION))
+endif
+
+# PikeOS
+ifeq ($(strip $(filter-out powerpc% %86 sysgo pikeos,$(target_cpu) $(target_vendor) $(target_os))),)
+ TOOLS_TARGET_PAIRS=\
+ mlib-tgt-specific.adb<mlib-tgt-specific-xi.adb \
+ indepsw.adb<indepsw-gnu.adb
+endif
+
+# *-elf, *-eabi, or *-eabispe
+ifeq ($(strip $(filter-out elf eabi eabispe,$(target_os))),)
+ TOOLS_TARGET_PAIRS=\
+ mlib-tgt-specific.adb<mlib-tgt-specific-xi.adb \
+ indepsw.adb<indepsw-gnu.adb
+endif
+
+# Cygwin/Mingw32
+ifeq ($(strip $(filter-out cygwin% mingw32% pe,$(target_os))),)
+ # Cygwin provides a full Posix environment, and so we use the default
+ # versions of s-memory and g-socthi rather than the Windows-specific
+ # MinGW versions. Ideally we would use all the default versions for
+ # Cygwin and none of the MinGW versions, but for historical reasons
+ # the Cygwin port has always been a CygMing frankenhybrid and it is
+ # a long-term project to disentangle them.
+ ifeq ($(strip $(filter-out cygwin%,$(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ s-memory.adb<s-memory.adb \
+ g-socthi.ads<g-socthi.ads \
+ g-socthi.adb<g-socthi.adb
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ s-memory.adb<s-memory-mingw.adb \
+ g-socthi.ads<g-socthi-mingw.ads \
+ g-socthi.adb<g-socthi-mingw.adb
+ endif
+ LIBGNAT_TARGET_PAIRS += \
+ a-dirval.adb<a-dirval-mingw.adb \
+ a-excpol.adb<a-excpol-abort.adb \
+ s-gloloc.adb<s-gloloc-mingw.adb \
+ s-inmaop.adb<s-inmaop-dummy.adb \
+ s-taspri.ads<s-taspri-mingw.ads \
+ s-tasinf.adb<s-tasinf-mingw.adb \
+ s-tasinf.ads<s-tasinf-mingw.ads \
+ g-stsifd.adb<g-stsifd-sockets.adb \
+ g-soliop.ads<g-soliop-mingw.ads \
+ $(ATOMICS_TARGET_PAIRS)
+
+ ifeq ($(strip $(filter-out rtx_w32 rtx_rtss,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-intman.adb<s-intman-dummy.adb \
+ s-osinte.ads<s-osinte-rtx.ads \
+ s-osprim.adb<s-osprim-rtx.adb \
+ s-taprop.adb<s-taprop-rtx.adb \
+ $(X86_TARGET_PAIRS)
+
+ EXTRA_GNATRTL_NONTASKING_OBJS = s-win32.o
+
+ ifeq ($(strip $(filter-out rtx_w32,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS += system.ads<system-rtx.ads
+
+ EH_MECHANISM=-gcc
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ system.ads<system-rtx-rtss.ads \
+ s-parame.adb<s-parame-vxworks.adb
+
+ EH_MECHANISM=
+ endif
+
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ a-exetim.adb<a-exetim-mingw.adb \
+ a-exetim.ads<a-exetim-mingw.ads \
+ a-intnam.ads<a-intnam-mingw.ads \
+ g-sercom.adb<g-sercom-mingw.adb \
+ s-interr.adb<s-interr-sigaction.adb \
+ s-intman.adb<s-intman-mingw.adb \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-osinte.ads<s-osinte-mingw.ads \
+ s-osprim.adb<s-osprim-mingw.adb \
+ s-taprop.adb<s-taprop-mingw.adb
+
+ ifeq ($(strip $(filter-out x86_64%,$(target_cpu))),)
+ ifeq ($(strip $(MULTISUBDIR)),/32)
+ LIBGNAT_TARGET_PAIRS += \
+ $(X86_TARGET_PAIRS) \
+ system.ads<system-mingw.ads
+ SO_OPTS= -m32 -Wl,-soname,
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ $(X86_64_TARGET_PAIRS) \
+ system.ads<system-mingw-x86_64.ads
+ SO_OPTS = -m64 -Wl,-soname,
+ endif
+ else
+ ifeq ($(strip $(MULTISUBDIR)),/64)
+ LIBGNAT_TARGET_PAIRS += \
+ $(X86_64_TARGET_PAIRS) \
+ system.ads<system-mingw-x86_64.ads
+ SO_OPTS = -m64 -Wl,-soname,
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ $(X86_TARGET_PAIRS) \
+ system.ads<system-mingw.ads
+ SO_OPTS = -m32 -Wl,-soname,
+ endif
+ endif
+
+ EXTRA_GNATRTL_NONTASKING_OBJS = \
+ s-win32.o s-winext.o g-regist.o g-sse.o g-ssvety.o
+ EXTRA_GNATRTL_TASKING_OBJS = a-exetim.o
+ EXTRA_LIBGNAT_SRCS+=mingw32.h
+ MISCLIB = -lws2_32
+
+    # ??? This will be replaced by gnatlib-shared-dual-win32 once GNAT
+    # auto-import support for arrays/records is done.
+ GNATLIB_SHARED = gnatlib-shared-win32
+
+ EH_MECHANISM=-gcc
+ endif
+
+ TOOLS_TARGET_PAIRS= \
+ mlib-tgt-specific.adb<mlib-tgt-specific-mingw.adb \
+ indepsw.adb<indepsw-mingw.adb
+
+ GMEM_LIB = gmemlib
+ EXTRA_GNATTOOLS = ../../gnatdll$(exeext)
+ EXTRA_GNATMAKE_OBJS = mdll.o mdll-utl.o mdll-fil.o
+ soext = .dll
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# MIPS Linux
+ifeq ($(strip $(filter-out mips linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-linux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ system.ads<system-linux-mips.ads
+
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lpthread
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# MIPS/el Linux
+ifeq ($(strip $(filter-out mipsel linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS_COMMON = \
+ a-intnam.ads<a-intnam-linux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux-mipsel.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix-noaltstack.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ g-sercom.adb<g-sercom-linux.adb
+
+ LIBGNAT_TARGET_PAIRS_32 = \
+ system.ads<system-linux-mipsel.ads
+
+ LIBGNAT_TARGET_PAIRS_64 = \
+ system.ads<system-linux-mips64el.ads
+
+ ifeq ($(strip $(shell $(GCC_FOR_TARGET) $(GNATLIBCFLAGS) -print-multi-os-directory)),../lib64)
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64)
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32)
+ endif
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lpthread
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# MIPS64/el Linux
+ifeq ($(strip $(filter-out mips64el linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS_COMMON = \
+ a-intnam.ads<a-intnam-linux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux-mipsel.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix-noaltstack.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ g-sercom.adb<g-sercom-linux.adb
+
+ LIBGNAT_TARGET_PAIRS_32 = \
+ system.ads<system-linux-mipsel.ads
+
+ LIBGNAT_TARGET_PAIRS_64 = \
+ system.ads<system-linux-mips64el.ads
+
+ ifeq ($(strip $(shell $(GCC_FOR_TARGET) $(GNATLIBCFLAGS) -print-multi-os-directory)),../lib64)
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64)
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32)
+ endif
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lpthread
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# PowerPC and e500v2 Linux
+ifeq ($(strip $(filter-out powerpc% linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS_COMMON = \
+ a-exetim.adb<a-exetim-posix.adb \
+ a-exetim.ads<a-exetim-default.ads \
+ a-intnam.ads<a-intnam-linux.ads \
+ a-synbar.adb<a-synbar-posix.adb \
+ a-synbar.ads<a-synbar-posix.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-tpopsp.adb<s-tpopsp-tls.adb \
+ g-sercom.adb<g-sercom-linux.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS)
+
+ ifeq ($(strip $(filter-out xenomai,$(THREAD_KIND))),)
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON)
+
+ LIBGNAT_TARGET_PAIRS += \
+ s-osinte.ads<s-osinte-linux-xenomai.ads \
+ s-osprim.adb<s-osprim-linux-xenomai.adb \
+ s-taprop.adb<s-taprop-linux-xenomai.adb \
+ s-taspri.ads<s-taspri-linux-xenomai.ads \
+ system.ads<system-linux-ppc.ads
+ else
+ LIBGNAT_TARGET_PAIRS_32 = \
+ system.ads<system-linux-ppc.ads
+
+ LIBGNAT_TARGET_PAIRS_64 = \
+ system.ads<system-linux-ppc64.ads
+
+ ifeq ($(strip $(shell $(GCC_FOR_TARGET) $(GNATLIBCFLAGS) -print-multi-os-directory)),../lib64)
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64)
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32)
+ endif
+
+ LIBGNAT_TARGET_PAIRS += \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix-noaltstack.ads
+ endif
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o a-exetim.o
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lpthread -lrt
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# ARM Linux, GNU eabi
+ifeq ($(strip $(filter-out arm% linux-gnueabi%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-linux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix-noaltstack.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb
+
+ ifeq ($(strip $(filter-out arm%b,$(target_cpu))),)
+ EH_MECHANISM=
+ LIBGNAT_TARGET_PAIRS += \
+ system.ads<system-linux-armeb.ads
+ else
+ EH_MECHANISM=-arm
+ LIBGNAT_TARGET_PAIRS += \
+ system.ads<system-linux-armel.ads \
+ a-exexpr.adb<a-exexpr-gcc.adb \
+ s-excmac.ads<s-excmac-arm.ads
+ endif
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o
+ EXTRA_LIBGNAT_OBJS+=raise-gcc.o
+ EXTRA_GNATRTL_NONTASKING_OBJS+=g-cppexc.o s-excmac.o
+ THREADSLIB = -lpthread
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# Sparc Linux
+ifeq ($(strip $(filter-out sparc% linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS_COMMON = \
+ a-intnam.ads<a-intnam-linux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux-sparc.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix-noaltstack.ads \
+ s-tpopsp.adb<s-tpopsp-tls.adb
+
+ LIBGNAT_TARGET_PAIRS_32 = \
+ system.ads<system-linux-sparc.ads
+
+ LIBGNAT_TARGET_PAIRS_64 = \
+ system.ads<system-linux-sparcv9.ads
+
+ ifeq ($(strip $(shell $(GCC_FOR_TARGET) $(GNATLIBCFLAGS) -print-multi-os-directory)),../lib64)
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64)
+ else
+ LIBGNAT_TARGET_PAIRS = \
+ $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32)
+ endif
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lpthread
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# HP/PA Linux
+ifeq ($(strip $(filter-out hppa% linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-linux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux-hppa.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix-noaltstack.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ system.ads<system-linux-hppa.ads
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o
+ EH_MECHANISM=-gcc
+ THREADSLIB = -lpthread
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# SH4 Linux
+ifeq ($(strip $(filter-out sh4% linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-linux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-taspri.ads<s-taspri-posix-noaltstack.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ system.ads<system-linux-sh4.ads
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-linux.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o
+ EH_MECHANISM=-gcc
+ MISCLIB=
+ THREADSLIB = -lpthread
+ GNATLIB_SHARED = gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# IA64 Linux
+ifeq ($(strip $(filter-out %ia64 linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-exetim.adb<a-exetim-posix.adb \
+ a-exetim.ads<a-exetim-default.ads \
+ a-intnam.ads<a-intnam-linux.ads \
+ a-numaux.ads<a-numaux-libc-x86.ads \
+ a-synbar.adb<a-synbar-posix.adb \
+ a-synbar.ads<a-synbar-posix.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux.ads \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-tpopsp.adb<s-tpopsp-tls.adb \
+ s-taspri.ads<s-taspri-posix-noaltstack.ads \
+ g-sercom.adb<g-sercom-linux.adb \
+ system.ads<system-linux-ia64.ads \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS)
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o a-exetim.o
+ EH_MECHANISM=-gcc
+ MISCLIB=
+ THREADSLIB=-lpthread -lrt
+ GNATLIB_SHARED=gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# IA64 HP-UX
+ifeq ($(strip $(filter-out ia64% hp hpux%,$(target_cpu) $(target_vendor) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-hpux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osinte.ads<s-osinte-hpux.ads \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-posix.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ system.ads<system-hpux-ia64.ads \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS)
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-ia64-hpux.adb
+
+ MISCLIB=
+ EH_MECHANISM=-gcc
+ THREADSLIB=-lpthread
+ GNATLIB_SHARED=gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ soext = .so
+ SO_OPTS = -Wl,+h,
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# Alpha Linux
+ifeq ($(strip $(filter-out alpha% linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-linux.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux-alpha.ads \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb \
+ s-taspri.ads<s-taspri-posix-noaltstack.ads \
+ system.ads<system-linux-alpha.ads \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS)
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o
+ EH_MECHANISM=-gcc
+ MISCLIB=
+ THREADSLIB=-lpthread
+ GNATLIB_SHARED=gnatlib-shared-dual
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# x86-64 Linux
+ifeq ($(strip $(filter-out %x86_64 linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-exetim.adb<a-exetim-posix.adb \
+ a-exetim.ads<a-exetim-default.ads \
+ a-intnam.ads<a-intnam-linux.ads \
+ a-synbar.adb<a-synbar-posix.adb \
+ a-synbar.ads<a-synbar-posix.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux.ads \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-tpopsp.adb<s-tpopsp-tls.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ g-sercom.adb<g-sercom-linux.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(X86_64_TARGET_PAIRS) \
+ system.ads<system-linux-x86_64.ads
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=g-sse.o g-ssvety.o
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o a-exetim.o
+ EH_MECHANISM=-gcc
+ THREADSLIB=-lpthread -lrt
+ MISCLIB = -ldl
+ GNATLIB_SHARED=gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+ifeq ($(strip $(filter-out %x32 linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-exetim.adb<a-exetim-posix.adb \
+ a-exetim.ads<a-exetim-default.ads \
+ a-intnam.ads<a-intnam-linux.ads \
+ a-synbar.adb<a-synbar-posix.adb \
+ a-synbar.ads<a-synbar-posix.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux-x32.ads \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osprim.adb<s-osprim-x32.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-tpopsp.adb<s-tpopsp-tls.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ g-sercom.adb<g-sercom-linux.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(X86_64_TARGET_PAIRS) \
+ system.ads<system-linux-x86.ads
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=g-sse.o g-ssvety.o
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o a-exetim.o
+ EH_MECHANISM=-gcc
+ THREADSLIB=-lpthread -lrt
+ GNATLIB_SHARED=gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
+# Darwin (Mac OS X)
+ifeq ($(strip $(filter-out darwin%,$(target_os))),)
+ SO_OPTS = -shared-libgcc
+ LIBGNAT_TARGET_PAIRS = \
+ a-intnam.ads<a-intnam-darwin.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-osinte.adb<s-osinte-darwin.adb \
+ s-osinte.ads<s-osinte-darwin.ads \
+ s-taprop.adb<s-taprop-posix.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-posix-foreign.adb
+
+ ifeq ($(strip $(filter-out %86,$(target_cpu))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-intman.adb<s-intman-susv3.adb \
+ s-osprim.adb<s-osprim-darwin.adb \
+ $(ATOMICS_TARGET_PAIRS)
+
+ ifeq ($(strip $(MULTISUBDIR)),/x86_64)
+ LIBGNAT_TARGET_PAIRS += \
+ $(X86_64_TARGET_PAIRS) \
+ system.ads<system-darwin-x86_64.ads
+ SO_OPTS += -m64
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ $(X86_TARGET_PAIRS) \
+ system.ads<system-darwin-x86.ads
+ endif
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=g-sse.o g-ssvety.o
+ endif
+
+ ifeq ($(strip $(filter-out %x86_64,$(target_cpu))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-intman.adb<s-intman-susv3.adb \
+ s-osprim.adb<s-osprim-darwin.adb \
+ $(ATOMICS_TARGET_PAIRS)
+
+ ifeq ($(strip $(MULTISUBDIR)),/i386)
+ LIBGNAT_TARGET_PAIRS += \
+ $(X86_TARGET_PAIRS) \
+ system.ads<system-darwin-x86.ads
+ SO_OPTS += -m32
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ $(X86_64_TARGET_PAIRS) \
+ system.ads<system-darwin-x86_64.ads
+ endif
+
+ EXTRA_GNATRTL_NONTASKING_OBJS=g-sse.o g-ssvety.o
+ endif
+
+ ifeq ($(strip $(filter-out powerpc%,$(target_cpu))),)
+ LIBGNAT_TARGET_PAIRS += \
+ s-intman.adb<s-intman-posix.adb \
+ s-osprim.adb<s-osprim-posix.adb \
+ a-numaux.ads<a-numaux-darwin.ads \
+ a-numaux.adb<a-numaux-darwin.adb
+
+ ifeq ($(strip $(MULTISUBDIR)),/ppc64)
+ LIBGNAT_TARGET_PAIRS += \
+ system.ads<system-darwin-ppc64.ads
+ SO_OPTS += -m64
+ else
+ LIBGNAT_TARGET_PAIRS += \
+ system.ads<system-darwin-ppc.ads
+ endif
+ endif
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-darwin.adb \
+ indepsw.adb<indepsw-darwin.adb
+
+ EH_MECHANISM=-gcc
+ GNATLIB_SHARED = gnatlib-shared-darwin
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+ soext = .dylib
+ GCC_LINK_FLAGS=-static-libstdc++
+endif
+
+# ARM Nucleus
+ifeq ($(strip $(filter-out arm nucleus%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ system.ads<system-nucleus-arm.ads \
+ a-numaux.ads<a-numaux-vxworks.ads \
+ a-intnam.ads<a-intnam-nucleus.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-osinte.ads<s-osinte-nucleus.ads \
+ s-osinte.adb<s-osinte-nucleus.adb \
+ s-osprim.adb<s-osprim-nucleus.adb \
+ s-parame.adb<s-parame-vxworks.adb \
+ s-taprop.adb<s-taprop-posix.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ s-tpopsp.adb<s-tpopsp-posix.adb \
+ $(DUMMY_SOCKETS_TARGET_PAIRS)
+
+ LIBRARY_VERSION := $(LIB_VERSION)
+ GNATRTL_SOCKETS_OBJS =
+endif
+
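+# All targets that selected EH_MECHANISM=-gcc above share the same glue:
+# the GCC-specific exception propagation pairs plus the corresponding
+# runtime objects.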
+ifeq ($(EH_MECHANISM),-gcc)
+ LIBGNAT_TARGET_PAIRS += \
+ a-exexpr.adb<a-exexpr-gcc.adb \
+ s-excmac.ads<s-excmac-gcc.ads
+ EXTRA_LIBGNAT_OBJS+=raise-gcc.o
+ EXTRA_GNATRTL_NONTASKING_OBJS+=g-cppexc.o s-excmac.o
+endif
+
+# Use the Ada 2005 version of Ada.Exceptions by default, unless another
+# version was already specified explicitly. The base files (a-except.ad?)
+# are used only for building the compiler and other basic tools; they
+# lack the Ada 2005 additions, which would cause bootstrap problems if
+# they were included there.
+
+ifeq ($(filter a-except%,$(LIBGNAT_TARGET_PAIRS)),)
+ LIBGNAT_TARGET_PAIRS += \
+ a-except.ads<a-except-2005.ads \
+ a-except.adb<a-except-2005.adb
+endif
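+# ($(filter a-except%,$(LIBGNAT_TARGET_PAIRS)) returns any a-except pair
+# already present, so the addition above only happens when no target
+# section chose its own Ada.Exceptions variant.)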
+
+# LIBGNAT_SRCS is the list of all C files (including headers) of the runtime
+# library. LIBGNAT_OBJS is the list of object files for libgnat.
+# thread.c is special: it is put into GNATRTL_TASKING_OBJS by Makefile.rtl.
+LIBGNAT_OBJS = adadecode.o adaint.o argv.o aux-io.o \
+ cal.o cio.o cstreams.o ctrl_c.o \
+ env.o errno.o exit.o expect.o final.o \
+ init.o initialize.o locales.o mkdir.o \
+ raise.o seh_init.o socket.o sysdep.o \
+ targext.o terminals.o tracebak.o \
+ $(EXTRA_LIBGNAT_OBJS)
+
+# NOTE ??? - when the -I option for compiling Ada code is made to work,
+# the library installation will change and there will be a
+# GNAT_RTL_SRCS. Right now we count on being able to build GNATRTL_OBJS
+# from ADA_INCLUDE_SRCS.
+
+LIBGNAT_SRCS = $(patsubst %.o,%.c,$(LIBGNAT_OBJS)) \
+ adadecode.h adaint.h env.h gsocket.h raise.h \
+ tb-gcc.c thread.c $(EXTRA_LIBGNAT_SRCS)
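+# ($(patsubst %.o,%.c,$(LIBGNAT_OBJS)) maps each object back to its C
+# source, e.g. "adaint.o argv.o" becomes "adaint.c argv.c"; the headers
+# and the C files without an object of their own are listed explicitly.)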
+
+# GNATRTL_NONTASKING_OBJS and GNATRTL_TASKING_OBJS can be found in
+# the following include file:
+
+include $(fsrcdir)/ada/Makefile.rtl
+
+# memtrack.o is special: it is not put into libgnat.
+GNATRTL_OBJS = $(GNATRTL_NONTASKING_OBJS) $(GNATRTL_TASKING_OBJS) \
+ memtrack.o
+
+# Run time source files
+ADA_INCLUDE_SRCS =\
+ ada.ads calendar.ads directio.ads gnat.ads interfac.ads ioexcept.ads \
+ machcode.ads text_io.ads unchconv.ads unchdeal.ads \
+ sequenio.ads system.ads memtrack.adb \
+ a-[a-o]*.adb a-[p-z]*.adb a-[a-o]*.ads a-[p-z]*.ads g-*.ad? i-*.ad? \
+ s-[a-o]*.adb s-[p-z]*.adb s-[a-o]*.ads s-[p-z]*.ads
+
+# Files that are in ADA_INCLUDE_SRCS but not in all configurations.
+# They will be removed from the run time if not used.
+ADA_EXCLUDE_SRCS =\
+ s-bb.ads \
+ s-bbbosu.ads s-bbcaco.ads s-bbcppr.ads s-bbexti.adb s-bbexti.ads \
+ s-bbinte.adb s-bbinte.ads s-bbprot.adb s-bbprot.ads s-bbsle3.ads \
+ s-bbsuer.ads s-bbsule.ads s-bbthqu.adb s-bbthqu.ads s-bbthre.adb \
+ s-bbthre.ads s-bbtiev.adb s-bbtiev.ads s-bbtime.adb s-bbtime.ads \
+ s-bcprmu.adb s-bcprmu.ads s-btstch.adb s-btstch.ads \
+ s-init.ads s-init.adb \
+ s-po32gl.adb s-po32gl.ads \
+ s-stache.adb s-stache.ads \
+ s-thread.ads \
+ s-vxwext.adb s-vxwext.ads \
+ s-win32.ads s-winext.ads \
+ g-regist.adb g-regist.ads g-sse.ads g-ssvety.ads \
+ i-vxwoio.adb i-vxwoio.ads i-vxwork.ads \
+ g-allein.ads g-alleve.ads g-altcon.ads g-alveop.adb g-alvety.ads \
+ g-alleve.adb g-altcon.adb g-altive.ads g-alveop.ads g-alvevi.ads
+
+# ADA_EXCLUDE_SRCS without the sources used by the target
+ADA_EXCLUDE_FILES=$(filter-out \
+ $(patsubst %$(objext),%.ads,$(GNATRTL_OBJS)) \
+ $(patsubst %$(objext),%.adb,$(GNATRTL_OBJS)), \
+ $(ADA_EXCLUDE_SRCS))
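+# (The two patsubst calls turn $(GNATRTL_OBJS) back into .ads/.adb
+# names, e.g. with $(objext) = .o, "s-thread.o" yields "s-thread.ads"
+# and "s-thread.adb"; filter-out then keeps only the excluded sources
+# that the current target does not use.)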
+
+LIBGNAT=../$(RTSDIR)/libgnat.a
+
+TOOLS_FLAGS_TO_PASS= \
+ "CC=$(CC)" \
+ "CFLAGS=$(CFLAGS)" \
+ "LDFLAGS=$(LDFLAGS)" \
+ "ADAFLAGS=$(ADAFLAGS)" \
+ "INCLUDES=$(INCLUDES_FOR_SUBDIR)"\
+ "ADA_INCLUDES=$(ADA_INCLUDES) $(ADA_INCLUDES_FOR_SUBDIR)"\
+ "libsubdir=$(libsubdir)" \
+ "exeext=$(exeext)" \
+ "fsrcdir=$(fsrcdir)" \
+ "srcdir=$(fsrcdir)" \
+ "TOOLS_LIBS=$(TOOLS_LIBS) $(TGT_LIB)" \
+ "GNATMAKE=$(GNATMAKE)" \
+ "GNATLINK=$(GNATLINK)" \
+ "GNATBIND=$(GNATBIND)"
+
+GCC_LINK=$(CXX) $(GCC_LINK_FLAGS) $(ADA_INCLUDES) $(LDFLAGS)
+
+# Build directory for the tools. The target-dependent sources are copied
+# using the same mechanism as for gnatlib; the other sources are
+# accessed through the vpath directives below.
+# Note: this is a dummy target; stamp-tools is mainly handled by gnattools.
+
+../stamp-tools:
+ touch ../stamp-tools
+
+# When compiling the tools, the runtime has to come first on the path so
+# that it hides the runtime files lying with the rest of the sources.
+ifeq ($(TOOLSCASE),native)
+ vpath %.ads ../$(RTSDIR) ../
+ vpath %.adb ../$(RTSDIR) ../
+ vpath %.c ../$(RTSDIR) ../
+ vpath %.h ../$(RTSDIR) ../
+endif
+
+# In the cross tools case, everything is compiled with the native
+# gnatmake/link, so only -I needs to be modified in ADA_INCLUDES.
+ifeq ($(TOOLSCASE),cross)
+ vpath %.ads ../
+ vpath %.adb ../
+ vpath %.c ../
+ vpath %.h ../
+endif
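+# (Each vpath directive makes make search the listed directories, in
+# order, for prerequisites matching the pattern that are not found
+# locally; putting ../$(RTSDIR) first in the native case is what lets
+# its copies shadow the generic sources in ..)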
+
+# The gnatmake/gnatlink tools cannot always be built with gnatmake/gnatlink,
+# for bootstrap reasons: gnatmake should be built with a recent compiler, and
+# a recent compiler may not generate ALI files compatible with an old
+# gnatmake, so it is important to be able to build gnatmake without a
+# version of gnatmake around. Once everything has been compiled once,
+# gnatmake can be recompiled with itself (see target gnattools1-re).
+gnattools1: ../stamp-tools ../stamp-gnatlib-$(RTSDIR)
+ $(MAKE) -C tools -f ../Makefile $(TOOLS_FLAGS_TO_PASS) \
+ TOOLSCASE=native \
+ ../../gnatmake$(exeext) ../../gnatlink$(exeext)
+
+# gnatmake/gnatlink can be built with a recent gnatmake/gnatlink if one is
+# available. This is especially convenient for building cross tools or for
+# rebuilding the tools when the original bootstrap has already been done.
+gnattools1-re: ../stamp-tools
+ $(MAKE) -C tools -f ../Makefile $(TOOLS_FLAGS_TO_PASS) \
+ TOOLSCASE=cross INCLUDES="" gnatmake-re gnatlink-re
+
+# these tools are built with gnatmake & are common to native and cross
+gnattools2: ../stamp-tools
+ $(MAKE) -C tools -f ../Makefile $(TOOLS_FLAGS_TO_PASS) \
+ TOOLSCASE=native common-tools $(EXTRA_GNATTOOLS)
+
+# those tools are only built for the cross version
+gnattools4: ../stamp-tools
+ifeq ($(ENABLE_VXADDR2LINE),true)
+ $(MAKE) -C tools -f ../Makefile $(TOOLS_FLAGS_TO_PASS) \
+	  TOOLSCASE=cross top_builddir=../../.. \
+ ../../vxaddr2line$(exeext)
+endif
+
+common-tools: ../stamp-tools
+ $(GNATMAKE) -j0 -c -b $(ADA_INCLUDES) \
+ --GNATBIND="$(GNATBIND)" --GCC="$(CC) $(ALL_ADAFLAGS)" \
+ gnatchop gnatcmd gnatkr gnatls gnatprep gnatxref gnatfind gnatname \
+ gnatclean -bargs $(ADA_INCLUDES) $(GNATBIND_FLAGS)
+ $(GNATLINK) -v gnatcmd -o ../../gnat$(exeext) \
+ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+ $(GNATLINK) -v gnatchop -o ../../gnatchop$(exeext) \
+ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+ $(GNATLINK) -v gnatkr -o ../../gnatkr$(exeext) \
+ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+ $(GNATLINK) -v gnatls -o ../../gnatls$(exeext) \
+ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+ $(GNATLINK) -v gnatprep -o ../../gnatprep$(exeext) \
+ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+ $(GNATLINK) -v gnatxref -o ../../gnatxref$(exeext) \
+ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+ $(GNATLINK) -v gnatfind -o ../../gnatfind$(exeext) \
+ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+ $(GNATLINK) -v gnatname -o ../../gnatname$(exeext) \
+ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+ $(GNATLINK) -v gnatclean -o ../../gnatclean$(exeext) \
+ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+
+../../gnatsym$(exeext): ../stamp-tools
+ $(GNATMAKE) -c $(ADA_INCLUDES) gnatsym --GCC="$(CC) $(ALL_ADAFLAGS)"
+ $(GNATBIND) $(ADA_INCLUDES) $(GNATBIND_FLAGS) gnatsym
+ $(GNATLINK) -v gnatsym -o $@ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+
+../../gnatdll$(exeext): ../stamp-tools
+ $(GNATMAKE) -c $(ADA_INCLUDES) gnatdll --GCC="$(CC) $(ALL_ADAFLAGS)"
+ $(GNATBIND) $(ADA_INCLUDES) $(GNATBIND_FLAGS) gnatdll
+ $(GNATLINK) -v gnatdll -o $@ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+
+../../vxaddr2line$(exeext): ../stamp-tools
+ $(GNATMAKE) -c $(ADA_INCLUDES) vxaddr2line --GCC="$(CC) $(ALL_ADAFLAGS)"
+ $(GNATBIND) $(ADA_INCLUDES) $(GNATBIND_FLAGS) vxaddr2line
+ $(GNATLINK) -v vxaddr2line -o $@ --GCC="$(GCC_LINK)" ../targext.o $(CLIB)
+
+gnatmake-re: ../stamp-tools
+ $(GNATMAKE) -j0 $(ADA_INCLUDES) -u sdefault --GCC="$(CC) $(MOST_ADA_FLAGS)"
+ $(GNATMAKE) -j0 -c $(ADA_INCLUDES) gnatmake --GCC="$(CC) $(ALL_ADAFLAGS)"
+ $(GNATBIND) $(ADA_INCLUDES) $(GNATBIND_FLAGS) gnatmake
+ $(GNATLINK) -v gnatmake -o ../../gnatmake$(exeext) \
+ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+
+# Note the use of the "mv" command, in order to allow gnatlink to be linked
+# with the former version of gnatlink itself, which cannot overwrite itself.
+# gnatlink-re cannot be run at the same time as gnatmake-re, hence the
+# dependency.
+gnatlink-re: ../stamp-tools gnatmake-re
+ $(GNATMAKE) -j0 -c $(ADA_INCLUDES) gnatlink --GCC="$(CC) $(ALL_ADAFLAGS)"
+ $(GNATBIND) $(ADA_INCLUDES) $(GNATBIND_FLAGS) gnatlink
+ $(GNATLINK) -v gnatlink -o ../../gnatlinknew$(exeext) \
+ --GCC="$(GCC_LINK)" $(TOOLS_LIBS)
+ $(MV) ../../gnatlinknew$(exeext) ../../gnatlink$(exeext)
+
+# Needs to be built with CC=gcc.
+# Since the RTL should be built with the latest compiler, remove the
+# stamp target in the parent directory whenever gnat1 is rebuilt.
+
+# Likewise for the tools
+../../gnatmake$(exeext): $(P) b_gnatm.o $(GNATMAKE_OBJS)
+ +$(GCC_LINK) $(ALL_CFLAGS) -o $@ b_gnatm.o $(GNATMAKE_OBJS) $(TOOLS_LIBS)
+
+../../gnatlink$(exeext): $(P) b_gnatl.o $(GNATLINK_OBJS)
+ +$(GCC_LINK) $(ALL_CFLAGS) -o $@ b_gnatl.o $(GNATLINK_OBJS) $(TOOLS_LIBS)
+
+../stamp-gnatlib-$(RTSDIR):
+ @if [ ! -f stamp-gnatlib-$(RTSDIR) ] ; \
+ then \
+ $(ECHO) You must first build the GNAT library: make gnatlib; \
+ false; \
+ else \
+ true; \
+ fi
+
+install-gnatlib: ../stamp-gnatlib-$(RTSDIR)
+# Create the directory before deleting it, in case the directory is
+# a list of directories (as it may be on VMS). This ensures we are
+# deleting the right one.
+ -$(MKDIR) $(DESTDIR)$(ADA_RTL_OBJ_DIR)
+ -$(MKDIR) $(DESTDIR)$(ADA_INCLUDE_DIR)
+ $(RMDIR) $(DESTDIR)$(ADA_RTL_OBJ_DIR)
+ $(RMDIR) $(DESTDIR)$(ADA_INCLUDE_DIR)
+ -$(MKDIR) $(DESTDIR)$(ADA_RTL_OBJ_DIR)
+ -$(MKDIR) $(DESTDIR)$(ADA_INCLUDE_DIR)
+ for file in $(RTSDIR)/*.ali; do \
+ $(INSTALL_DATA_DATE) $$file $(DESTDIR)$(ADA_RTL_OBJ_DIR); \
+ done
+ -cd $(RTSDIR); for file in *$(arext);do \
+ $(INSTALL_DATA) $$file $(DESTDIR)$(ADA_RTL_OBJ_DIR); \
+ $(RANLIB_FOR_TARGET) $(DESTDIR)$(ADA_RTL_OBJ_DIR)/$$file; \
+ done
+ -$(foreach file, $(EXTRA_ADALIB_FILES), \
+ $(INSTALL_DATA_DATE) $(RTSDIR)/$(file) $(DESTDIR)$(ADA_RTL_OBJ_DIR) && \
+ ) true
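+# Note on the construct above: $(foreach) emits one "command &&" per file,
+# so the trailing "true" closes the "&&" chain and keeps the command
+# well-formed even when EXTRA_ADALIB_FILES is empty.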
+# Install the shared libraries, if any, using $(INSTALL) instead
+# of $(INSTALL_DATA). The latter may force a mode inappropriate
+# for shared libraries on some targets, e.g. on HP-UX where the x
+# permission is required.
+# Also install the .dSYM directories if they exist (these directories
+# contain the debug information for the shared libraries on darwin)
+ for file in gnat gnarl; do \
+ if [ -f $(RTSDIR)/lib$${file}$(hyphen)$(LIBRARY_VERSION)$(soext) ]; then \
+ $(INSTALL) $(RTSDIR)/lib$${file}$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(DESTDIR)$(ADA_RTL_OBJ_DIR); \
+ fi; \
+ if [ -f $(RTSDIR)/lib$${file}$(soext) ]; then \
+ $(LN_S) lib$${file}$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(DESTDIR)$(ADA_RTL_OBJ_DIR)/lib$${file}$(soext); \
+ fi; \
+ if [ -d $(RTSDIR)/lib$${file}$(hyphen)$(LIBRARY_VERSION)$(soext).dSYM ]; then \
+ $(CP) -r $(RTSDIR)/lib$${file}$(hyphen)$(LIBRARY_VERSION)$(soext).dSYM \
+ $(DESTDIR)$(ADA_RTL_OBJ_DIR); \
+ fi; \
+ done
+# This copy must be done preserving the date on the original file.
+ for file in $(RTSDIR)/*.ad?; do \
+ $(INSTALL_DATA_DATE) $$file $(DESTDIR)$(ADA_INCLUDE_DIR); \
+ done
+ cd $(DESTDIR)$(ADA_INCLUDE_DIR); $(CHMOD) a-wx *.adb
+ cd $(DESTDIR)$(ADA_INCLUDE_DIR); $(CHMOD) a-wx *.ads
+
+../stamp-gnatlib2-$(RTSDIR):
+ $(RM) $(RTSDIR)/s-*.ali
+ $(RM) $(RTSDIR)/s-*$(objext)
+ $(RM) $(RTSDIR)/a-*.ali
+ $(RM) $(RTSDIR)/a-*$(objext)
+ $(RM) $(RTSDIR)/*.ali
+ $(RM) $(RTSDIR)/*$(objext)
+ $(RM) $(RTSDIR)/*$(arext)
+ $(RM) $(RTSDIR)/*$(soext)
+ touch ../stamp-gnatlib2-$(RTSDIR)
+ $(RM) ../stamp-gnatlib-$(RTSDIR)
+
+../stamp-gnatlib1-$(RTSDIR): Makefile ../stamp-gnatlib2-$(RTSDIR)
+ $(RMDIR) $(RTSDIR)
+ $(MKDIR) $(RTSDIR)
+ $(CHMOD) u+w $(RTSDIR)
+# Copy target independent sources
+ $(foreach f,$(ADA_INCLUDE_SRCS) $(LIBGNAT_SRCS), \
+ $(LN_S) $(fsrcpfx)ada/$(f) $(RTSDIR) ;) true
+# Remove files not used
+ $(RM) $(patsubst %,$(RTSDIR)/%,$(ADA_EXCLUDE_FILES))
+# Remove files to be replaced by target dependent sources
+ $(RM) $(foreach PAIR,$(LIBGNAT_TARGET_PAIRS), \
+ $(RTSDIR)/$(word 1,$(subst <, ,$(PAIR))))
+ for f in $(RTSDIR)/*-*-*.ads $(RTSDIR)/*-*-*.adb; do \
+ case "$$f" in \
+ $(RTSDIR)/s-stratt-*) ;; \
+ *) $(RM) $$f ;; \
+ esac; \
+ done
+# Copy new target dependent sources
+ $(foreach PAIR,$(LIBGNAT_TARGET_PAIRS), \
+ $(LN_S) $(fsrcpfx)ada/$(word 2,$(subst <, ,$(PAIR))) \
+ $(RTSDIR)/$(word 1,$(subst <, ,$(PAIR)));)
+# Copy tsystem.h
+ $(CP) $(srcdir)/tsystem.h $(RTSDIR)
+ $(RM) ../stamp-gnatlib-$(RTSDIR)
+ touch ../stamp-gnatlib1-$(RTSDIR)
+
+ifeq ($(strip $(filter-out alpha64 ia64 dec hp vms% openvms% alphavms%, $(host_cpu) $(host_os))),)
+OSCONS_CPP=../../$(DECC) -E /comment=as_is -DNATIVE \
+ -DTARGET='""$(target)""' $(fsrcpfx)ada/s-oscons-tmplt.c
+
+OSCONS_EXTRACT=../../$(DECC) -DNATIVE \
+ -DTARGET='""$(target)""' $(fsrcpfx)ada/s-oscons-tmplt.c ; \
+ ld -o s-oscons-tmplt.exe s-oscons-tmplt.obj; \
+ ./s-oscons-tmplt.exe > s-oscons-tmplt.s
+
+else
+# GCC_FOR_TARGET has paths relative to the gcc directory, so we need to
+# adjust for running it from $(RTSDIR).
+OSCONS_CC=`echo "$(GCC_FOR_TARGET)" \
+ | sed -e 's^\./xgcc^../../xgcc^' -e 's^-B./^-B../../^'`
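+# For illustration only (value hypothetical): if GCC_FOR_TARGET were
+# "./xgcc -B./", the sed invocation above would rewrite it to
+# "../../xgcc -B../../", so the just-built driver is still found when
+# invoked from within $(RTSDIR).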
+OSCONS_CPP=$(OSCONS_CC) $(GNATLIBCFLAGS) -E -C \
+ -DTARGET=\"$(target)\" $(fsrcpfx)ada/s-oscons-tmplt.c > s-oscons-tmplt.i
+OSCONS_EXTRACT=$(OSCONS_CC) $(GNATLIBCFLAGS) -S s-oscons-tmplt.i
+endif
+
+./bldtools/oscons/xoscons: xoscons.adb xutil.ads xutil.adb
+ -$(MKDIR) ./bldtools/oscons
+ $(RM) $(addprefix ./bldtools/oscons/,$(notdir $^))
+ $(CP) $^ ./bldtools/oscons
+ (cd ./bldtools/oscons ; gnatmake -q xoscons)
+
+$(RTSDIR)/s-oscons.ads: ../stamp-gnatlib1-$(RTSDIR) s-oscons-tmplt.c gsocket.h ./bldtools/oscons/xoscons
+ $(RM) $(RTSDIR)/s-oscons-tmplt.i $(RTSDIR)/s-oscons-tmplt.s
+ (cd $(RTSDIR) ; \
+ $(OSCONS_CPP) ; \
+ $(OSCONS_EXTRACT) ; \
+ ../bldtools/oscons/xoscons s-oscons)
+
+# Don't use semicolon-separated shell commands that involve list expansions.
+# The semicolon triggers a call to DCL on VMS, and DCL can't handle command
+# line lengths in excess of 256 characters.
+# Example: cd $(RTSDIR); ar rc libfoo.a $(LONG_LIST_OF_OBJS)
+# is guaranteed to overflow the buffer.
+
+gnatlib: ../stamp-gnatlib1-$(RTSDIR) ../stamp-gnatlib2-$(RTSDIR) $(RTSDIR)/s-oscons.ads
+# C files
+ $(MAKE) -C $(RTSDIR) \
+ CC="`echo \"$(GCC_FOR_TARGET)\" \
+ | sed -e 's,\./xgcc,../../xgcc,' -e 's,-B\./,-B../../,'`" \
+ INCLUDES="$(INCLUDES_FOR_SUBDIR) -I./../.." \
+ CFLAGS="$(GNATLIBCFLAGS_FOR_C)" \
+ FORCE_DEBUG_ADAFLAGS="$(FORCE_DEBUG_ADAFLAGS)" \
+ srcdir=$(fsrcdir) \
+ -f ../Makefile $(LIBGNAT_OBJS)
+# Ada files
+ $(MAKE) -C $(RTSDIR) \
+ CC="`echo \"$(GCC_FOR_TARGET)\" \
+ | sed -e 's,\./xgcc,../../xgcc,' -e 's,-B\./,-B../../,'`" \
+ ADA_INCLUDES="" \
+ CFLAGS="$(GNATLIBCFLAGS)" \
+ ADAFLAGS="$(GNATLIBFLAGS)" \
+ FORCE_DEBUG_ADAFLAGS="$(FORCE_DEBUG_ADAFLAGS)" \
+ srcdir=$(fsrcdir) \
+ -f ../Makefile $(GNATRTL_OBJS)
+ $(RM) $(RTSDIR)/libgnat$(arext) $(RTSDIR)/libgnarl$(arext)
+ $(AR_FOR_TARGET) $(AR_FLAGS) $(RTSDIR)/libgnat$(arext) \
+ $(addprefix $(RTSDIR)/,$(GNATRTL_NONTASKING_OBJS) $(LIBGNAT_OBJS))
+ $(RANLIB_FOR_TARGET) $(RTSDIR)/libgnat$(arext)
+ $(AR_FOR_TARGET) $(AR_FLAGS) $(RTSDIR)/libgnarl$(arext) \
+ $(addprefix $(RTSDIR)/,$(GNATRTL_TASKING_OBJS))
+ $(RANLIB_FOR_TARGET) $(RTSDIR)/libgnarl$(arext)
+ ifeq ($(GMEM_LIB),gmemlib)
+ $(AR_FOR_TARGET) $(AR_FLAGS) $(RTSDIR)/libgmem$(arext) \
+ $(RTSDIR)/memtrack.o
+ $(RANLIB_FOR_TARGET) $(RTSDIR)/libgmem$(arext)
+ endif
+ $(CHMOD) a-wx $(RTSDIR)/*.ali
+ touch ../stamp-gnatlib-$(RTSDIR)
+
+# Warning: this target assumes that LIBRARY_VERSION has been set correctly.
+gnatlib-shared-default:
+ $(MAKE) $(FLAGS_TO_PASS) \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS) $(PICFLAG_FOR_TARGET)" \
+ GNATLIBCFLAGS_FOR_C="$(GNATLIBCFLAGS_FOR_C) $(PICFLAG_FOR_TARGET)" \
+ MULTISUBDIR="$(MULTISUBDIR)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ gnatlib
+ $(RM) $(RTSDIR)/libgna*$(soext)
+ cd $(RTSDIR); `echo "$(GCC_FOR_TARGET)" \
+ | sed -e 's,\./xgcc,../../xgcc,' -e 's,-B\./,-B../../,'` -shared $(GNATLIBCFLAGS) \
+ $(PICFLAG_FOR_TARGET) \
+ -o libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(GNATRTL_NONTASKING_OBJS) $(LIBGNAT_OBJS) \
+ $(SO_OPTS)libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(MISCLIB) -lm
+ cd $(RTSDIR); `echo "$(GCC_FOR_TARGET)" \
+ | sed -e 's,\./xgcc,../../xgcc,' -e 's,-B\./,-B../../,'` -shared $(GNATLIBCFLAGS) \
+ $(PICFLAG_FOR_TARGET) \
+ -o libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(GNATRTL_TASKING_OBJS) \
+ $(SO_OPTS)libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(THREADSLIB)
+ cd $(RTSDIR); $(LN_S) libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ libgnat$(soext)
+ cd $(RTSDIR); $(LN_S) libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ libgnarl$(soext)
+
+gnatlib-shared-dual:
+ $(MAKE) $(FLAGS_TO_PASS) \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS)" \
+ GNATLIBCFLAGS_FOR_C="$(GNATLIBCFLAGS_FOR_C)" \
+ MULTISUBDIR="$(MULTISUBDIR)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ gnatlib-shared-default
+ $(MV) $(RTSDIR)/libgna*$(soext) .
+ $(RM) ../stamp-gnatlib2-$(RTSDIR)
+ $(MAKE) $(FLAGS_TO_PASS) \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS)" \
+ GNATLIBCFLAGS_FOR_C="$(GNATLIBCFLAGS_FOR_C)" \
+ MULTISUBDIR="$(MULTISUBDIR)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ gnatlib
+ $(MV) libgna*$(soext) $(RTSDIR)
+
+gnatlib-shared-dual-win32:
+ $(MAKE) $(FLAGS_TO_PASS) \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS)" \
+ GNATLIBCFLAGS_FOR_C="$(GNATLIBCFLAGS_FOR_C)" \
+ PICFLAG_FOR_TARGET="$(PICFLAG_FOR_TARGET)" \
+ MULTISUBDIR="$(MULTISUBDIR)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ gnatlib-shared-win32
+ $(MV) $(RTSDIR)/libgna*$(soext) .
+ $(RM) ../stamp-gnatlib2-$(RTSDIR)
+ $(MAKE) $(FLAGS_TO_PASS) \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS)" \
+ GNATLIBCFLAGS_FOR_C="$(GNATLIBCFLAGS_FOR_C)" \
+ MULTISUBDIR="$(MULTISUBDIR)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ gnatlib
+ $(MV) libgna*$(soext) $(RTSDIR)
+
+# ??? We need to add the option to support auto-import of arrays/records to
+# the GNATLIBFLAGS once this is supported by GNAT. At that point we will
+# use the gnatlib-shared-dual-win32 target to build the GNAT runtimes on
+# Windows.
+gnatlib-shared-win32:
+ $(MAKE) $(FLAGS_TO_PASS) \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS) $(PICFLAG_FOR_TARGET)" \
+ GNATLIBCFLAGS_FOR_C="$(GNATLIBCFLAGS_FOR_C) $(PICFLAG_FOR_TARGET)" \
+ MULTISUBDIR="$(MULTISUBDIR)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ gnatlib
+ $(RM) $(RTSDIR)/libgna*$(soext)
+ cd $(RTSDIR); `echo "$(GCC_FOR_TARGET)" \
+ | sed -e 's,\./xgcc,../../xgcc,' -e 's,-B\./,-B../../,'` -shared -shared-libgcc \
+ $(PICFLAG_FOR_TARGET) \
+ -o libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(GNATRTL_NONTASKING_OBJS) $(LIBGNAT_OBJS) \
+ $(SO_OPTS)libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) $(MISCLIB)
+ cd $(RTSDIR); `echo "$(GCC_FOR_TARGET)" \
+ | sed -e 's,\./xgcc,../../xgcc,' -e 's,-B\./,-B../../,'` -shared -shared-libgcc \
+ $(PICFLAG_FOR_TARGET) \
+ -o libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(GNATRTL_TASKING_OBJS) \
+ $(SO_OPTS)libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(THREADSLIB) -Wl,libgnat$(hyphen)$(LIBRARY_VERSION)$(soext)
+
+gnatlib-shared-darwin:
+ $(MAKE) $(FLAGS_TO_PASS) \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS) $(PICFLAG_FOR_TARGET)" \
+ GNATLIBCFLAGS_FOR_C="$(GNATLIBCFLAGS_FOR_C) $(PICFLAG_FOR_TARGET) -fno-common" \
+ MULTISUBDIR="$(MULTISUBDIR)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ gnatlib
+ $(RM) $(RTSDIR)/libgnat$(soext) $(RTSDIR)/libgnarl$(soext)
+ cd $(RTSDIR); `echo "$(GCC_FOR_TARGET)" \
+ | sed -e 's,\./xgcc,../../xgcc,' -e 's,-B\./,-B../../,'` -dynamiclib $(PICFLAG_FOR_TARGET) \
+ -o libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(GNATRTL_NONTASKING_OBJS) $(LIBGNAT_OBJS) \
+ $(SO_OPTS) \
+ -Wl,-install_name,@rpath/libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(MISCLIB)
+ cd $(RTSDIR); `echo "$(GCC_FOR_TARGET)" \
+ | sed -e 's,\./xgcc,../../xgcc,' -e 's,-B\./,-B../../,'` -dynamiclib $(PICFLAG_FOR_TARGET) \
+ -o libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(GNATRTL_TASKING_OBJS) \
+ $(SO_OPTS) \
+ -Wl,-install_name,@rpath/libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ $(THREADSLIB) -Wl,libgnat$(hyphen)$(LIBRARY_VERSION)$(soext)
+ cd $(RTSDIR); $(LN_S) libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ libgnat$(soext)
+ cd $(RTSDIR); $(LN_S) libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ libgnarl$(soext)
+ cd $(RTSDIR); dsymutil libgnat$(hyphen)$(LIBRARY_VERSION)$(soext)
+ cd $(RTSDIR); dsymutil libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext)
+
+gnatlib-shared-vms:
+ $(MAKE) $(FLAGS_TO_PASS) \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS)" \
+ GNATLIBCFLAGS_FOR_C="$(GNATLIBCFLAGS_FOR_C)" \
+ MULTISUBDIR="$(MULTISUBDIR)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ gnatlib
+ $(RM) $(RTSDIR)/libgna*$(soext)
+ cd $(RTSDIR) && \
+ ../../gnatsym -s SYMVEC_$$$$.opt \
+ $(LIBGNAT_OBJS) $(GNATRTL_NONTASKING_OBJS) && \
+ ../../xgcc -g -B../../ -shared -shared-libgcc \
+ -o libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) libgnat.a \
+ sys\$$library:trace.exe \
+ --for-linker=/noinform \
+ --for-linker=SYMVEC_$$$$.opt \
+ --for-linker=gsmatch=equal,$(GSMATCH_VERSION)
+ cd $(RTSDIR) && \
+ ../../gnatsym -s SYMVEC_$$$$.opt \
+ $(GNATRTL_TASKING_OBJS) && \
+ ../../xgcc -g -B../../ -shared -shared-libgcc \
+ -o libgnarl$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ libgnarl.a libgnat$(hyphen)$(LIBRARY_VERSION)$(soext) \
+ sys\$$library:trace.exe \
+ --for-linker=/noinform \
+ --for-linker=SYMVEC_$$$$.opt \
+ --for-linker=gsmatch=equal,$(GSMATCH_VERSION)
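+# Note on the recipes above: make expands "$$$$" to the shell's "$$", i.e.
+# the process id, so each run writes a unique SYMVEC_<pid>.opt file; since
+# the "&&" chain runs in a single shell, gnatsym and the linker invocation
+# agree on the same file name.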
+
+gnatlib-shared:
+ $(MAKE) $(FLAGS_TO_PASS) \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS)" \
+ GNATLIBCFLAGS_FOR_C="$(GNATLIBCFLAGS_FOR_C)" \
+ MULTISUBDIR="$(MULTISUBDIR)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ PICFLAG_FOR_TARGET="$(PICFLAG_FOR_TARGET)" \
+ $(GNATLIB_SHARED)
+
+# When building a SJLJ runtime for VxWorks, in addition to forcing
+# ZCX_By_Default to False, we need to ensure that extra linker options
+# are not passed, to prevent the inclusion of useless objects and
+# potential troubles from the presence of extra symbols and references
+# in some configurations. The inhibition is performed by commenting out
+# the pragma instead of deleting the line, as the latter might result
+# in multiple blank lines and hence a style check error.
+gnatlib-sjlj:
+ $(MAKE) $(FLAGS_TO_PASS) EH_MECHANISM="" \
+ THREAD_KIND="$(THREAD_KIND)" ../stamp-gnatlib1-$(RTSDIR)
+ sed -e 's/ZCX_By_Default.*/ZCX_By_Default : constant Boolean := False;/' $(RTSDIR)/system.ads > $(RTSDIR)/s.ads
+ sed -e 's/\(pragma Linker.*crtbe.*\)/-- \1/' $(RTSDIR)/s.ads > $(RTSDIR)/s2.ads
+ $(RM) $(RTSDIR)/s.ads
+ $(MV) $(RTSDIR)/s2.ads $(RTSDIR)/system.ads
+ $(MAKE) $(FLAGS_TO_PASS) \
+ EH_MECHANISM="" \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS)" \
+ GNATLIBCFLAGS_FOR_C="$(GNATLIBCFLAGS_FOR_C)" \
+ FORCE_DEBUG_ADAFLAGS="$(FORCE_DEBUG_ADAFLAGS)" \
+ MULTISUBDIR="$(MULTISUBDIR)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ PICFLAG_FOR_TARGET="$(PICFLAG_FOR_TARGET)" gnatlib
+
+gnatlib-zcx:
+ $(MAKE) $(FLAGS_TO_PASS) EH_MECHANISM="-gcc" \
+ THREAD_KIND="$(THREAD_KIND)" ../stamp-gnatlib1-$(RTSDIR)
+ sed -e 's/ZCX_By_Default.*/ZCX_By_Default : constant Boolean := True;/' $(RTSDIR)/system.ads > $(RTSDIR)/s.ads
+ $(MV) $(RTSDIR)/s.ads $(RTSDIR)/system.ads
+ $(MAKE) $(FLAGS_TO_PASS) \
+ EH_MECHANISM="-gcc" \
+ GNATLIBFLAGS="$(GNATLIBFLAGS)" \
+ GNATLIBCFLAGS="$(GNATLIBCFLAGS)" \
+ GNATLIBCFLAGS_FOR_C="$(GNATLIBCFLAGS_FOR_C)" \
+ FORCE_DEBUG_ADAFLAGS="$(FORCE_DEBUG_ADAFLAGS)" \
+ MULTISUBDIR="$(MULTISUBDIR)" \
+ THREAD_KIND="$(THREAD_KIND)" \
+ PICFLAG_FOR_TARGET="$(PICFLAG_FOR_TARGET)" gnatlib
+
+# Compiling object files from source files.
+
+# Note that dependencies on obstack.h are not written
+# because that file is not part of GCC.
+# Dependencies on gvarargs.h are not written
+# because all that file does, when not compiling with GCC,
+# is include the system varargs.h.
+
+b_gnatl.adb : $(GNATLINK_OBJS)
+ $(GNATBIND) $(ADA_INCLUDES) -o b_gnatl.adb gnatlink.ali
+
+b_gnatl.o : b_gnatl.adb
+ $(CC) -c $(ALL_ADAFLAGS) $(ADA_INCLUDES) -gnatws -gnatyN \
+ $< $(OUTPUT_OPTION)
+
+b_gnatm.adb : $(GNATMAKE_OBJS)
+ $(GNATBIND) $(ADA_INCLUDES) -o b_gnatm.adb gnatmake.ali
+
+b_gnatm.o : b_gnatm.adb
+ $(CC) -c $(ALL_ADAFLAGS) $(ADA_INCLUDES) -gnatws -gnatyN \
+ $< $(OUTPUT_OPTION)
+
+ADA_INCLUDE_DIR = $(libsubdir)/adainclude
+ADA_RTL_OBJ_DIR = $(libsubdir)/adalib
+
+# Special flags
+
+# force no sibling call optimization on s-traceb.o so the number of stack
+# frames to be skipped when computing a call chain is not modified by
+# optimization.
+
+s-traceb.o : s-traceb.adb s-traceb.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(FORCE_DEBUG_ADAFLAGS) \
+ $(NO_SIBLING_ADAFLAGS) $(ADA_INCLUDES) $< $(OUTPUT_OPTION)
+
+# force debugging information on s-tasdeb.o so that it is always
+# possible to set conditional breakpoints on tasks.
+
+s-tasdeb.o : s-tasdeb.adb s-tasdeb.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(FORCE_DEBUG_ADAFLAGS) -O0 $(ADA_INCLUDES) \
+ $< $(OUTPUT_OPTION)
+
+# force debugging information on s-vaflop.o so that it is always
+# possible to call the VAX float debug print routines.
+# force at least -O so that the inline assembly works.
+
+s-vaflop.o : s-vaflop.adb s-vaflop.ads
+ $(CC) -c -O $(ALL_ADAFLAGS) $(FORCE_DEBUG_ADAFLAGS) $(ADA_INCLUDES) \
+ $< $(OUTPUT_OPTION)
+
+# force no function reordering on a-except.o because of the exclusion bounds
+# mechanism (see the source file for more detailed information).
+# force debugging information on a-except.o so that it is always
+# possible to set conditional breakpoints on exceptions.
+# use -O1 otherwise gdb isn't able to get a full backtrace on mips targets.
+
+a-except.o : a-except.adb a-except.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(FORCE_DEBUG_ADAFLAGS) -O1 -fno-inline \
+ $(NO_REORDER_ADAFLAGS) $(ADA_INCLUDES) $< $(OUTPUT_OPTION)
+
+# compile s-excdeb.o without optimization and with debug info to let the
+# debugger set breakpoints and inspect subprogram parameters on exception
+# related events.
+
+s-excdeb.o : s-excdeb.adb s-excdeb.ads s-except.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(FORCE_DEBUG_ADAFLAGS) -O0 $(ADA_INCLUDES) \
+ $< $(OUTPUT_OPTION)
+
+# force debugging information on s-assert.o so that it is always
+# possible to set a breakpoint on assert failures.
+
+s-assert.o : s-assert.adb s-assert.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(FORCE_DEBUG_ADAFLAGS) $(ADA_INCLUDES) \
+ $< $(OUTPUT_OPTION)
+
+# force debugging information on a-tags.o so that the debugger can find
+# the description of Ada.Tags.Type_Specific_Data.
+
+a-tags.o : a-tags.adb a-tags.ads
+ $(CC) -c $(ALL_ADAFLAGS) $(FORCE_DEBUG_ADAFLAGS) $(ADA_INCLUDES) \
+ $< $(OUTPUT_OPTION)
+
+# need to keep the frame pointer in this file to pop the stack properly on
+# some targets.
+tracebak.o : tracebak.c tb-alvms.c tb-alvxw.c tb-gcc.c
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ADA_CFLAGS) $(ALL_CPPFLAGS) \
+ $(INCLUDES) -fno-omit-frame-pointer $< $(OUTPUT_OPTION)
+
+adadecode.o : adadecode.c adadecode.h
+aux-io.o : aux-io.c
+argv.o : argv.c
+cal.o : cal.c
+deftarg.o : deftarg.c
+errno.o : errno.c
+exit.o : adaint.h exit.c
+expect.o : expect.c
+final.o : final.c
+locales.o : locales.c
+mkdir.o : mkdir.c
+socket.o : socket.c gsocket.h
+sysdep.o : sysdep.c
+raise.o : raise.c raise.h
+sigtramp-armvxw.o : sigtramp-armvxw.c sigtramp.h
+sigtramp-ppcvxw.o : sigtramp-ppcvxw.c sigtramp.h
+terminals.o : terminals.c
+vx_stack_info.o : vx_stack_info.c
+
+raise-gcc.o : raise-gcc.c raise.h
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ADA_CFLAGS) \
+ -iquote $(srcdir) -iquote $(ftop_srcdir)/libgcc \
+ $(ALL_CPPFLAGS) $(INCLUDES) $< $(OUTPUT_OPTION)
+
+cio.o : cio.c
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ADA_CFLAGS) \
+ $(ALL_CPPFLAGS) $(INCLUDES) $< $(OUTPUT_OPTION)
+
+init.o : init.c adaint.h raise.h
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ADA_CFLAGS) \
+ $(ALL_CPPFLAGS) $(INCLUDES) $< $(OUTPUT_OPTION)
+
+initialize.o : initialize.c raise.h
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ADA_CFLAGS) \
+ $(ALL_CPPFLAGS) $(INCLUDES) $< $(OUTPUT_OPTION)
+
+link.o : link.c
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ADA_CFLAGS) \
+ $(ALL_CPPFLAGS) $(INCLUDES_FOR_SUBDIR) \
+ $< $(OUTPUT_OPTION)
+
+targext.o : targext.c
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ADA_CFLAGS) \
+ -iquote $(srcdir) \
+ $(ALL_CPPFLAGS) $(INCLUDES_FOR_SUBDIR) \
+ $< $(OUTPUT_OPTION)
+
+# In GNU Make, ignore whether `stage*' exists.
+.PHONY: stage1 stage2 stage3 stage4 clean realclean TAGS bootstrap
+.PHONY: risky-stage1 risky-stage2 risky-stage3 risky-stage4
+
+force:
diff --git a/gcc-4.9/gcc/ada/gcc-interface/ada-tree.def b/gcc-4.9/gcc/ada/gcc-interface/ada-tree.def
new file mode 100644
index 000000000..93967b58c
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/ada-tree.def
@@ -0,0 +1,74 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * GNAT-SPECIFIC GCC TREE CODES *
+ * *
+ * Specification *
+ * *
+ * Copyright (C) 1992-2009, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License along with GCC; see the file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+/* A type that is an unconstrained array. This node is never passed to GCC.
+ TREE_TYPE is the type of the fat pointer and TYPE_OBJECT_RECORD_TYPE is
+ the type of a record containing the template and data. */
+DEFTREECODE (UNCONSTRAINED_ARRAY_TYPE, "unconstrained_array_type", tcc_type, 0)
+
+/* A reference to an unconstrained array. This node only exists as an
+ intermediate node during the translation of a GNAT tree to a GCC tree;
+ it is never passed to GCC. The only field used is operand 0, which
+ is the fat pointer object. */
+DEFTREECODE (UNCONSTRAINED_ARRAY_REF, "unconstrained_array_ref",
+ tcc_reference, 1)
+
+/* An expression that returns an RTL suitable for its type. Operand 0
+ is an expression to be evaluated for side effects only. */
+DEFTREECODE (NULL_EXPR, "null_expr", tcc_expression, 1)
+
+/* Same as PLUS_EXPR, except that no modulo reduction is applied.
+ This is used for loops and never shows up in the tree. */
+DEFTREECODE (PLUS_NOMOD_EXPR, "plus_nomod_expr", tcc_binary, 2)
+
+/* Same as MINUS_EXPR, except that no modulo reduction is applied.
+ This is used for loops and never shows up in the tree. */
+DEFTREECODE (MINUS_NOMOD_EXPR, "minus_nomod_expr", tcc_binary, 2)
+
+/* Same as ADDR_EXPR, except that if the operand represents a bit field,
+ return the address of the byte containing the bit. This is used
+ for the Address attribute and never shows up in the tree. */
+DEFTREECODE (ATTR_ADDR_EXPR, "attr_addr_expr", tcc_reference, 1)
+
+/* Here are the tree codes for the statement types known to Ada. These
+ must be at the end of this file to allow IS_ADA_STMT to work. */
+
+/* This is how record_code_position and insert_code_for work. The former
+ makes this tree node, whose operand is a statement. The latter inserts
+ the actual statements into this node. Gimplification consists of
+ just returning the inner statement. */
+DEFTREECODE (STMT_STMT, "stmt_stmt", tcc_statement, 1)
+
+/* A loop. LOOP_STMT_COND is the test to exit the loop. LOOP_STMT_UPDATE
+ is the statement to update the loop iteration variable at the continue
+ point. LOOP_STMT_BODY are the statements in the body of the loop. And
+ LOOP_STMT_LABEL points to the LABEL_DECL of the end label of the loop. */
+DEFTREECODE (LOOP_STMT, "loop_stmt", tcc_statement, 4)
+
+/* Conditionally exit a loop. EXIT_STMT_COND is the condition, which, if
+ true, will cause the loop to be exited. If no condition is specified,
+ the loop is unconditionally exited. EXIT_STMT_LABEL is the end label
+ corresponding to the loop to exit. */
+DEFTREECODE (EXIT_STMT, "exit_stmt", tcc_statement, 2)
diff --git a/gcc-4.9/gcc/ada/gcc-interface/ada-tree.h b/gcc-4.9/gcc/ada/gcc-interface/ada-tree.h
new file mode 100644
index 000000000..c1b45effc
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/ada-tree.h
@@ -0,0 +1,513 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * A D A - T R E E *
+ * *
+ * C Header File *
+ * *
+ * Copyright (C) 1992-2013, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License along with GCC; see the file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+/* The resulting tree type. */
+union GTY((desc ("0"),
+ chain_next ("CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL")))
+ lang_tree_node
+{
+ union tree_node GTY((tag ("0"),
+ desc ("tree_node_structure (&%h)"))) generic;
+};
+
+/* Ada uses the lang_decl and lang_type fields to hold a tree.
+
+ FIXME: the variable_size annotation here is needed because these types are
+ variable-sized in some other front-ends. Due to gengtype deficiency, the
+ GTY options of such types have to agree across all front-ends. */
+struct GTY((variable_size)) lang_type { tree t; };
+struct GTY((variable_size)) lang_decl { tree t; };
+
+/* Macros to get and set the tree in TYPE_LANG_SPECIFIC. */
+#define GET_TYPE_LANG_SPECIFIC(NODE) \
+ (TYPE_LANG_SPECIFIC (NODE) ? TYPE_LANG_SPECIFIC (NODE)->t : NULL_TREE)
+
+#define SET_TYPE_LANG_SPECIFIC(NODE, X) \
+do { \
+ tree tmp = (X); \
+ if (!TYPE_LANG_SPECIFIC (NODE)) \
+ TYPE_LANG_SPECIFIC (NODE) \
+ = ggc_alloc_lang_type (sizeof (struct lang_type)); \
+ TYPE_LANG_SPECIFIC (NODE)->t = tmp; \
+} while (0)
+
+/* Macros to get and set the tree in DECL_LANG_SPECIFIC. */
+#define GET_DECL_LANG_SPECIFIC(NODE) \
+ (DECL_LANG_SPECIFIC (NODE) ? DECL_LANG_SPECIFIC (NODE)->t : NULL_TREE)
+
+#define SET_DECL_LANG_SPECIFIC(NODE, X) \
+do { \
+ tree tmp = (X); \
+ if (!DECL_LANG_SPECIFIC (NODE)) \
+ DECL_LANG_SPECIFIC (NODE) \
+ = ggc_alloc_lang_decl (sizeof (struct lang_decl)); \
+ DECL_LANG_SPECIFIC (NODE)->t = tmp; \
+} while (0)
+
+
+/* Flags added to type nodes. */
+
+/* For RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE, nonzero if this is a
+ record being used as a fat pointer (only true for RECORD_TYPE). */
+#define TYPE_FAT_POINTER_P(NODE) \
+ TYPE_LANG_FLAG_0 (RECORD_OR_UNION_CHECK (NODE))
+
+#define TYPE_IS_FAT_POINTER_P(NODE) \
+ (TREE_CODE (NODE) == RECORD_TYPE && TYPE_FAT_POINTER_P (NODE))
+
+/* For integral types and array types, nonzero if this is a packed array type
+ used for bit-packed types. Such types should not be extended to a larger
+ size or validated against a specified size. */
+#define TYPE_PACKED_ARRAY_TYPE_P(NODE) \
+ TYPE_LANG_FLAG_0 (TREE_CHECK2 (NODE, INTEGER_TYPE, ARRAY_TYPE))
+
+#define TYPE_IS_PACKED_ARRAY_TYPE_P(NODE) \
+ ((TREE_CODE (NODE) == INTEGER_TYPE || TREE_CODE (NODE) == ARRAY_TYPE) \
+ && TYPE_PACKED_ARRAY_TYPE_P (NODE))
+
+/* For INTEGER_TYPE, nonzero if this is a modular type with a modulus that
+ is not equal to two to the power of its mode's size. */
+#define TYPE_MODULAR_P(NODE) TYPE_LANG_FLAG_1 (INTEGER_TYPE_CHECK (NODE))
+
+/* For ARRAY_TYPE, nonzero if this type corresponds to a dimension of
+ an Ada array other than the first. */
+#define TYPE_MULTI_ARRAY_P(NODE) TYPE_LANG_FLAG_1 (ARRAY_TYPE_CHECK (NODE))
+
+/* For FUNCTION_TYPE, nonzero if this denotes a function returning an
+ unconstrained array or record. */
+#define TYPE_RETURN_UNCONSTRAINED_P(NODE) \
+ TYPE_LANG_FLAG_1 (FUNCTION_TYPE_CHECK (NODE))
+
+/* For RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE, nonzero if this denotes
+ a justified modular type (will only be true for RECORD_TYPE). */
+#define TYPE_JUSTIFIED_MODULAR_P(NODE) \
+ TYPE_LANG_FLAG_1 (RECORD_OR_UNION_CHECK (NODE))
+
+/* Nonzero in an arithmetic subtype if this is a subtype not known to the
+ front-end. */
+#define TYPE_EXTRA_SUBTYPE_P(NODE) TYPE_LANG_FLAG_2 (INTEGER_TYPE_CHECK (NODE))
+
+/* Nonzero for an aggregate type if this is a by-reference type. We also
+ set this on an ENUMERAL_TYPE that is dummy. */
+#define TYPE_BY_REFERENCE_P(NODE) \
+ TYPE_LANG_FLAG_2 (TREE_CHECK5 (NODE, RECORD_TYPE, UNION_TYPE, \
+ ARRAY_TYPE, UNCONSTRAINED_ARRAY_TYPE, \
+ ENUMERAL_TYPE))
+
+#define TYPE_IS_BY_REFERENCE_P(NODE) \
+ ((TREE_CODE (NODE) == RECORD_TYPE \
+ || TREE_CODE (NODE) == UNION_TYPE \
+ || TREE_CODE (NODE) == ARRAY_TYPE \
+ || TREE_CODE (NODE) == UNCONSTRAINED_ARRAY_TYPE \
+ || TREE_CODE (NODE) == ENUMERAL_TYPE) \
+ && TYPE_BY_REFERENCE_P (NODE))
+
+/* For INTEGER_TYPE, nonzero if this really represents a VAX
+ floating-point type. */
+#define TYPE_VAX_FLOATING_POINT_P(NODE) \
+ TYPE_LANG_FLAG_3 (INTEGER_TYPE_CHECK (NODE))
+
+/* For RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE, nonzero if this is the
+ type for an object whose type includes its template in addition to
+ its value (only true for RECORD_TYPE). */
+#define TYPE_CONTAINS_TEMPLATE_P(NODE) \
+ TYPE_LANG_FLAG_3 (RECORD_OR_UNION_CHECK (NODE))
+
+/* True if NODE is a thin pointer. */
+#define TYPE_IS_THIN_POINTER_P(NODE) \
+ (POINTER_TYPE_P (NODE) \
+ && TREE_CODE (TREE_TYPE (NODE)) == RECORD_TYPE \
+ && TYPE_CONTAINS_TEMPLATE_P (TREE_TYPE (NODE)))
+
+/* True if TYPE is either a fat or thin pointer to an unconstrained
+ array. */
+#define TYPE_IS_FAT_OR_THIN_POINTER_P(NODE) \
+ (TYPE_IS_FAT_POINTER_P (NODE) || TYPE_IS_THIN_POINTER_P (NODE))
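+
+/* Illustrative layout (field names hypothetical): a fat pointer is a
+   RECORD_TYPE of the form { data pointer, bounds pointer }, while a thin
+   pointer is a single pointer to a record { bounds template, data },
+   which is what the TYPE_CONTAINS_TEMPLATE_P test above detects.  */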
+
+/* For INTEGER_TYPEs, nonzero if the type has a biased representation. */
+#define TYPE_BIASED_REPRESENTATION_P(NODE) \
+ TYPE_LANG_FLAG_4 (INTEGER_TYPE_CHECK (NODE))
+
+/* For ARRAY_TYPEs, nonzero if the array type has Convention_Fortran. */
+#define TYPE_CONVENTION_FORTRAN_P(NODE) \
+ TYPE_LANG_FLAG_4 (ARRAY_TYPE_CHECK (NODE))
+
+/* For FUNCTION_TYPEs, nonzero if the function returns by direct reference,
+ i.e. the callee returns a pointer to a memory location it has allocated
+ and the caller only needs to dereference the pointer. */
+#define TYPE_RETURN_BY_DIRECT_REF_P(NODE) \
+ TYPE_LANG_FLAG_4 (FUNCTION_TYPE_CHECK (NODE))
+
+/* For RECORD_TYPE, UNION_TYPE and ENUMERAL_TYPE, nonzero if this is a dummy
+ type, made to correspond to a private or incomplete type. */
+#define TYPE_DUMMY_P(NODE) \
+ TYPE_LANG_FLAG_4 (TREE_CHECK3 (NODE, RECORD_TYPE, UNION_TYPE, ENUMERAL_TYPE))
+
+#define TYPE_IS_DUMMY_P(NODE) \
+ ((TREE_CODE (NODE) == RECORD_TYPE \
+ || TREE_CODE (NODE) == UNION_TYPE \
+ || TREE_CODE (NODE) == ENUMERAL_TYPE) \
+ && TYPE_DUMMY_P (NODE))
+
+/* For an INTEGER_TYPE, nonzero if TYPE_ACTUAL_BOUNDS is present. */
+#define TYPE_HAS_ACTUAL_BOUNDS_P(NODE) \
+ TYPE_LANG_FLAG_5 (INTEGER_TYPE_CHECK (NODE))
+
+/* For a RECORD_TYPE, nonzero if this was made just to supply needed
+ padding or alignment. */
+#define TYPE_PADDING_P(NODE) TYPE_LANG_FLAG_5 (RECORD_TYPE_CHECK (NODE))
+
+#define TYPE_IS_PADDING_P(NODE) \
+ (TREE_CODE (NODE) == RECORD_TYPE && TYPE_PADDING_P (NODE))
+
+/* True if TYPE can alias any other types. */
+#define TYPE_UNIVERSAL_ALIASING_P(NODE) TYPE_LANG_FLAG_6 (NODE)
+
+/* For an UNCONSTRAINED_ARRAY_TYPE, this is the record containing both the
+ template and the object.
+
+ ??? We also put this on an ENUMERAL_TYPE that is dummy. Technically,
+ this is a conflict on the minval field, but there doesn't seem to be a
+ simple fix, so we'll live with this kludge for now. */
+#define TYPE_OBJECT_RECORD_TYPE(NODE) \
+ (TYPE_MINVAL (TREE_CHECK2 ((NODE), UNCONSTRAINED_ARRAY_TYPE, ENUMERAL_TYPE)))
+
+/* For numerical types, this is the GCC lower bound of the type. The GCC
+ type system is based on the invariant that an object X of a given type
+ cannot hold at run time a value smaller than its lower bound; otherwise
+ the behavior is undefined. The optimizer takes advantage of this and
+ considers that the assertion X >= LB is always true. */
+#define TYPE_GCC_MIN_VALUE(NODE) (TYPE_MINVAL (NUMERICAL_TYPE_CHECK (NODE)))
+
+/* For numerical types, this is the GCC upper bound of the type. The GCC
+ type system is based on the invariant that an object X of a given type
+ cannot hold at run time a value larger than its upper bound; otherwise
+ the behavior is undefined. The optimizer takes advantage of this and
+ considers that the assertion X <= UB is always true. */
+#define TYPE_GCC_MAX_VALUE(NODE) (TYPE_MAXVAL (NUMERICAL_TYPE_CHECK (NODE)))
+
+/* For a FUNCTION_TYPE, if the subprogram has parameters passed by copy in/
+ copy out, this is the list of nodes used to specify the return values of
+ the out (or in out) parameters that are passed by copy in/copy out. For
+ a full description of the copy in/copy out parameter passing mechanism
+ refer to the routine gnat_to_gnu_entity. */
+#define TYPE_CI_CO_LIST(NODE) TYPE_LANG_SLOT_1 (FUNCTION_TYPE_CHECK (NODE))
+
+/* For a VECTOR_TYPE, this is the representative array type. */
+#define TYPE_REPRESENTATIVE_ARRAY(NODE) \
+ TYPE_LANG_SLOT_1 (VECTOR_TYPE_CHECK (NODE))
+
+/* For numerical types, this holds various RM-defined values. */
+#define TYPE_RM_VALUES(NODE) TYPE_LANG_SLOT_1 (NUMERICAL_TYPE_CHECK (NODE))
+
+/* Macros to get and set the individual values in TYPE_RM_VALUES. */
+#define TYPE_RM_VALUE(NODE, N) \
+ (TYPE_RM_VALUES (NODE) \
+ ? TREE_VEC_ELT (TYPE_RM_VALUES (NODE), (N)) : NULL_TREE)
+
+#define SET_TYPE_RM_VALUE(NODE, N, X) \
+do { \
+ tree tmp = (X); \
+ if (!TYPE_RM_VALUES (NODE)) \
+ TYPE_RM_VALUES (NODE) = make_tree_vec (3); \
+ /* ??? The field is not visited by the generic \
+ code so we need to mark it manually. */ \
+ MARK_VISITED (tmp); \
+ TREE_VEC_ELT (TYPE_RM_VALUES (NODE), (N)) = tmp; \
+} while (0)
+
+/* For numerical types, this is the RM size of the type, aka its precision.
+ There is a discrepancy between what is called precision here (and more
+ generally throughout gigi) and what is called precision in the GCC type
+ system: in the former case it's TYPE_RM_SIZE whereas it's TYPE_PRECISION
+ in the latter case. They are not identical because of the need to support
+ invalid values.
+
+ These values can be outside the range of values allowed by the RM size
+ but they must nevertheless be valid in the GCC type system, otherwise
+ the optimizer can pretend that they simply don't exist. Therefore they
+ must be within the range of values allowed by the precision in the GCC
+ sense, hence TYPE_PRECISION must be set to the Esize, not the RM size. */
+#define TYPE_RM_SIZE(NODE) TYPE_RM_VALUE ((NODE), 0)
+#define SET_TYPE_RM_SIZE(NODE, X) SET_TYPE_RM_VALUE ((NODE), 0, (X))
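+
+/* Hedged illustration (numbers hypothetical): for a type with a Size
+   clause of 10 bits, TYPE_RM_SIZE would be 10 while TYPE_PRECISION
+   would match the Esize, say 16, so that out-of-range intermediate
+   values remain representable in the GCC type system.  */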
+
+/* For numerical types, this is the RM lower bound of the type. There is
+ again a discrepancy between this lower bound and the GCC lower bound,
+ again because of the need to support invalid values.
+
+ These values can be outside the range of values allowed by the RM lower
+ bound but they must nevertheless be valid in the GCC type system, otherwise
+ the optimizer can pretend that they simply don't exist. Therefore they
+ must be within the range of values allowed by the lower bound in the GCC
+ sense, hence the GCC lower bound must be set to that of the base type. */
+#define TYPE_RM_MIN_VALUE(NODE) TYPE_RM_VALUE ((NODE), 1)
+#define SET_TYPE_RM_MIN_VALUE(NODE, X) SET_TYPE_RM_VALUE ((NODE), 1, (X))
+
+/* For numerical types, this is the RM upper bound of the type. There is
+ again a discrepancy between this upper bound and the GCC upper bound,
+ again because of the need to support invalid values.
+
+ These values can be outside the range of values allowed by the RM upper
+ bound but they must nevertheless be valid in the GCC type system, otherwise
+ the optimizer can pretend that they simply don't exist. Therefore they
+ must be within the range of values allowed by the upper bound in the GCC
+ sense, hence the GCC upper bound must be set to that of the base type. */
+#define TYPE_RM_MAX_VALUE(NODE) TYPE_RM_VALUE ((NODE), 2)
+#define SET_TYPE_RM_MAX_VALUE(NODE, X) SET_TYPE_RM_VALUE ((NODE), 2, (X))
+
+/* For numerical types, this is the lower bound of the type, i.e. the RM lower
+ bound for language-defined types and the GCC lower bound for others. */
+#undef TYPE_MIN_VALUE
+#define TYPE_MIN_VALUE(NODE) \
+ (TYPE_RM_MIN_VALUE (NODE) \
+ ? TYPE_RM_MIN_VALUE (NODE) : TYPE_GCC_MIN_VALUE (NODE))
+
+/* For numerical types, this is the upper bound of the type, i.e. the RM upper
+ bound for language-defined types and the GCC upper bound for others. */
+#undef TYPE_MAX_VALUE
+#define TYPE_MAX_VALUE(NODE) \
+ (TYPE_RM_MAX_VALUE (NODE) \
+ ? TYPE_RM_MAX_VALUE (NODE) : TYPE_GCC_MAX_VALUE (NODE))
+
+/* For an INTEGER_TYPE with TYPE_MODULAR_P, this is the value of the
+ modulus. */
+#define TYPE_MODULUS(NODE) \
+ GET_TYPE_LANG_SPECIFIC (INTEGER_TYPE_CHECK (NODE))
+#define SET_TYPE_MODULUS(NODE, X) \
+ SET_TYPE_LANG_SPECIFIC (INTEGER_TYPE_CHECK (NODE), X)
+
+/* For an INTEGER_TYPE with TYPE_VAX_FLOATING_POINT_P, this is the
+ Digits_Value. */
+#define TYPE_DIGITS_VALUE(NODE) \
+ GET_TYPE_LANG_SPECIFIC (INTEGER_TYPE_CHECK (NODE))
+#define SET_TYPE_DIGITS_VALUE(NODE, X) \
+ SET_TYPE_LANG_SPECIFIC (INTEGER_TYPE_CHECK (NODE), X)
+
+/* For an INTEGER_TYPE that is the TYPE_DOMAIN of some ARRAY_TYPE, this is
+ the type corresponding to the Ada index type. */
+#define TYPE_INDEX_TYPE(NODE) \
+ GET_TYPE_LANG_SPECIFIC (INTEGER_TYPE_CHECK (NODE))
+#define SET_TYPE_INDEX_TYPE(NODE, X) \
+ SET_TYPE_LANG_SPECIFIC (INTEGER_TYPE_CHECK (NODE), X)
+
+/* For an INTEGER_TYPE with TYPE_HAS_ACTUAL_BOUNDS_P or an ARRAY_TYPE, this is
+ the index type that should be used when the actual bounds are required for
+ a template. This is used in the case of packed arrays. */
+#define TYPE_ACTUAL_BOUNDS(NODE) \
+ GET_TYPE_LANG_SPECIFIC (TREE_CHECK2 (NODE, INTEGER_TYPE, ARRAY_TYPE))
+#define SET_TYPE_ACTUAL_BOUNDS(NODE, X) \
+ SET_TYPE_LANG_SPECIFIC (TREE_CHECK2 (NODE, INTEGER_TYPE, ARRAY_TYPE), X)
+
+/* For a POINTER_TYPE that points to the template type of an unconstrained
+ array type, this is the address to be used in a null fat pointer. */
+#define TYPE_NULL_BOUNDS(NODE) \
+ GET_TYPE_LANG_SPECIFIC (POINTER_TYPE_CHECK (NODE))
+#define SET_TYPE_NULL_BOUNDS(NODE, X) \
+ SET_TYPE_LANG_SPECIFIC (POINTER_TYPE_CHECK (NODE), X)
+
+/* For a RECORD_TYPE that is a fat pointer, this is the type for the
+ unconstrained array. Likewise for a RECORD_TYPE that is pointed
+ to by a thin pointer, if it is made for the unconstrained array
+ type itself; the field is NULL_TREE if the RECORD_TYPE is made
+ for a constrained subtype of the array type. */
+#define TYPE_UNCONSTRAINED_ARRAY(NODE) \
+ GET_TYPE_LANG_SPECIFIC (RECORD_TYPE_CHECK (NODE))
+#define SET_TYPE_UNCONSTRAINED_ARRAY(NODE, X) \
+ SET_TYPE_LANG_SPECIFIC (RECORD_TYPE_CHECK (NODE), X)
+
+/* For other RECORD_TYPEs and all UNION_TYPEs and QUAL_UNION_TYPEs, this is
+ the Ada size of the object. This differs from the GCC size in that it
+ does not include any rounding up to the alignment of the type. */
+#define TYPE_ADA_SIZE(NODE) \
+ GET_TYPE_LANG_SPECIFIC (RECORD_OR_UNION_CHECK (NODE))
+#define SET_TYPE_ADA_SIZE(NODE, X) \
+ SET_TYPE_LANG_SPECIFIC (RECORD_OR_UNION_CHECK (NODE), X)
+
+
+/* Flags added to decl nodes. */
+
+/* Nonzero in a FUNCTION_DECL that represents a stubbed function
+ discriminant. */
+#define DECL_STUBBED_P(NODE) DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))
+
+/* Nonzero in a VAR_DECL if it is guaranteed to be constant after having
+ been elaborated and TREE_READONLY is not set on it. */
+#define DECL_READONLY_ONCE_ELAB(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))
+
+/* Nonzero in a CONST_DECL if its value is (essentially) the address of a
+ constant CONSTRUCTOR. */
+#define DECL_CONST_ADDRESS_P(NODE) DECL_LANG_FLAG_0 (CONST_DECL_CHECK (NODE))
+
+/* Nonzero in a FIELD_DECL if it is declared as aliased. */
+#define DECL_ALIASED_P(NODE) DECL_LANG_FLAG_0 (FIELD_DECL_CHECK (NODE))
+
+/* Nonzero in a TYPE_DECL if this is the declaration of a Taft amendment type
+ in the main unit, i.e. the full declaration is available. */
+#define DECL_TAFT_TYPE_P(NODE) DECL_LANG_FLAG_0 (TYPE_DECL_CHECK (NODE))
+
+/* Nonzero in a DECL if it is always used by reference, i.e. an INDIRECT_REF
+ is needed to access the object. */
+#define DECL_BY_REF_P(NODE) DECL_LANG_FLAG_1 (NODE)
+
+/* Nonzero in a DECL if it is made for a pointer that can never be null. */
+#define DECL_CAN_NEVER_BE_NULL_P(NODE) DECL_LANG_FLAG_2 (NODE)
+
+/* Nonzero in a VAR_DECL if it is made for a loop parameter. */
+#define DECL_LOOP_PARM_P(NODE) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (NODE))
+
+/* Nonzero in a FIELD_DECL that is a dummy built for some internal reason. */
+#define DECL_INTERNAL_P(NODE) DECL_LANG_FLAG_3 (FIELD_DECL_CHECK (NODE))
+
+/* Nonzero in a PARM_DECL if it is made for an Ada array being passed to a
+ foreign convention subprogram. */
+#define DECL_BY_COMPONENT_PTR_P(NODE) DECL_LANG_FLAG_3 (PARM_DECL_CHECK (NODE))
+
+/* Nonzero in a FUNCTION_DECL that corresponds to an elaboration procedure. */
+#define DECL_ELABORATION_PROC_P(NODE) \
+ DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (NODE))
+
+/* Nonzero in a DECL if it is made for a pointer that points to something which
+ is readonly. */
+#define DECL_POINTS_TO_READONLY_P(NODE) DECL_LANG_FLAG_4 (NODE)
+
+/* Nonzero in a PARM_DECL if we are to pass by descriptor. */
+#define DECL_BY_DESCRIPTOR_P(NODE) DECL_LANG_FLAG_5 (PARM_DECL_CHECK (NODE))
+
+/* Nonzero in a VAR_DECL if it is a pointer renaming a global object. */
+#define DECL_RENAMING_GLOBAL_P(NODE) DECL_LANG_FLAG_5 (VAR_DECL_CHECK (NODE))
+
+/* In a FIELD_DECL corresponding to a discriminant, contains the
+ discriminant number. */
+#define DECL_DISCRIMINANT_NUMBER(NODE) DECL_INITIAL (FIELD_DECL_CHECK (NODE))
+
+/* In a CONST_DECL, points to a VAR_DECL that is allocatable to
+ memory. Used when a scalar constant is aliased or has its
+ address taken. */
+#define DECL_CONST_CORRESPONDING_VAR(NODE) \
+ GET_DECL_LANG_SPECIFIC (CONST_DECL_CHECK (NODE))
+#define SET_DECL_CONST_CORRESPONDING_VAR(NODE, X) \
+ SET_DECL_LANG_SPECIFIC (CONST_DECL_CHECK (NODE), X)
+
+/* In a FIELD_DECL, points to the FIELD_DECL that was the ultimate
+ source of the decl. */
+#define DECL_ORIGINAL_FIELD(NODE) \
+ GET_DECL_LANG_SPECIFIC (FIELD_DECL_CHECK (NODE))
+#define SET_DECL_ORIGINAL_FIELD(NODE, X) \
+ SET_DECL_LANG_SPECIFIC (FIELD_DECL_CHECK (NODE), X)
+
+/* Set DECL_ORIGINAL_FIELD of FIELD1 to (that of) FIELD2. */
+#define SET_DECL_ORIGINAL_FIELD_TO_FIELD(FIELD1, FIELD2) \
+ SET_DECL_ORIGINAL_FIELD ((FIELD1), \
+ DECL_ORIGINAL_FIELD (FIELD2) \
+ ? DECL_ORIGINAL_FIELD (FIELD2) : (FIELD2))
+
+/* Return true if FIELD1 and FIELD2 represent the same field. */
+#define SAME_FIELD_P(FIELD1, FIELD2) \
+ ((FIELD1) == (FIELD2) \
+ || DECL_ORIGINAL_FIELD (FIELD1) == (FIELD2) \
+ || (FIELD1) == DECL_ORIGINAL_FIELD (FIELD2) \
+ || (DECL_ORIGINAL_FIELD (FIELD1) \
+ && (DECL_ORIGINAL_FIELD (FIELD1) == DECL_ORIGINAL_FIELD (FIELD2))))
+
+/* In a VAR_DECL with the DECL_LOOP_PARM_P flag set, points to the special
+ induction variable that is built under certain circumstances, if any. */
+#define DECL_INDUCTION_VAR(NODE) \
+ GET_DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))
+#define SET_DECL_INDUCTION_VAR(NODE, X) \
+ SET_DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE), X)
+
+/* In a VAR_DECL without the DECL_LOOP_PARM_P flag set and that is a renaming
+ pointer, points to the object being renamed, if any. Note that this object
+ is guaranteed to be protected against multiple evaluations. */
+#define DECL_RENAMED_OBJECT(NODE) \
+ GET_DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))
+#define SET_DECL_RENAMED_OBJECT(NODE, X) \
+ SET_DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE), X)
+
+/* In a TYPE_DECL, points to the parallel type if any, otherwise 0. */
+#define DECL_PARALLEL_TYPE(NODE) \
+ GET_DECL_LANG_SPECIFIC (TYPE_DECL_CHECK (NODE))
+#define SET_DECL_PARALLEL_TYPE(NODE, X) \
+ SET_DECL_LANG_SPECIFIC (TYPE_DECL_CHECK (NODE), X)
+
+/* In a FUNCTION_DECL, points to the stub associated with the function
+ if any, otherwise 0. */
+#define DECL_FUNCTION_STUB(NODE) \
+ GET_DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE))
+#define SET_DECL_FUNCTION_STUB(NODE, X) \
+ SET_DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE), X)
+
+/* In a PARM_DECL, points to the alternate TREE_TYPE. */
+#define DECL_PARM_ALT_TYPE(NODE) \
+ GET_DECL_LANG_SPECIFIC (PARM_DECL_CHECK (NODE))
+#define SET_DECL_PARM_ALT_TYPE(NODE, X) \
+ SET_DECL_LANG_SPECIFIC (PARM_DECL_CHECK (NODE), X)
+
+
+/* Flags added to ref nodes. */
+
+/* Nonzero means this node will not trap. */
+#undef TREE_THIS_NOTRAP
+#define TREE_THIS_NOTRAP(NODE) \
+ (TREE_CHECK4 (NODE, INDIRECT_REF, ARRAY_REF, UNCONSTRAINED_ARRAY_REF, \
+ ARRAY_RANGE_REF)->base.nothrow_flag)
+
+
+/* Fields and macros for statements. */
+#define IS_ADA_STMT(NODE) \
+ (STATEMENT_CLASS_P (NODE) && TREE_CODE (NODE) >= STMT_STMT)
+
+#define STMT_STMT_STMT(NODE) TREE_OPERAND_CHECK_CODE (NODE, STMT_STMT, 0)
+
+#define LOOP_STMT_COND(NODE) TREE_OPERAND_CHECK_CODE (NODE, LOOP_STMT, 0)
+#define LOOP_STMT_UPDATE(NODE) TREE_OPERAND_CHECK_CODE (NODE, LOOP_STMT, 1)
+#define LOOP_STMT_BODY(NODE) TREE_OPERAND_CHECK_CODE (NODE, LOOP_STMT, 2)
+#define LOOP_STMT_LABEL(NODE) TREE_OPERAND_CHECK_CODE (NODE, LOOP_STMT, 3)
+
+/* A loop statement is conceptually made up of 6 sub-statements:
+
+ loop:
+ TOP_CONDITION
+ TOP_UPDATE
+ BODY
+ BOTTOM_CONDITION
+ BOTTOM_UPDATE
+ GOTO loop
+
+ However, only 4 of them can exist for a given loop, the pair of conditions
+ and the pair of updates being mutually exclusive. The default setting is
+ TOP_CONDITION and BOTTOM_UPDATE and the following couple of flags are used
+ to toggle the individual settings. */
+#define LOOP_STMT_BOTTOM_COND_P(NODE) TREE_LANG_FLAG_0 (LOOP_STMT_CHECK (NODE))
+#define LOOP_STMT_TOP_UPDATE_P(NODE) TREE_LANG_FLAG_1 (LOOP_STMT_CHECK (NODE))
+
+/* Optimization hints on loops. */
+#define LOOP_STMT_NO_UNROLL(NODE) TREE_LANG_FLAG_2 (LOOP_STMT_CHECK (NODE))
+#define LOOP_STMT_UNROLL(NODE) TREE_LANG_FLAG_3 (LOOP_STMT_CHECK (NODE))
+#define LOOP_STMT_NO_VECTOR(NODE) TREE_LANG_FLAG_4 (LOOP_STMT_CHECK (NODE))
+#define LOOP_STMT_VECTOR(NODE) TREE_LANG_FLAG_5 (LOOP_STMT_CHECK (NODE))
+
+#define EXIT_STMT_COND(NODE) TREE_OPERAND_CHECK_CODE (NODE, EXIT_STMT, 0)
+#define EXIT_STMT_LABEL(NODE) TREE_OPERAND_CHECK_CODE (NODE, EXIT_STMT, 1)
diff --git a/gcc-4.9/gcc/ada/gcc-interface/ada.h b/gcc-4.9/gcc/ada/gcc-interface/ada.h
new file mode 100644
index 000000000..197ab95d2
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/ada.h
@@ -0,0 +1,73 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * A D A *
+ * *
+ * C Header File *
+ * *
+ * Copyright (C) 1992-2013, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License distributed with GNAT; see file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+/* This file contains some standard macros for performing Ada-like
+ operations. These are used to aid in the translation of other headers. */
+
+#ifndef GCC_ADA_H
+#define GCC_ADA_H
+
+/* Inlined functions in headers are preceded by INLINE, which is normally set
+ to static inline for GCC, but may be set to plain static for use in
+ standard ANSI C. */
+
+#ifndef INLINE
+#ifdef __GNUC__
+#define INLINE static inline
+#else
+#define INLINE static
+#endif
+#endif
+
+/* Define a macro to concatenate two strings. Write it for ANSI C and
+ for traditional C. */
+
+#ifdef __STDC__
+#define CAT(A,B) A##B
+#else
+#define _ECHO(A) A
+#define CAT(A,B) _ECHO(A)B
+#endif
+
+/* The following macro definition simulates the effect of a declaration of
+ a subtype, where the first two parameters give the name of the type and
+ subtype, and the third and fourth parameters give the subtype range. The
+ effect is to compile a typedef defining the subtype as a synonym for the
+ type, together with two constants defining the end points. */
+
+#define SUBTYPE(SUBTYPE,TYPE,FIRST,LAST) \
+ typedef TYPE SUBTYPE; \
+ enum { CAT (SUBTYPE,__First) = FIRST, \
+ CAT (SUBTYPE,__Last) = LAST };
+
+/* The following definition provides the equivalent of the Ada IN operator,
+ assuming that the subtype involved has been defined using the SUBTYPE
+ macro defined above. */
+
+#define IN(VALUE,SUBTYPE) \
+ (((VALUE) >= (SUBTYPE) CAT (SUBTYPE,__First)) \
+ && ((VALUE) <= (SUBTYPE) CAT (SUBTYPE,__Last)))
+
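+/* Usage sketch (names hypothetical): an Ada declaration such as
+   "subtype Priority is Int range 1 .. 30" would be rendered as
+
+     SUBTYPE (Priority, Int, 1, 30)
+
+   after which IN (p, Priority) expands to a range test equivalent to
+   1 <= p && p <= 30.  */
+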
+#endif
diff --git a/gcc-4.9/gcc/ada/gcc-interface/config-lang.in b/gcc-4.9/gcc/ada/gcc-interface/config-lang.in
new file mode 100644
index 000000000..5dc77df28
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/config-lang.in
@@ -0,0 +1,41 @@
+# Top level configure fragment for GNU Ada (GNAT).
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+#This file is part of GCC.
+
+#GCC is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 3, or (at your option)
+#any later version.
+
+#GCC is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with GCC; see the file COPYING3. If not see
+#<http://www.gnu.org/licenses/>.
+
+# Configure looks for the existence of this file to auto-config each language.
+# We define several parameters used by configure:
+#
+# language - name of language as it would appear in $(LANGUAGES)
+# gcc_subdir - where the gcc integration files are to be found
+# boot_language - "yes" if we need to build this language in stage1
+# compilers - value to add to $(COMPILERS)
+
+language="ada"
+gcc_subdir="ada/gcc-interface"
+boot_language=yes
+compilers="gnat1\$(exeext)"
+
+gtfiles="\$(srcdir)/ada/gcc-interface/ada-tree.h \$(srcdir)/ada/gcc-interface/gigi.h \$(srcdir)/ada/gcc-interface/decl.c \$(srcdir)/ada/gcc-interface/trans.c \$(srcdir)/ada/gcc-interface/utils.c \$(srcdir)/ada/gcc-interface/misc.c"
+
+outputs="ada/gcc-interface/Makefile ada/Makefile"
+
+target_libs="target-libada"
+lang_dirs="gnattools"
+
+# Ada is not enabled by default for the time being.
+build_by_default=no
diff --git a/gcc-4.9/gcc/ada/gcc-interface/cuintp.c b/gcc-4.9/gcc/ada/gcc-interface/cuintp.c
new file mode 100644
index 000000000..c5736f5ec
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/cuintp.c
@@ -0,0 +1,183 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * C U I N T P *
+ * *
+ * C Implementation File *
+ * *
+ * Copyright (C) 1992-2014, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License along with GCC; see the file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+/* This file corresponds to the Ada package body Uintp. It was created
+ manually from the files uintp.ads and uintp.adb. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+
+#include "ada.h"
+#include "types.h"
+#include "uintp.h"
+#include "ada-tree.h"
+#include "gigi.h"
+
+/* Universal integers are represented by the Uint type which is an index into
+ the Uints_Ptr table containing Uint_Entry values. A Uint_Entry contains an
+ index and length for getting the "digits" of the universal integer from the
+ Udigits_Ptr table.
+
+ For efficiency, this method is used only for integer values larger than the
+ constant Uint_Bias. If a Uint is less than this constant, then it contains
+ the integer value itself. The origin of the Uints_Ptr table is adjusted so
+ that a Uint value of Uint_Bias indexes the first element.
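+
+   As an illustration (digit names hypothetical): a large value stored
+   as digits D1, D2, D3 in base B is rebuilt by UI_To_gnu below using
+   Horner's scheme, ((D1 * B) + D2) * B + D3, with MINUS_EXPR replacing
+   PLUS_EXPR when the leading digit is negative.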
+
+ First define a utility function that operates like build_int_cst_type for
+ integral types and does a conversion for floating-point types. */
+
+static tree
+build_cst_from_int (tree type, HOST_WIDE_INT low)
+{
+ if (SCALAR_FLOAT_TYPE_P (type))
+ return convert (type, build_int_cst (gnat_type_for_size (32, 0), low));
+ else
+ return build_int_cst_type (type, low);
+}
+
+/* Similar to UI_To_Int, but return a GCC INTEGER_CST or REAL_CST node,
+ depending on whether TYPE is an integral or real type. Overflow is tested
+ by the constant-folding used to build the node. TYPE is the GCC type of
+ the resulting node. */
+
+tree
+UI_To_gnu (Uint Input, tree type)
+{
+ /* We might have a TYPE with biased representation and be passed an unbiased
+ value that doesn't fit. We always use an unbiased type to be able to hold
+ any such possible value for intermediate computations and then rely on a
+ conversion back to TYPE to perform the bias adjustment when need be. */
+ tree comp_type
+ = TREE_CODE (type) == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (type)
+ ? get_base_type (type) : type;
+ tree gnu_ret;
+
+ if (Input <= Uint_Direct_Last)
+ gnu_ret = build_cst_from_int (comp_type, Input - Uint_Direct_Bias);
+ else
+ {
+ Int Idx = Uints_Ptr[Input].Loc;
+ Pos Length = Uints_Ptr[Input].Length;
+ Int First = Udigits_Ptr[Idx];
+ tree gnu_base;
+
+ gcc_assert (Length > 0);
+
+ /* The computations we perform below always require a type at least as
+ large as an integer not to overflow. FP types are always fine, but
+ INTEGER or ENUMERAL types we are handed may be too short. We use a
+ base integer type node for the computations in this case and will
+ convert the final result back to the incoming type later on. */
+ if (!SCALAR_FLOAT_TYPE_P (comp_type) && TYPE_PRECISION (comp_type) < 32)
+ comp_type = gnat_type_for_size (32, 0);
+
+ gnu_base = build_cst_from_int (comp_type, Base);
+
+ gnu_ret = build_cst_from_int (comp_type, First);
+ if (First < 0)
+ for (Idx++, Length--; Length; Idx++, Length--)
+ gnu_ret = fold_build2 (MINUS_EXPR, comp_type,
+ fold_build2 (MULT_EXPR, comp_type,
+ gnu_ret, gnu_base),
+ build_cst_from_int (comp_type,
+ Udigits_Ptr[Idx]));
+ else
+ for (Idx++, Length--; Length; Idx++, Length--)
+ gnu_ret = fold_build2 (PLUS_EXPR, comp_type,
+ fold_build2 (MULT_EXPR, comp_type,
+ gnu_ret, gnu_base),
+ build_cst_from_int (comp_type,
+ Udigits_Ptr[Idx]));
+ }
+
+ gnu_ret = convert (type, gnu_ret);
+
+ /* We don't need any NOP_EXPR or NON_LVALUE_EXPR on GNU_RET. */
+ while ((TREE_CODE (gnu_ret) == NOP_EXPR
+ || TREE_CODE (gnu_ret) == NON_LVALUE_EXPR)
+ && TREE_TYPE (TREE_OPERAND (gnu_ret, 0)) == TREE_TYPE (gnu_ret))
+ gnu_ret = TREE_OPERAND (gnu_ret, 0);
+
+ return gnu_ret;
+}
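+
+/* A worked example of the digit loops above (with an illustrative base B):
+   for digits {d1, d2, d3} the positive loop computes (d1 * B + d2) * B + d3
+   by Horner's scheme, while the negative loop, entered when d1 < 0, computes
+   (d1 * B - d2) * B - d3, i.e. -((|d1| * B + d2) * B + d3).  */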
+
+/* Similar to UI_From_Int, but take a GCC INTEGER_CST. We use UI_From_Int
+ when possible, i.e. for a 32-bit signed value, to take advantage of its
+ built-in caching mechanism. For values of larger magnitude, we compute
+ digits into a vector and call Vector_To_Uint. */
+
+Uint
+UI_From_gnu (tree Input)
+{
+ tree gnu_type = TREE_TYPE (Input), gnu_base, gnu_temp;
+  /* UI_Base is defined so that 5 Uint digits are sufficient to hold the
+     largest possible signed 64-bit value.  */
+ const int Max_For_Dint = 5;
+ int v[Max_For_Dint], i;
+ Vector_Template temp;
+ Int_Vector vec;
+
+#if HOST_BITS_PER_WIDE_INT == 64
+ /* On 64-bit hosts, tree_fits_shwi_p tells whether the input fits in a
+ signed 64-bit integer. Then a truncation tells whether it fits
+ in a signed 32-bit integer. */
+ if (tree_fits_shwi_p (Input))
+ {
+ HOST_WIDE_INT hw_input = tree_to_shwi (Input);
+ if (hw_input == (int) hw_input)
+ return UI_From_Int (hw_input);
+ }
+ else
+ return No_Uint;
+#else
+ /* On 32-bit hosts, tree_fits_shwi_p tells whether the input fits in a
+ signed 32-bit integer. Then a sign test tells whether it fits
+ in a signed 64-bit integer. */
+ if (tree_fits_shwi_p (Input))
+ return UI_From_Int (tree_to_shwi (Input));
+ else if (TREE_INT_CST_HIGH (Input) < 0 && TYPE_UNSIGNED (gnu_type))
+ return No_Uint;
+#endif
+
+ gnu_base = build_int_cst (gnu_type, UI_Base);
+ gnu_temp = Input;
+
+ for (i = Max_For_Dint - 1; i >= 0; i--)
+ {
+ v[i] = tree_to_shwi (fold_build1 (ABS_EXPR, gnu_type,
+ fold_build2 (TRUNC_MOD_EXPR, gnu_type,
+ gnu_temp, gnu_base)));
+ gnu_temp = fold_build2 (TRUNC_DIV_EXPR, gnu_type, gnu_temp, gnu_base);
+ }
+
+ temp.Low_Bound = 1;
+ temp.High_Bound = Max_For_Dint;
+ vec.Bounds = &temp;
+ vec.Array = v;
+ return Vector_To_Uint (vec, tree_int_cst_sgn (Input) < 0);
+}
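+
+/* A worked example of the extraction loop above (with an illustrative base
+   of 100 instead of UI_Base): for an input of 10001, successive
+   TRUNC_MOD/TRUNC_DIV steps fill v[] from the right as {0, 0, 1, 0, 1},
+   i.e. 1*100**2 + 0*100 + 1, and Vector_To_Uint rebuilds the Uint from
+   these digits and the sign of the input.  */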
diff --git a/gcc-4.9/gcc/ada/gcc-interface/decl.c b/gcc-4.9/gcc/ada/gcc-interface/decl.c
new file mode 100644
index 000000000..4180e59f6
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/decl.c
@@ -0,0 +1,8924 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * D E C L *
+ * *
+ * C Implementation File *
+ * *
+ * Copyright (C) 1992-2014, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License along with GCC; see the file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "flags.h"
+#include "toplev.h"
+#include "ggc.h"
+#include "target.h"
+#include "tree-inline.h"
+#include "diagnostic-core.h"
+
+#include "ada.h"
+#include "types.h"
+#include "atree.h"
+#include "elists.h"
+#include "namet.h"
+#include "nlists.h"
+#include "repinfo.h"
+#include "snames.h"
+#include "stringt.h"
+#include "uintp.h"
+#include "fe.h"
+#include "sinfo.h"
+#include "einfo.h"
+#include "ada-tree.h"
+#include "gigi.h"
+
+/* "stdcall" and "thiscall" conventions should be processed in a specific way
+ on 32-bit x86/Windows only. The macros below are helpers to avoid having
+ to check for a Windows specific attribute throughout this unit. */
+
+#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
+#ifdef TARGET_64BIT
+#define Has_Stdcall_Convention(E) \
+ (!TARGET_64BIT && Convention (E) == Convention_Stdcall)
+#define Has_Thiscall_Convention(E) \
+ (!TARGET_64BIT && is_cplusplus_method (E))
+#else
+#define Has_Stdcall_Convention(E) (Convention (E) == Convention_Stdcall)
+#define Has_Thiscall_Convention(E) (is_cplusplus_method (E))
+#endif
+#else
+#define Has_Stdcall_Convention(E) 0
+#define Has_Thiscall_Convention(E) 0
+#endif
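+
+/* A concrete reading of the above: when the compiler is generating 64-bit
+   Windows code, Has_Stdcall_Convention (E) evaluates to false, so the
+   stdcall and thiscall processing in this unit only ever triggers for
+   32-bit x86 code.  */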
+
+/* Stack realignment is necessary for functions with foreign conventions when
+ the ABI doesn't mandate as much as what the compiler assumes - that is, up
+ to PREFERRED_STACK_BOUNDARY.
+
+ Such realignment can be requested with a dedicated function type attribute
+ on the targets that support it. We define FOREIGN_FORCE_REALIGN_STACK to
+ characterize the situations where the attribute should be set. We rely on
+ compiler configuration settings for 'main' to decide. */
+
+#ifdef MAIN_STACK_BOUNDARY
+#define FOREIGN_FORCE_REALIGN_STACK \
+ (MAIN_STACK_BOUNDARY < PREFERRED_STACK_BOUNDARY)
+#else
+#define FOREIGN_FORCE_REALIGN_STACK 0
+#endif
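+
+/* For instance (illustrative numbers): on a target where MAIN_STACK_BOUNDARY
+   is 32 bits but PREFERRED_STACK_BOUNDARY is 128 bits, incoming foreign
+   frames may be only 4-byte aligned while the compiler assumes 16 bytes, so
+   FOREIGN_FORCE_REALIGN_STACK is 1 and the realignment attribute is set.  */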
+
+struct incomplete
+{
+ struct incomplete *next;
+ tree old_type;
+ Entity_Id full_type;
+};
+
+/* These variables are used to defer recursively expanding incomplete types
+ while we are processing an array, a record or a subprogram type. */
+static int defer_incomplete_level = 0;
+static struct incomplete *defer_incomplete_list;
+
+/* This variable is used to delay expanding From_Limited_With types until the
+ end of the spec. */
+static struct incomplete *defer_limited_with;
+
+typedef struct subst_pair_d {
+ tree discriminant;
+ tree replacement;
+} subst_pair;
+
+
+typedef struct variant_desc_d {
+ /* The type of the variant. */
+ tree type;
+
+ /* The associated field. */
+ tree field;
+
+ /* The value of the qualifier. */
+ tree qual;
+
+ /* The type of the variant after transformation. */
+ tree new_type;
+} variant_desc;
+
+
+/* A hash table used to cache the result of annotate_value. */
+static GTY ((if_marked ("tree_int_map_marked_p"),
+ param_is (struct tree_int_map))) htab_t annotate_value_cache;
+
+static bool allocatable_size_p (tree, bool);
+static void prepend_one_attribute (struct attrib **,
+ enum attr_type, tree, tree, Node_Id);
+static void prepend_one_attribute_pragma (struct attrib **, Node_Id);
+static void prepend_attributes (struct attrib **, Entity_Id);
+static tree elaborate_expression (Node_Id, Entity_Id, tree, bool, bool, bool);
+static bool type_has_variable_size (tree);
+static tree elaborate_expression_1 (tree, Entity_Id, tree, bool, bool);
+static tree elaborate_expression_2 (tree, Entity_Id, tree, bool, bool,
+ unsigned int);
+static tree gnat_to_gnu_component_type (Entity_Id, bool, bool);
+static tree gnat_to_gnu_param (Entity_Id, Mechanism_Type, Entity_Id, bool,
+ bool *);
+static tree gnat_to_gnu_field (Entity_Id, tree, int, bool, bool);
+static bool same_discriminant_p (Entity_Id, Entity_Id);
+static bool array_type_has_nonaliased_component (tree, Entity_Id);
+static bool compile_time_known_address_p (Node_Id);
+static bool cannot_be_superflat_p (Node_Id);
+static bool constructor_address_p (tree);
+static int compare_field_bitpos (const PTR, const PTR);
+static bool components_to_record (tree, Node_Id, tree, int, bool, bool, bool,
+ bool, bool, bool, bool, bool, tree, tree *);
+static Uint annotate_value (tree);
+static void annotate_rep (Entity_Id, tree);
+static tree build_position_list (tree, bool, tree, tree, unsigned int, tree);
+static vec<subst_pair> build_subst_list (Entity_Id, Entity_Id, bool);
+static vec<variant_desc> build_variant_list (tree,
+ vec<subst_pair> ,
+ vec<variant_desc> );
+static tree validate_size (Uint, tree, Entity_Id, enum tree_code, bool, bool);
+static void set_rm_size (Uint, tree, Entity_Id);
+static unsigned int validate_alignment (Uint, Entity_Id, unsigned int);
+static void check_ok_for_atomic (tree, Entity_Id, bool);
+static tree create_field_decl_from (tree, tree, tree, tree, tree,
+ vec<subst_pair> );
+static tree create_rep_part (tree, tree, tree);
+static tree get_rep_part (tree);
+static tree create_variant_part_from (tree, vec<variant_desc> , tree,
+ tree, vec<subst_pair> );
+static void copy_and_substitute_in_size (tree, tree, vec<subst_pair> );
+
+/* The relevant constituents of a subprogram binding to a GCC builtin. Used
+ to pass around calls performing profile compatibility checks. */
+
+typedef struct {
+ Entity_Id gnat_entity; /* The Ada subprogram entity. */
+ tree ada_fntype; /* The corresponding GCC type node. */
+ tree btin_fntype; /* The GCC builtin function type node. */
+} intrin_binding_t;
+
+static bool intrin_profiles_compatible_p (intrin_binding_t *);
+
+/* Given GNAT_ENTITY, a GNAT defining identifier node, which denotes some Ada
+ entity, return the equivalent GCC tree for that entity (a ..._DECL node)
+ and associate the ..._DECL node with the input GNAT defining identifier.
+
+ If GNAT_ENTITY is a variable or a constant declaration, GNU_EXPR gives its
+ initial value (in GCC tree form). This is optional for a variable. For
+ a renamed entity, GNU_EXPR gives the object being renamed.
+
+ DEFINITION is nonzero if this call is intended for a definition. This is
+ used for separate compilation where it is necessary to know whether an
+ external declaration or a definition must be created if the GCC equivalent
+ was not created previously. The value of 1 is normally used for a nonzero
+ DEFINITION, but a value of 2 is used in special circumstances, defined in
+ the code. */
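+
+/* For illustration: a mere reference from elsewhere in gigi typically looks
+   like gnat_to_gnu_entity (gnat_entity, NULL_TREE, 0), as in the Itype scope
+   walk below, whereas a definition passes the initializer (or NULL_TREE) and
+   a DEFINITION of 1.  */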
+
+tree
+gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
+{
+ /* Contains the kind of the input GNAT node. */
+ const Entity_Kind kind = Ekind (gnat_entity);
+ /* True if this is a type. */
+ const bool is_type = IN (kind, Type_Kind);
+ /* True if debug info is requested for this entity. */
+ const bool debug_info_p = Needs_Debug_Info (gnat_entity);
+ /* True if this entity is to be considered as imported. */
+ const bool imported_p
+ = (Is_Imported (gnat_entity) && No (Address_Clause (gnat_entity)));
+ /* For a type, contains the equivalent GNAT node to be used in gigi. */
+ Entity_Id gnat_equiv_type = Empty;
+ /* Temporary used to walk the GNAT tree. */
+ Entity_Id gnat_temp;
+ /* Contains the GCC DECL node which is equivalent to the input GNAT node.
+     This node will be associated with the GNAT node by a call to
+     save_gnu_tree at the end of the `switch' statement.  */
+ tree gnu_decl = NULL_TREE;
+ /* Contains the GCC type to be used for the GCC node. */
+ tree gnu_type = NULL_TREE;
+ /* Contains the GCC size tree to be used for the GCC node. */
+ tree gnu_size = NULL_TREE;
+ /* Contains the GCC name to be used for the GCC node. */
+ tree gnu_entity_name;
+ /* True if we have already saved gnu_decl as a GNAT association. */
+ bool saved = false;
+ /* True if we incremented defer_incomplete_level. */
+ bool this_deferred = false;
+ /* True if we incremented force_global. */
+ bool this_global = false;
+ /* True if we should check to see if elaborated during processing. */
+ bool maybe_present = false;
+ /* True if we made GNU_DECL and its type here. */
+ bool this_made_decl = false;
+ /* Size and alignment of the GCC node, if meaningful. */
+ unsigned int esize = 0, align = 0;
+ /* Contains the list of attributes directly attached to the entity. */
+ struct attrib *attr_list = NULL;
+
+ /* Since a use of an Itype is a definition, process it as such if it
+ is not in a with'ed unit. */
+ if (!definition
+ && is_type
+ && Is_Itype (gnat_entity)
+ && !present_gnu_tree (gnat_entity)
+ && In_Extended_Main_Code_Unit (gnat_entity))
+ {
+ /* Ensure that we are in a subprogram mentioned in the Scope chain of
+ this entity, our current scope is global, or we encountered a task
+ or entry (where we can't currently accurately check scoping). */
+ if (!current_function_decl
+ || DECL_ELABORATION_PROC_P (current_function_decl))
+ {
+ process_type (gnat_entity);
+ return get_gnu_tree (gnat_entity);
+ }
+
+ for (gnat_temp = Scope (gnat_entity);
+ Present (gnat_temp);
+ gnat_temp = Scope (gnat_temp))
+ {
+ if (Is_Type (gnat_temp))
+ gnat_temp = Underlying_Type (gnat_temp);
+
+ if (Ekind (gnat_temp) == E_Subprogram_Body)
+ gnat_temp
+ = Corresponding_Spec (Parent (Declaration_Node (gnat_temp)));
+
+ if (IN (Ekind (gnat_temp), Subprogram_Kind)
+ && Present (Protected_Body_Subprogram (gnat_temp)))
+ gnat_temp = Protected_Body_Subprogram (gnat_temp);
+
+ if (Ekind (gnat_temp) == E_Entry
+ || Ekind (gnat_temp) == E_Entry_Family
+ || Ekind (gnat_temp) == E_Task_Type
+ || (IN (Ekind (gnat_temp), Subprogram_Kind)
+ && present_gnu_tree (gnat_temp)
+ && (current_function_decl
+ == gnat_to_gnu_entity (gnat_temp, NULL_TREE, 0))))
+ {
+ process_type (gnat_entity);
+ return get_gnu_tree (gnat_entity);
+ }
+ }
+
+ /* This abort means the Itype has an incorrect scope, i.e. that its
+ scope does not correspond to the subprogram it is declared in. */
+ gcc_unreachable ();
+ }
+
+ /* If we've already processed this entity, return what we got last time.
+ If we are defining the node, we should not have already processed it.
+ In that case, we will abort below when we try to save a new GCC tree
+ for this object. We also need to handle the case of getting a dummy
+ type when a Full_View exists but be careful so as not to trigger its
+ premature elaboration. */
+ if ((!definition || (is_type && imported_p))
+ && present_gnu_tree (gnat_entity))
+ {
+ gnu_decl = get_gnu_tree (gnat_entity);
+
+ if (TREE_CODE (gnu_decl) == TYPE_DECL
+ && TYPE_IS_DUMMY_P (TREE_TYPE (gnu_decl))
+ && IN (kind, Incomplete_Or_Private_Kind)
+ && Present (Full_View (gnat_entity))
+ && (present_gnu_tree (Full_View (gnat_entity))
+ || No (Freeze_Node (Full_View (gnat_entity)))))
+ {
+ gnu_decl
+ = gnat_to_gnu_entity (Full_View (gnat_entity), NULL_TREE, 0);
+ save_gnu_tree (gnat_entity, NULL_TREE, false);
+ save_gnu_tree (gnat_entity, gnu_decl, false);
+ }
+
+ return gnu_decl;
+ }
+
+  /* If this is a numeric or enumeral type, or an access type, a nonzero
+     Esize must be known unless a Size clause was given by the programmer.
+     Exceptions are access-to-protected-subprogram types and all access
+     subtypes, as another GNAT type is used to lay out the GCC type for
+     them.  */
+ gcc_assert (!Unknown_Esize (gnat_entity)
+ || Has_Size_Clause (gnat_entity)
+ || (!IN (kind, Numeric_Kind)
+ && !IN (kind, Enumeration_Kind)
+ && (!IN (kind, Access_Kind)
+ || kind == E_Access_Protected_Subprogram_Type
+ || kind == E_Anonymous_Access_Protected_Subprogram_Type
+ || kind == E_Access_Subtype
+ || type_annotate_only)));
+
+ /* The RM size must be specified for all discrete and fixed-point types. */
+ gcc_assert (!(IN (kind, Discrete_Or_Fixed_Point_Kind)
+ && Unknown_RM_Size (gnat_entity)));
+
+ /* If we get here, it means we have not yet done anything with this entity.
+ If we are not defining it, it must be a type or an entity that is defined
+ elsewhere or externally, otherwise we should have defined it already. */
+ gcc_assert (definition
+ || type_annotate_only
+ || is_type
+ || kind == E_Discriminant
+ || kind == E_Component
+ || kind == E_Label
+ || (kind == E_Constant && Present (Full_View (gnat_entity)))
+ || Is_Public (gnat_entity));
+
+ /* Get the name of the entity and set up the line number and filename of
+ the original definition for use in any decl we make. */
+ gnu_entity_name = get_entity_name (gnat_entity);
+ Sloc_to_locus (Sloc (gnat_entity), &input_location);
+
+  /* When we are referencing public entities rather than defining them, i.e.
+     when they come from another compilation unit, show we are at global
+     level for the purpose of computing scopes.  Don't do this for components
+     or discriminants since the relevant test is whether or not the record
+     is being defined.  */
+ if (!definition
+ && kind != E_Component
+ && kind != E_Discriminant
+ && Is_Public (gnat_entity)
+ && !Is_Statically_Allocated (gnat_entity))
+ force_global++, this_global = true;
+
+ /* Handle any attributes directly attached to the entity. */
+ if (Has_Gigi_Rep_Item (gnat_entity))
+ prepend_attributes (&attr_list, gnat_entity);
+
+ /* Do some common processing for types. */
+ if (is_type)
+ {
+ /* Compute the equivalent type to be used in gigi. */
+ gnat_equiv_type = Gigi_Equivalent_Type (gnat_entity);
+
+ /* Machine_Attributes on types are expected to be propagated to
+ subtypes. The corresponding Gigi_Rep_Items are only attached
+ to the first subtype though, so we handle the propagation here. */
+ if (Base_Type (gnat_entity) != gnat_entity
+ && !Is_First_Subtype (gnat_entity)
+ && Has_Gigi_Rep_Item (First_Subtype (Base_Type (gnat_entity))))
+ prepend_attributes (&attr_list,
+ First_Subtype (Base_Type (gnat_entity)));
+
+ /* Compute a default value for the size of an elementary type. */
+ if (Known_Esize (gnat_entity) && Is_Elementary_Type (gnat_entity))
+ {
+ unsigned int max_esize;
+
+ gcc_assert (UI_Is_In_Int_Range (Esize (gnat_entity)));
+ esize = UI_To_Int (Esize (gnat_entity));
+
+ if (IN (kind, Float_Kind))
+ max_esize = fp_prec_to_size (LONG_DOUBLE_TYPE_SIZE);
+ else if (IN (kind, Access_Kind))
+ max_esize = POINTER_SIZE * 2;
+ else
+ max_esize = LONG_LONG_TYPE_SIZE;
+
+ if (esize > max_esize)
+ esize = max_esize;
+ }
+ }
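+
+  /* To make the capping above concrete (illustrative numbers): an access
+     type declared with an Esize of 128 on a target where POINTER_SIZE is 32
+     has esize reduced to POINTER_SIZE * 2 = 64 here.  */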
+
+ switch (kind)
+ {
+ case E_Constant:
+ /* If this is a use of a deferred constant without address clause,
+ get its full definition. */
+ if (!definition
+ && No (Address_Clause (gnat_entity))
+ && Present (Full_View (gnat_entity)))
+ {
+ gnu_decl
+ = gnat_to_gnu_entity (Full_View (gnat_entity), gnu_expr, 0);
+ saved = true;
+ break;
+ }
+
+ /* If we have an external constant that we are not defining, get the
+	 expression that it was defined to represent.  We may throw it away
+ later if it is not a constant. But do not retrieve the expression
+ if it is an allocator because the designated type might be dummy
+ at this point. */
+ if (!definition
+ && !No_Initialization (Declaration_Node (gnat_entity))
+ && Present (Expression (Declaration_Node (gnat_entity)))
+ && Nkind (Expression (Declaration_Node (gnat_entity)))
+ != N_Allocator)
+ {
+ bool went_into_elab_proc = false;
+ int save_force_global = force_global;
+
+	  /* The expression may contain N_Expression_With_Actions nodes and
+	     thus object declarations from other units.  In this case, even
+	     though the expression will eventually be discarded since it is
+	     not a constant, the declarations would be stuck either in the
+	     global varpool or in the current scope.  Therefore we force the
+	     local context and create a fake scope that we'll zap at the end.  */
+ if (!current_function_decl)
+ {
+ current_function_decl = get_elaboration_procedure ();
+ went_into_elab_proc = true;
+ }
+ force_global = 0;
+ gnat_pushlevel ();
+
+ gnu_expr = gnat_to_gnu (Expression (Declaration_Node (gnat_entity)));
+
+ gnat_zaplevel ();
+ force_global = save_force_global;
+ if (went_into_elab_proc)
+ current_function_decl = NULL_TREE;
+ }
+
+ /* Ignore deferred constant definitions without address clause since
+ they are processed fully in the front-end. If No_Initialization
+ is set, this is not a deferred constant but a constant whose value
+ is built manually. And constants that are renamings are handled
+ like variables. */
+ if (definition
+ && !gnu_expr
+ && No (Address_Clause (gnat_entity))
+ && !No_Initialization (Declaration_Node (gnat_entity))
+ && No (Renamed_Object (gnat_entity)))
+ {
+ gnu_decl = error_mark_node;
+ saved = true;
+ break;
+ }
+
+ /* Ignore constant definitions already marked with the error node. See
+ the N_Object_Declaration case of gnat_to_gnu for the rationale. */
+ if (definition
+ && gnu_expr
+ && present_gnu_tree (gnat_entity)
+ && get_gnu_tree (gnat_entity) == error_mark_node)
+ {
+ maybe_present = true;
+ break;
+ }
+
+ goto object;
+
+ case E_Exception:
+ /* We used to special case VMS exceptions here to directly map them to
+ their associated condition code. Since this code had to be masked
+ dynamically to strip off the severity bits, this caused trouble in
+ the GCC/ZCX case because the "type" pointers we store in the tables
+ have to be static. We now don't special case here anymore, and let
+ the regular processing take place, which leaves us with a regular
+ exception data object for VMS exceptions too. The condition code
+ mapping is taken care of by the front end and the bitmasking by the
+ run-time library. */
+ goto object;
+
+ case E_Component:
+ case E_Discriminant:
+ {
+ /* The GNAT record where the component was defined. */
+ Entity_Id gnat_record = Underlying_Type (Scope (gnat_entity));
+
+ /* If the entity is an inherited component (in the case of extended
+ tagged record types), just return the original entity, which must
+ be a FIELD_DECL. Likewise for discriminants. If the entity is a
+ non-girder discriminant (in the case of derived untagged record
+ types), return the stored discriminant it renames. */
+ if (Present (Original_Record_Component (gnat_entity))
+ && Original_Record_Component (gnat_entity) != gnat_entity)
+ {
+ gnu_decl
+ = gnat_to_gnu_entity (Original_Record_Component (gnat_entity),
+ gnu_expr, definition);
+ saved = true;
+ break;
+ }
+
+ /* If this is a discriminant of an extended tagged type used to rename
+ a discriminant of the parent type, return the latter. */
+ else if (Present (Corresponding_Discriminant (gnat_entity)))
+ {
+ /* If the derived type is untagged, then this is a non-girder
+ discriminant and its Original_Record_Component must point to
+ the stored discriminant it renames (i.e. we should have taken
+ the previous branch). */
+ gcc_assert (Is_Tagged_Type (gnat_record));
+
+ gnu_decl
+ = gnat_to_gnu_entity (Corresponding_Discriminant (gnat_entity),
+ gnu_expr, definition);
+ saved = true;
+ break;
+ }
+
+ /* Otherwise, if we are not defining this and we have no GCC type
+ for the containing record, make one for it. Then we should
+ have made our own equivalent. */
+ else if (!definition && !present_gnu_tree (gnat_record))
+ {
+ /* ??? If this is in a record whose scope is a protected
+ type and we have an Original_Record_Component, use it.
+ This is a workaround for major problems in protected type
+ handling. */
+ Entity_Id Scop = Scope (Scope (gnat_entity));
+ if ((Is_Protected_Type (Scop)
+ || (Is_Private_Type (Scop)
+ && Present (Full_View (Scop))
+ && Is_Protected_Type (Full_View (Scop))))
+ && Present (Original_Record_Component (gnat_entity)))
+ {
+ gnu_decl
+ = gnat_to_gnu_entity (Original_Record_Component
+ (gnat_entity),
+ gnu_expr, 0);
+ saved = true;
+ break;
+ }
+
+ gnat_to_gnu_entity (Scope (gnat_entity), NULL_TREE, 0);
+ gnu_decl = get_gnu_tree (gnat_entity);
+ saved = true;
+ break;
+ }
+
+ else
+ /* Here we have no GCC type and this is a reference rather than a
+ definition. This should never happen. Most likely the cause is
+ reference before declaration in the GNAT tree for gnat_entity. */
+ gcc_unreachable ();
+ }
+
+ case E_Loop_Parameter:
+ case E_Out_Parameter:
+ case E_Variable:
+
+ /* Simple variables, loop variables, Out parameters and exceptions. */
+ object:
+ {
+	/* Always create a variable for volatile objects and for variables
+	   seen as constant but carrying a Linker_Section pragma.  */
+ bool const_flag
+ = ((kind == E_Constant || kind == E_Variable)
+ && Is_True_Constant (gnat_entity)
+ && !(kind == E_Variable
+ && Present (Linker_Section_Pragma (gnat_entity)))
+ && !Treat_As_Volatile (gnat_entity)
+ && (((Nkind (Declaration_Node (gnat_entity))
+ == N_Object_Declaration)
+ && Present (Expression (Declaration_Node (gnat_entity))))
+ || Present (Renamed_Object (gnat_entity))
+ || imported_p));
+ bool inner_const_flag = const_flag;
+ bool static_p = Is_Statically_Allocated (gnat_entity);
+ bool mutable_p = false;
+ bool used_by_ref = false;
+ tree gnu_ext_name = NULL_TREE;
+ tree renamed_obj = NULL_TREE;
+ tree gnu_object_size;
+
+ if (Present (Renamed_Object (gnat_entity)) && !definition)
+ {
+ if (kind == E_Exception)
+ gnu_expr = gnat_to_gnu_entity (Renamed_Entity (gnat_entity),
+ NULL_TREE, 0);
+ else
+ gnu_expr = gnat_to_gnu (Renamed_Object (gnat_entity));
+ }
+
+ /* Get the type after elaborating the renamed object. */
+ gnu_type = gnat_to_gnu_type (Etype (gnat_entity));
+
+ /* If this is a standard exception definition, then use the standard
+ exception type. This is necessary to make sure that imported and
+ exported views of exceptions are properly merged in LTO mode. */
+ if (TREE_CODE (TYPE_NAME (gnu_type)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (gnu_type)) == exception_data_name_id)
+ gnu_type = except_type_node;
+
+ /* For a debug renaming declaration, build a debug-only entity. */
+ if (Present (Debug_Renaming_Link (gnat_entity)))
+ {
+ /* Force a non-null value to make sure the symbol is retained. */
+ tree value = build1 (INDIRECT_REF, gnu_type,
+ build1 (NOP_EXPR,
+ build_pointer_type (gnu_type),
+ integer_minus_one_node));
+ gnu_decl = build_decl (input_location,
+ VAR_DECL, gnu_entity_name, gnu_type);
+ SET_DECL_VALUE_EXPR (gnu_decl, value);
+ DECL_HAS_VALUE_EXPR_P (gnu_decl) = 1;
+ gnat_pushdecl (gnu_decl, gnat_entity);
+ break;
+ }
+
+ /* If this is a loop variable, its type should be the base type.
+ This is because the code for processing a loop determines whether
+ a normal loop end test can be done by comparing the bounds of the
+	     loop against those of the base type, which is presumed to be the
+	     type used for computation.  But this is not correct when the size
+	     of the subtype is smaller than that of the base type.  */
+ if (kind == E_Loop_Parameter)
+ gnu_type = get_base_type (gnu_type);
+
+ /* Reject non-renamed objects whose type is an unconstrained array or
+ any object whose type is a dummy type or void. */
+ if ((TREE_CODE (gnu_type) == UNCONSTRAINED_ARRAY_TYPE
+ && No (Renamed_Object (gnat_entity)))
+ || TYPE_IS_DUMMY_P (gnu_type)
+ || TREE_CODE (gnu_type) == VOID_TYPE)
+ {
+ gcc_assert (type_annotate_only);
+ if (this_global)
+ force_global--;
+ return error_mark_node;
+ }
+
+ /* If an alignment is specified, use it if valid. Note that exceptions
+ are objects but don't have an alignment. We must do this before we
+ validate the size, since the alignment can affect the size. */
+ if (kind != E_Exception && Known_Alignment (gnat_entity))
+ {
+ gcc_assert (Present (Alignment (gnat_entity)));
+
+ align = validate_alignment (Alignment (gnat_entity), gnat_entity,
+ TYPE_ALIGN (gnu_type));
+
+ /* No point in changing the type if there is an address clause
+ as the final type of the object will be a reference type. */
+ if (Present (Address_Clause (gnat_entity)))
+ align = 0;
+ else
+ {
+ tree orig_type = gnu_type;
+
+ gnu_type
+ = maybe_pad_type (gnu_type, NULL_TREE, align, gnat_entity,
+ false, false, definition, true);
+
+ /* If a padding record was made, declare it now since it will
+ never be declared otherwise. This is necessary to ensure
+ that its subtrees are properly marked. */
+ if (gnu_type != orig_type && !DECL_P (TYPE_NAME (gnu_type)))
+ create_type_decl (TYPE_NAME (gnu_type), gnu_type, true,
+ debug_info_p, gnat_entity);
+ }
+ }
+
+ /* If we are defining the object, see if it has a Size and validate it
+ if so. If we are not defining the object and a Size clause applies,
+ simply retrieve the value. We don't want to ignore the clause and
+ it is expected to have been validated already. Then get the new
+ type, if any. */
+ if (definition)
+ gnu_size = validate_size (Esize (gnat_entity), gnu_type,
+ gnat_entity, VAR_DECL, false,
+ Has_Size_Clause (gnat_entity));
+ else if (Has_Size_Clause (gnat_entity))
+ gnu_size = UI_To_gnu (Esize (gnat_entity), bitsizetype);
+
+ if (gnu_size)
+ {
+ gnu_type
+ = make_type_from_size (gnu_type, gnu_size,
+ Has_Biased_Representation (gnat_entity));
+
+ if (operand_equal_p (TYPE_SIZE (gnu_type), gnu_size, 0))
+ gnu_size = NULL_TREE;
+ }
+
+ /* If this object has self-referential size, it must be a record with
+ a default discriminant. We are supposed to allocate an object of
+ the maximum size in this case, unless it is a constant with an
+ initializing expression, in which case we can get the size from
+ that. Note that the resulting size may still be a variable, so
+ this may end up with an indirect allocation. */
+ if (No (Renamed_Object (gnat_entity))
+ && CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_type)))
+ {
+ if (gnu_expr && kind == E_Constant)
+ {
+ tree size = TYPE_SIZE (TREE_TYPE (gnu_expr));
+ if (CONTAINS_PLACEHOLDER_P (size))
+ {
+ /* If the initializing expression is itself a constant,
+ despite having a nominal type with self-referential
+ size, we can get the size directly from it. */
+ if (TREE_CODE (gnu_expr) == COMPONENT_REF
+ && TYPE_IS_PADDING_P
+ (TREE_TYPE (TREE_OPERAND (gnu_expr, 0)))
+ && TREE_CODE (TREE_OPERAND (gnu_expr, 0)) == VAR_DECL
+ && (TREE_READONLY (TREE_OPERAND (gnu_expr, 0))
+ || DECL_READONLY_ONCE_ELAB
+ (TREE_OPERAND (gnu_expr, 0))))
+ gnu_size = DECL_SIZE (TREE_OPERAND (gnu_expr, 0));
+ else
+ gnu_size
+ = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, gnu_expr);
+ }
+ else
+ gnu_size = size;
+ }
+ /* We may have no GNU_EXPR because No_Initialization is
+ set even though there's an Expression. */
+ else if (kind == E_Constant
+ && (Nkind (Declaration_Node (gnat_entity))
+ == N_Object_Declaration)
+ && Present (Expression (Declaration_Node (gnat_entity))))
+ gnu_size
+ = TYPE_SIZE (gnat_to_gnu_type
+ (Etype
+ (Expression (Declaration_Node (gnat_entity)))));
+ else
+ {
+ gnu_size = max_size (TYPE_SIZE (gnu_type), true);
+ mutable_p = true;
+ }
+
+ /* If we are at global level and the size isn't constant, call
+ elaborate_expression_1 to make a variable for it rather than
+ calculating it each time. */
+ if (global_bindings_p () && !TREE_CONSTANT (gnu_size))
+ gnu_size = elaborate_expression_1 (gnu_size, gnat_entity,
+ get_identifier ("SIZE"),
+ definition, false);
+ }
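+
+      /* Illustration (hypothetical Ada source): given
+
+	   type Buf (Len : Natural := 0) is record
+	     S : String (1 .. Len);
+	   end record;
+	   X : Buf;
+
+	 the nominal size of X depends on the discriminant, so, absent an
+	 initializer, X gets the maximum size computed above and is flagged
+	 mutable_p.  */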
+
+      /* If the size is zero bytes, make it one byte since some linkers have
+	 trouble with zero-sized objects.  If the object will have a
+ template, that will make it nonzero so don't bother. Also avoid
+ doing that for an object renaming or an object with an address
+ clause, as we would lose useful information on the view size
+ (e.g. for null array slices) and we are not allocating the object
+ here anyway. */
+ if (((gnu_size
+ && integer_zerop (gnu_size)
+ && !TREE_OVERFLOW (gnu_size))
+ || (TYPE_SIZE (gnu_type)
+ && integer_zerop (TYPE_SIZE (gnu_type))
+ && !TREE_OVERFLOW (TYPE_SIZE (gnu_type))))
+ && !Is_Constr_Subt_For_UN_Aliased (Etype (gnat_entity))
+ && No (Renamed_Object (gnat_entity))
+ && No (Address_Clause (gnat_entity)))
+ gnu_size = bitsize_unit_node;
+
+ /* If this is an object with no specified size and alignment, and
+ if either it is atomic or we are not optimizing alignment for
+ space and it is composite and not an exception, an Out parameter
+ or a reference to another object, and the size of its type is a
+ constant, set the alignment to the smallest one which is not
+ smaller than the size, with an appropriate cap. */
+ if (!gnu_size && align == 0
+ && (Is_Atomic (gnat_entity)
+ || (!Optimize_Alignment_Space (gnat_entity)
+ && kind != E_Exception
+ && kind != E_Out_Parameter
+ && Is_Composite_Type (Etype (gnat_entity))
+ && !Is_Constr_Subt_For_UN_Aliased (Etype (gnat_entity))
+ && !Is_Exported (gnat_entity)
+ && !imported_p
+ && No (Renamed_Object (gnat_entity))
+ && No (Address_Clause (gnat_entity))))
+ && TREE_CODE (TYPE_SIZE (gnu_type)) == INTEGER_CST)
+ {
+ unsigned int size_cap, align_cap;
+
+ /* No point in promoting the alignment if this doesn't prevent
+ BLKmode access to the object, in particular block copy, as
+ this will for example disable the NRV optimization for it.
+ No point in jumping through all the hoops needed in order
+ to support BIGGEST_ALIGNMENT if we don't really have to.
+ So we cap to the smallest alignment that corresponds to
+ a known efficient memory access pattern of the target. */
+ if (Is_Atomic (gnat_entity))
+ {
+ size_cap = UINT_MAX;
+ align_cap = BIGGEST_ALIGNMENT;
+ }
+ else
+ {
+ size_cap = MAX_FIXED_MODE_SIZE;
+ align_cap = get_mode_alignment (ptr_mode);
+ }
+
+ if (!tree_fits_uhwi_p (TYPE_SIZE (gnu_type))
+ || compare_tree_int (TYPE_SIZE (gnu_type), size_cap) > 0)
+ align = 0;
+ else if (compare_tree_int (TYPE_SIZE (gnu_type), align_cap) > 0)
+ align = align_cap;
+ else
+ align = ceil_pow2 (tree_to_uhwi (TYPE_SIZE (gnu_type)));
+
+ /* But make sure not to under-align the object. */
+ if (align <= TYPE_ALIGN (gnu_type))
+ align = 0;
+
+ /* And honor the minimum valid atomic alignment, if any. */
+#ifdef MINIMUM_ATOMIC_ALIGNMENT
+ else if (align < MINIMUM_ATOMIC_ALIGNMENT)
+ align = MINIMUM_ATOMIC_ALIGNMENT;
+#endif
+ }
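+
+      /* A numeric illustration of the promotion above: on a target where
+	 align_cap is 64, a composite object whose TYPE_SIZE is 48 bits gets
+	 align = ceil_pow2 (48) = 64, kept only if that strictly exceeds the
+	 natural TYPE_ALIGN of the type.  */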
+
+ /* If the object is set to have atomic components, find the component
+ type and validate it.
+
+ ??? Note that we ignore Has_Volatile_Components on objects; it's
+ not at all clear what to do in that case. */
+ if (Has_Atomic_Components (gnat_entity))
+ {
+ tree gnu_inner = (TREE_CODE (gnu_type) == ARRAY_TYPE
+ ? TREE_TYPE (gnu_type) : gnu_type);
+
+ while (TREE_CODE (gnu_inner) == ARRAY_TYPE
+ && TYPE_MULTI_ARRAY_P (gnu_inner))
+ gnu_inner = TREE_TYPE (gnu_inner);
+
+ check_ok_for_atomic (gnu_inner, gnat_entity, true);
+ }
+
+ /* Now check if the type of the object allows atomic access. Note
+ that we must test the type, even if this object has size and
+ alignment to allow such access, because we will be going inside
+ the padded record to assign to the object. We could fix this by
+ always copying via an intermediate value, but it's not clear it's
+ worth the effort. */
+ if (Is_Atomic (gnat_entity))
+ check_ok_for_atomic (gnu_type, gnat_entity, false);
+
+ /* If this is an aliased object with an unconstrained nominal subtype,
+ make a type that includes the template. */
+ if (Is_Constr_Subt_For_UN_Aliased (Etype (gnat_entity))
+ && (Is_Array_Type (Etype (gnat_entity))
+ || (Is_Private_Type (Etype (gnat_entity))
+ && Is_Array_Type (Full_View (Etype (gnat_entity)))))
+ && !type_annotate_only)
+ {
+ tree gnu_array
+ = gnat_to_gnu_type (Base_Type (Etype (gnat_entity)));
+ gnu_type
+ = build_unc_object_type_from_ptr (TREE_TYPE (gnu_array),
+ gnu_type,
+ concat_name (gnu_entity_name,
+ "UNC"),
+ debug_info_p);
+ }
+
+ /* ??? If this is an object of CW type initialized to a value, try to
+	 ensure that the object is sufficiently aligned for this value, but
+ without pessimizing the allocation. This is a kludge necessary
+ because we don't support dynamic alignment. */
+ if (align == 0
+ && Ekind (Etype (gnat_entity)) == E_Class_Wide_Subtype
+ && No (Renamed_Object (gnat_entity))
+ && No (Address_Clause (gnat_entity)))
+ align = get_target_system_allocator_alignment () * BITS_PER_UNIT;
+
+#ifdef MINIMUM_ATOMIC_ALIGNMENT
+ /* If the size is a constant and no alignment is specified, force
+ the alignment to be the minimum valid atomic alignment. The
+ restriction on constant size avoids problems with variable-size
+ temporaries; if the size is variable, there's no issue with
+ atomic access. Also don't do this for a constant, since it isn't
+ necessary and can interfere with constant replacement. Finally,
+	 do not do it for Out parameters since that creates a size
+	 inconsistency with In parameters.  */
+ if (align == 0
+ && MINIMUM_ATOMIC_ALIGNMENT > TYPE_ALIGN (gnu_type)
+ && !FLOAT_TYPE_P (gnu_type)
+ && !const_flag && No (Renamed_Object (gnat_entity))
+ && !imported_p && No (Address_Clause (gnat_entity))
+ && kind != E_Out_Parameter
+ && (gnu_size ? TREE_CODE (gnu_size) == INTEGER_CST
+ : TREE_CODE (TYPE_SIZE (gnu_type)) == INTEGER_CST))
+ align = MINIMUM_ATOMIC_ALIGNMENT;
+#endif
+
+ /* Make a new type with the desired size and alignment, if needed.
+ But do not take into account alignment promotions to compute the
+ size of the object. */
+ gnu_object_size = gnu_size ? gnu_size : TYPE_SIZE (gnu_type);
+ if (gnu_size || align > 0)
+ {
+ tree orig_type = gnu_type;
+
+ gnu_type = maybe_pad_type (gnu_type, gnu_size, align, gnat_entity,
+ false, false, definition, true);
+
+ /* If a padding record was made, declare it now since it will
+ never be declared otherwise. This is necessary to ensure
+ that its subtrees are properly marked. */
+ if (gnu_type != orig_type && !DECL_P (TYPE_NAME (gnu_type)))
+ create_type_decl (TYPE_NAME (gnu_type), gnu_type, true,
+ debug_info_p, gnat_entity);
+ }
+
+	  /* If this is a renaming, avoid creating a new object as much as
+	     possible.  However, in several cases, creating it is required.
+	     This processing needs to be applied to the raw expression so
+	     as to make it more likely to rename the underlying object.  */
+ if (Present (Renamed_Object (gnat_entity)))
+ {
+ bool create_normal_object = false;
+
+ /* If the renamed object had padding, strip off the reference
+ to the inner object and reset our type. */
+ if ((TREE_CODE (gnu_expr) == COMPONENT_REF
+ && TYPE_IS_PADDING_P (TREE_TYPE (TREE_OPERAND (gnu_expr, 0))))
+ /* Strip useless conversions around the object. */
+ || gnat_useless_type_conversion (gnu_expr))
+ {
+ gnu_expr = TREE_OPERAND (gnu_expr, 0);
+ gnu_type = TREE_TYPE (gnu_expr);
+ }
+
+ /* Or else, if the renamed object has an unconstrained type with
+ default discriminant, use the padded type. */
+ else if (TYPE_IS_PADDING_P (TREE_TYPE (gnu_expr))
+ && TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_expr)))
+ == gnu_type
+ && CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_type)))
+ gnu_type = TREE_TYPE (gnu_expr);
+
+ /* Case 1: If this is a constant renaming stemming from a function
+ call, treat it as a normal object whose initial value is what
+ is being renamed. RM 3.3 says that the result of evaluating a
+ function call is a constant object. As a consequence, it can
+ be the inner object of a constant renaming. In this case, the
+ renaming must be fully instantiated, i.e. it cannot be a mere
+ reference to (part of) an existing object. */
+ if (const_flag)
+ {
+ tree inner_object = gnu_expr;
+ while (handled_component_p (inner_object))
+ inner_object = TREE_OPERAND (inner_object, 0);
+ if (TREE_CODE (inner_object) == CALL_EXPR)
+ create_normal_object = true;
+ }
+
+ /* Otherwise, see if we can proceed with a stabilized version of
+ the renamed entity or if we need to make a new object. */
+ if (!create_normal_object)
+ {
+ tree maybe_stable_expr = NULL_TREE;
+ bool stable = false;
+
+ /* Case 2: If the renaming entity need not be materialized and
+ the renamed expression is something we can stabilize, use
+ that for the renaming. At the global level, we can only do
+ this if we know no SAVE_EXPRs need be made, because the
+ expression we return might be used in arbitrary conditional
+ branches so we must force the evaluation of the SAVE_EXPRs
+ immediately and this requires a proper function context.
+ Note that an external constant is at the global level. */
+ if (!Materialize_Entity (gnat_entity)
+ && (!((!definition && kind == E_Constant)
+ || global_bindings_p ())
+ || (staticp (gnu_expr)
+ && !TREE_SIDE_EFFECTS (gnu_expr))))
+ {
+ maybe_stable_expr
+ = gnat_stabilize_reference (gnu_expr, true, &stable);
+
+ if (stable)
+ {
+ /* ??? No DECL_EXPR is created so we need to mark
+ the expression manually lest it is shared. */
+ if ((!definition && kind == E_Constant)
+ || global_bindings_p ())
+ MARK_VISITED (maybe_stable_expr);
+ gnu_decl = maybe_stable_expr;
+ save_gnu_tree (gnat_entity, gnu_decl, true);
+ saved = true;
+ annotate_object (gnat_entity, gnu_type, NULL_TREE,
+ false);
+ /* This assertion will fail if the renamed object
+ isn't aligned enough as to make it possible to
+ honor the alignment set on the renaming. */
+ if (align)
+ {
+ unsigned int renamed_align
+ = DECL_P (gnu_decl)
+ ? DECL_ALIGN (gnu_decl)
+ : TYPE_ALIGN (TREE_TYPE (gnu_decl));
+ gcc_assert (renamed_align >= align);
+ }
+ break;
+ }
+
+ /* The stabilization failed. Keep maybe_stable_expr
+ untouched here to let the pointer case below know
+ about that failure. */
+ }
+
+ /* Case 3: If this is a constant renaming and creating a
+ new object is allowed and cheap, treat it as a normal
+ object whose initial value is what is being renamed. */
+ if (const_flag
+ && !Is_Composite_Type
+ (Underlying_Type (Etype (gnat_entity))))
+ ;
+
+ /* Case 4: Make this into a constant pointer to the object we
+ are to rename and attach the object to the pointer if it is
+ something we can stabilize.
+
+ From the proper scope, attached objects will be referenced
+ directly instead of indirectly via the pointer to avoid
+ subtle aliasing problems with non-addressable entities.
+ They have to be stable because we must not evaluate the
+ variables in the expression every time the renaming is used.
+ The pointer is called a "renaming" pointer in this case.
+
+ In the rare cases where we cannot stabilize the renamed
+ object, we just make a "bare" pointer, and the renamed
+ entity is always accessed indirectly through it. */
+ else
+ {
+ /* We need to preserve the volatileness of the renamed
+ object through the indirection. */
+ if (TREE_THIS_VOLATILE (gnu_expr)
+ && !TYPE_VOLATILE (gnu_type))
+ gnu_type
+ = build_qualified_type (gnu_type,
+ (TYPE_QUALS (gnu_type)
+ | TYPE_QUAL_VOLATILE));
+ gnu_type = build_reference_type (gnu_type);
+ inner_const_flag = TREE_READONLY (gnu_expr);
+ const_flag = true;
+
+ /* If the previous attempt at stabilizing failed, there
+ is no point in trying again and we reuse the result
+ without attaching it to the pointer. In this case it
+ will only be used as the initializing expression of
+ the pointer and thus needs no special treatment with
+ regard to multiple evaluations. */
+ if (maybe_stable_expr)
+ ;
+
+ /* Otherwise, try to stabilize and attach the expression
+ to the pointer if the stabilization succeeds.
+
+ Note that this might introduce SAVE_EXPRs and we don't
+ check whether we're at the global level or not. This
+ is fine since we are building a pointer initializer and
+ neither the pointer nor the initializing expression can
+ be accessed before the pointer elaboration has taken
+ place in a correct program.
+
+ These SAVE_EXPRs will be evaluated at the right place
+ by either the evaluation of the initializer for the
+ non-global case or the elaboration code for the global
+ case, and will be attached to the elaboration procedure
+ in the latter case. */
+ else
+ {
+ maybe_stable_expr
+ = gnat_stabilize_reference (gnu_expr, true, &stable);
+
+ if (stable)
+ renamed_obj = maybe_stable_expr;
+
+ /* Attaching is actually performed downstream, as soon
+ as we have a VAR_DECL for the pointer we make. */
+ }
+
+ if (type_annotate_only
+ && TREE_CODE (maybe_stable_expr) == ERROR_MARK)
+ gnu_expr = NULL_TREE;
+ else
+ gnu_expr = build_unary_op (ADDR_EXPR, gnu_type,
+ maybe_stable_expr);
+
+ gnu_size = NULL_TREE;
+ used_by_ref = true;
+ }
+ }
+ }
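+
+	  /* Illustration of case 4 (hypothetical Ada source): given
+	       R : T renames A (F (I));
+	     when the renamed object must be accessed indirectly, we build a
+	     constant pointer initialized to the address of A (F (I)), with
+	     F (I) evaluated once thanks to the stabilization above, and R is
+	     then accessed through that pointer.  */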
+
+ /* Make a volatile version of this object's type if we are to make
+ the object volatile. We also interpret 13.3(19) conservatively
+ and disallow any optimizations for such a non-constant object. */
+ if ((Treat_As_Volatile (gnat_entity)
+ || (!const_flag
+ && gnu_type != except_type_node
+ && (Is_Exported (gnat_entity)
+ || imported_p
+ || Present (Address_Clause (gnat_entity)))))
+ && !TYPE_VOLATILE (gnu_type))
+ gnu_type = build_qualified_type (gnu_type,
+ (TYPE_QUALS (gnu_type)
+ | TYPE_QUAL_VOLATILE));
+
+ /* If we are defining an aliased object whose nominal subtype is
+ unconstrained, the object is a record that contains both the
+ template and the object. If there is an initializer, it will
+ have already been converted to the right type, but we need to
+ create the template if there is no initializer. */
+ if (definition
+ && !gnu_expr
+ && TREE_CODE (gnu_type) == RECORD_TYPE
+ && (TYPE_CONTAINS_TEMPLATE_P (gnu_type)
+ /* Beware that padding might have been introduced above. */
+ || (TYPE_PADDING_P (gnu_type)
+ && TREE_CODE (TREE_TYPE (TYPE_FIELDS (gnu_type)))
+ == RECORD_TYPE
+ && TYPE_CONTAINS_TEMPLATE_P
+ (TREE_TYPE (TYPE_FIELDS (gnu_type))))))
+ {
+ tree template_field
+ = TYPE_PADDING_P (gnu_type)
+ ? TYPE_FIELDS (TREE_TYPE (TYPE_FIELDS (gnu_type)))
+ : TYPE_FIELDS (gnu_type);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 1);
+ tree t = build_template (TREE_TYPE (template_field),
+ TREE_TYPE (DECL_CHAIN (template_field)),
+ NULL_TREE);
+ CONSTRUCTOR_APPEND_ELT (v, template_field, t);
+ gnu_expr = gnat_build_constructor (gnu_type, v);
+ }
+
+ /* Convert the expression to the type of the object except in the
+ case where the object's type is unconstrained or the object's type
+ is a padded record whose field is of self-referential size. In
+ the former case, converting will generate unnecessary evaluations
+ of the CONSTRUCTOR to compute the size and in the latter case, we
+ want to only copy the actual data. Also don't convert to a record
+ type with a variant part from a record type without one, to keep
+ the object simpler. */
+ if (gnu_expr
+ && TREE_CODE (gnu_type) != UNCONSTRAINED_ARRAY_TYPE
+ && !CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_type))
+ && !(TYPE_IS_PADDING_P (gnu_type)
+ && CONTAINS_PLACEHOLDER_P
+ (TYPE_SIZE (TREE_TYPE (TYPE_FIELDS (gnu_type)))))
+ && !(TREE_CODE (gnu_type) == RECORD_TYPE
+ && TREE_CODE (TREE_TYPE (gnu_expr)) == RECORD_TYPE
+ && get_variant_part (gnu_type) != NULL_TREE
+ && get_variant_part (TREE_TYPE (gnu_expr)) == NULL_TREE))
+ gnu_expr = convert (gnu_type, gnu_expr);
+
+ /* If this is a pointer that doesn't have an initializing expression,
+ initialize it to NULL, unless the object is imported. */
+ if (definition
+ && (POINTER_TYPE_P (gnu_type) || TYPE_IS_FAT_POINTER_P (gnu_type))
+ && !gnu_expr
+ && !Is_Imported (gnat_entity))
+ gnu_expr = integer_zero_node;
+
+ /* If we are defining the object and it has an Address clause, we must
+ either get the address expression from the saved GCC tree for the
+ object if it has a Freeze node, or elaborate the address expression
+ here since the front-end has guaranteed that the elaboration has no
+ effects in this case. */
+ if (definition && Present (Address_Clause (gnat_entity)))
+ {
+ Node_Id gnat_expr = Expression (Address_Clause (gnat_entity));
+ tree gnu_address
+ = present_gnu_tree (gnat_entity)
+ ? get_gnu_tree (gnat_entity) : gnat_to_gnu (gnat_expr);
+
+ save_gnu_tree (gnat_entity, NULL_TREE, false);
+
+ /* Ignore the size. It's either meaningless or was handled
+ above. */
+ gnu_size = NULL_TREE;
+ /* Convert the type of the object to a reference type that can
+ alias everything as per 13.3(19). */
+ gnu_type
+ = build_reference_type_for_mode (gnu_type, ptr_mode, true);
+ gnu_address = convert (gnu_type, gnu_address);
+ used_by_ref = true;
+ const_flag
+ = !Is_Public (gnat_entity)
+ || compile_time_known_address_p (gnat_expr);
+
+ /* If this is a deferred constant, the initializer is attached to
+ the full view. */
+ if (kind == E_Constant && Present (Full_View (gnat_entity)))
+ gnu_expr
+ = gnat_to_gnu
+ (Expression (Declaration_Node (Full_View (gnat_entity))));
+
+ /* If we don't have an initializing expression for the underlying
+ variable, the initializing expression for the pointer is the
+ specified address. Otherwise, we have to make a COMPOUND_EXPR
+ to assign both the address and the initial value. */
+ if (!gnu_expr)
+ gnu_expr = gnu_address;
+ else
+ gnu_expr
+ = build2 (COMPOUND_EXPR, gnu_type,
+ build_binary_op
+ (MODIFY_EXPR, NULL_TREE,
+ build_unary_op (INDIRECT_REF, NULL_TREE,
+ gnu_address),
+ gnu_expr),
+ gnu_address);
+ }
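+
+	  /* Illustration (hypothetical Ada source): for
+	       X : Integer;
+	       for X'Address use To_Address (16#8000_0000#);
+	     the object is rewritten as a reference initialized to that
+	     address; an initial value, when present, is assigned through
+	     the COMPOUND_EXPR built above.  */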
+
+ /* If it has an address clause and we are not defining it, mark it
+ as an indirect object. Likewise for Stdcall objects that are
+ imported. */
+ if ((!definition && Present (Address_Clause (gnat_entity)))
+ || (Is_Imported (gnat_entity)
+ && Has_Stdcall_Convention (gnat_entity)))
+ {
+ /* Convert the type of the object to a reference type that can
+ alias everything as per 13.3(19). */
+ gnu_type
+ = build_reference_type_for_mode (gnu_type, ptr_mode, true);
+ gnu_size = NULL_TREE;
+
+ /* No point in taking the address of an initializing expression
+ that isn't going to be used. */
+ gnu_expr = NULL_TREE;
+
+ /* If it has an address clause whose value is known at compile
+ time, make the object a CONST_DECL. This will avoid a
+ useless dereference. */
+ if (Present (Address_Clause (gnat_entity)))
+ {
+ Node_Id gnat_address
+ = Expression (Address_Clause (gnat_entity));
+
+ if (compile_time_known_address_p (gnat_address))
+ {
+ gnu_expr = gnat_to_gnu (gnat_address);
+ const_flag = true;
+ }
+ }
+
+ used_by_ref = true;
+ }
+
+ /* If we are at top level and this object is of variable size,
+ make the actual type a hidden pointer to the real type and
+ make the initializer be a memory allocation and initialization.
+ Likewise for objects we aren't defining (presumed to be
+ external references from other packages), but there we do
+ not set up an initialization.
+
+ If the object's size overflows, make an allocator too, so that
+ Storage_Error gets raised. Note that we will never free
+ such memory, so we presume it never will get allocated. */
+ if (!allocatable_size_p (TYPE_SIZE_UNIT (gnu_type),
+ global_bindings_p ()
+ || !definition
+ || static_p)
+ || (gnu_size
+ && !allocatable_size_p (convert (sizetype,
+ size_binop
+ (CEIL_DIV_EXPR, gnu_size,
+ bitsize_unit_node)),
+ global_bindings_p ()
+ || !definition
+ || static_p)))
+ {
+ gnu_type = build_reference_type (gnu_type);
+ gnu_size = NULL_TREE;
+ used_by_ref = true;
+
+	      /* In case this was an aliased object whose nominal subtype is
+ unconstrained, the pointer above will be a thin pointer and
+ build_allocator will automatically make the template.
+
+ If we have a template initializer only (that we made above),
+ pretend there is none and rely on what build_allocator creates
+ again anyway. Otherwise (if we have a full initializer), get
+ the data part and feed that to build_allocator.
+
+ If we are elaborating a mutable object, tell build_allocator to
+ ignore a possibly simpler size from the initializer, if any, as
+ we must allocate the maximum possible size in this case. */
+ if (definition && !imported_p)
+ {
+ tree gnu_alloc_type = TREE_TYPE (gnu_type);
+
+ if (TREE_CODE (gnu_alloc_type) == RECORD_TYPE
+ && TYPE_CONTAINS_TEMPLATE_P (gnu_alloc_type))
+ {
+ gnu_alloc_type
+ = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_alloc_type)));
+
+		  if (TREE_CODE (gnu_expr) == CONSTRUCTOR
+		      && vec_safe_length (CONSTRUCTOR_ELTS (gnu_expr)) == 1)
+		    gnu_expr = NULL_TREE;
+ else
+ gnu_expr
+ = build_component_ref
+ (gnu_expr, NULL_TREE,
+ DECL_CHAIN (TYPE_FIELDS (TREE_TYPE (gnu_expr))),
+ false);
+ }
+
+ if (TREE_CODE (TYPE_SIZE_UNIT (gnu_alloc_type)) == INTEGER_CST
+ && !valid_constant_size_p (TYPE_SIZE_UNIT (gnu_alloc_type)))
+ post_error ("?`Storage_Error` will be raised at run time!",
+ gnat_entity);
+
+ gnu_expr
+ = build_allocator (gnu_alloc_type, gnu_expr, gnu_type,
+ Empty, Empty, gnat_entity, mutable_p);
+ const_flag = true;
+ }
+ else
+ {
+ gnu_expr = NULL_TREE;
+ const_flag = false;
+ }
+ }
+
+ /* If this object would go into the stack and has an alignment larger
+ than the largest stack alignment the back-end can honor, resort to
+ a variable of "aligning type". */
+ if (!global_bindings_p () && !static_p && definition
+ && !imported_p && TYPE_ALIGN (gnu_type) > BIGGEST_ALIGNMENT)
+ {
+ /* Create the new variable. No need for extra room before the
+ aligned field as this is in automatic storage. */
+ tree gnu_new_type
+ = make_aligning_type (gnu_type, TYPE_ALIGN (gnu_type),
+ TYPE_SIZE_UNIT (gnu_type),
+ BIGGEST_ALIGNMENT, 0, gnat_entity);
+ tree gnu_new_var
+ = create_var_decl (create_concat_name (gnat_entity, "ALIGN"),
+ NULL_TREE, gnu_new_type, NULL_TREE, false,
+ false, false, false, NULL, gnat_entity);
+
+ /* Initialize the aligned field if we have an initializer. */
+ if (gnu_expr)
+ add_stmt_with_node
+ (build_binary_op (MODIFY_EXPR, NULL_TREE,
+ build_component_ref
+ (gnu_new_var, NULL_TREE,
+ TYPE_FIELDS (gnu_new_type), false),
+ gnu_expr),
+ gnat_entity);
+
+ /* And setup this entity as a reference to the aligned field. */
+ gnu_type = build_reference_type (gnu_type);
+ gnu_expr
+ = build_unary_op
+ (ADDR_EXPR, gnu_type,
+ build_component_ref (gnu_new_var, NULL_TREE,
+ TYPE_FIELDS (gnu_new_type), false));
+
+ gnu_size = NULL_TREE;
+ used_by_ref = true;
+ const_flag = true;
+ }
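+
+      /* E.g. (illustrative): a local object requiring 64-byte alignment on
+	 a target whose BIGGEST_ALIGNMENT is 16 bytes becomes a reference
+	 into the oversized "ALIGN" variable created above, pointing at a
+	 suitably aligned field within it.  */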
+
+ /* If this is an aliased object with an unconstrained nominal subtype,
+ we make its type a thin reference, i.e. the reference counterpart
+ of a thin pointer, so that it points to the array part. This is
+ aimed at making it easier for the debugger to decode the object.
+ Note that we have to do that this late because of the couple of
+ allocation adjustments that might be made just above. */
+ if (Is_Constr_Subt_For_UN_Aliased (Etype (gnat_entity))
+ && (Is_Array_Type (Etype (gnat_entity))
+ || (Is_Private_Type (Etype (gnat_entity))
+ && Is_Array_Type (Full_View (Etype (gnat_entity)))))
+ && !type_annotate_only)
+ {
+ tree gnu_array
+ = gnat_to_gnu_type (Base_Type (Etype (gnat_entity)));
+
+ /* In case the object with the template has already been allocated
+ just above, we have nothing to do here. */
+ if (!TYPE_IS_THIN_POINTER_P (gnu_type))
+ {
+ tree gnu_unc_var
+ = create_var_decl (concat_name (gnu_entity_name, "UNC"),
+ NULL_TREE, gnu_type, gnu_expr,
+ const_flag, Is_Public (gnat_entity),
+ imported_p || !definition, static_p,
+ NULL, gnat_entity);
+ gnu_expr
+ = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_unc_var);
+ TREE_CONSTANT (gnu_expr) = 1;
+
+ gnu_size = NULL_TREE;
+ used_by_ref = true;
+ const_flag = true;
+ }
+
+ gnu_type
+ = build_reference_type (TYPE_OBJECT_RECORD_TYPE (gnu_array));
+ }
+
+ if (const_flag)
+ gnu_type = build_qualified_type (gnu_type, (TYPE_QUALS (gnu_type)
+ | TYPE_QUAL_CONST));
+
+ /* Convert the expression to the type of the object except in the
+ case where the object's type is unconstrained or the object's type
+ is a padded record whose field is of self-referential size. In
+ the former case, converting will generate unnecessary evaluations
+ of the CONSTRUCTOR to compute the size and in the latter case, we
+ want to only copy the actual data. Also don't convert to a record
+ type with a variant part from a record type without one, to keep
+ the object simpler. */
+ if (gnu_expr
+ && TREE_CODE (gnu_type) != UNCONSTRAINED_ARRAY_TYPE
+ && !CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_type))
+ && !(TYPE_IS_PADDING_P (gnu_type)
+ && CONTAINS_PLACEHOLDER_P
+ (TYPE_SIZE (TREE_TYPE (TYPE_FIELDS (gnu_type)))))
+ && !(TREE_CODE (gnu_type) == RECORD_TYPE
+ && TREE_CODE (TREE_TYPE (gnu_expr)) == RECORD_TYPE
+ && get_variant_part (gnu_type) != NULL_TREE
+ && get_variant_part (TREE_TYPE (gnu_expr)) == NULL_TREE))
+ gnu_expr = convert (gnu_type, gnu_expr);
+
+ /* If this name is external or there was a name specified, use it,
+ unless this is a VMS exception object since this would conflict
+ with the symbol we need to export in addition. Don't use the
+ Interface_Name if there is an address clause (see CD30005). */
+ if (!Is_VMS_Exception (gnat_entity)
+ && ((Present (Interface_Name (gnat_entity))
+ && No (Address_Clause (gnat_entity)))
+ || (Is_Public (gnat_entity)
+ && (!Is_Imported (gnat_entity)
+ || Is_Exported (gnat_entity)))))
+ gnu_ext_name = create_concat_name (gnat_entity, NULL);
+
+ /* If this is an aggregate constant initialized to a constant, force it
+ to be statically allocated. This saves an initialization copy. */
+ if (!static_p
+ && const_flag
+ && gnu_expr && TREE_CONSTANT (gnu_expr)
+ && AGGREGATE_TYPE_P (gnu_type)
+ && tree_fits_uhwi_p (TYPE_SIZE_UNIT (gnu_type))
+ && !(TYPE_IS_PADDING_P (gnu_type)
+ && !tree_fits_uhwi_p (TYPE_SIZE_UNIT
+ (TREE_TYPE (TYPE_FIELDS (gnu_type))))))
+ static_p = true;
+
+ /* Deal with a pragma Linker_Section on a constant or variable. */
+ if ((kind == E_Constant || kind == E_Variable)
+ && Present (Linker_Section_Pragma (gnat_entity)))
+ prepend_one_attribute_pragma (&attr_list,
+ Linker_Section_Pragma (gnat_entity));
+
+ /* Now create the variable or the constant and set various flags. */
+ gnu_decl
+ = create_var_decl (gnu_entity_name, gnu_ext_name, gnu_type,
+ gnu_expr, const_flag, Is_Public (gnat_entity),
+ imported_p || !definition, static_p, attr_list,
+ gnat_entity);
+ DECL_BY_REF_P (gnu_decl) = used_by_ref;
+ DECL_POINTS_TO_READONLY_P (gnu_decl) = used_by_ref && inner_const_flag;
+ DECL_CAN_NEVER_BE_NULL_P (gnu_decl) = Can_Never_Be_Null (gnat_entity);
+
+ /* If we are defining an Out parameter and optimization isn't enabled,
+ create a fake PARM_DECL for debugging purposes and make it point to
+ the VAR_DECL. Suppress debug info for the latter but make sure it
+ will live in memory so that it can be accessed from within the
+ debugger through the PARM_DECL. */
+ if (kind == E_Out_Parameter
+ && definition
+ && debug_info_p
+ && !optimize
+ && !flag_generate_lto)
+ {
+ tree param = create_param_decl (gnu_entity_name, gnu_type, false);
+ gnat_pushdecl (param, gnat_entity);
+ SET_DECL_VALUE_EXPR (param, gnu_decl);
+ DECL_HAS_VALUE_EXPR_P (param) = 1;
+ DECL_IGNORED_P (gnu_decl) = 1;
+ TREE_ADDRESSABLE (gnu_decl) = 1;
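+	    /* For instance (sketch), for a hypothetical
+
+	           procedure P (X : out Integer);
+
+	       the debugger then sees the fake PARM_DECL for X, whose
+	       value expression points at the real VAR_DECL, while the
+	       VAR_DECL itself is hidden from the debug info.  */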
+ }
+
+ /* If this is a loop parameter, set the corresponding flag. */
+ else if (kind == E_Loop_Parameter)
+ DECL_LOOP_PARM_P (gnu_decl) = 1;
+
+ /* If this is a renaming pointer, attach the renamed object to it and
+ register it if we are at the global level. Note that an external
+ constant is at the global level. */
+ if (TREE_CODE (gnu_decl) == VAR_DECL && renamed_obj)
+ {
+ SET_DECL_RENAMED_OBJECT (gnu_decl, renamed_obj);
+ if ((!definition && kind == E_Constant) || global_bindings_p ())
+ {
+ DECL_RENAMING_GLOBAL_P (gnu_decl) = 1;
+ record_global_renaming_pointer (gnu_decl);
+ }
+ }
+
+ /* If this is a constant and we are defining it or it generates a real
+ symbol at the object level and we are referencing it, we may want
+ or need to have a true variable to represent it:
+ - if optimization isn't enabled, for debugging purposes,
+ - if the constant is public and not overlaid on something else,
+ - if its address is taken,
+ - if either itself or its type is aliased. */
+ if (TREE_CODE (gnu_decl) == CONST_DECL
+ && (definition || Sloc (gnat_entity) > Standard_Location)
+ && ((!optimize && debug_info_p)
+ || (Is_Public (gnat_entity)
+ && No (Address_Clause (gnat_entity)))
+ || Address_Taken (gnat_entity)
+ || Is_Aliased (gnat_entity)
+ || Is_Aliased (Etype (gnat_entity))))
+ {
+ tree gnu_corr_var
+ = create_true_var_decl (gnu_entity_name, gnu_ext_name, gnu_type,
+ gnu_expr, true, Is_Public (gnat_entity),
+ !definition, static_p, attr_list,
+ gnat_entity);
+
+ SET_DECL_CONST_CORRESPONDING_VAR (gnu_decl, gnu_corr_var);
+
+ /* As debugging information will be generated for the variable,
+ do not generate debugging information for the constant. */
+ if (debug_info_p)
+ DECL_IGNORED_P (gnu_decl) = 1;
+ else
+ DECL_IGNORED_P (gnu_corr_var) = 1;
+ }
+
+ /* If this is a constant, even if we don't need a true variable, we
+ may need to avoid returning the initializer in every case. That
+ can happen for the address of a (constant) constructor because,
+ upon dereferencing it, the constructor will be reinjected in the
+ tree, which may not be valid in every case; see lvalue_required_p
+ for more details. */
+ if (TREE_CODE (gnu_decl) == CONST_DECL)
+ DECL_CONST_ADDRESS_P (gnu_decl) = constructor_address_p (gnu_expr);
+
+ /* If this object is declared in a block that contains a block with an
+ exception handler, and we aren't using the GCC exception mechanism,
+ we must force this variable in memory in order to avoid an invalid
+ optimization. */
+ if (Exception_Mechanism != Back_End_Exceptions
+ && Has_Nested_Block_With_Handler (Scope (gnat_entity)))
+ TREE_ADDRESSABLE (gnu_decl) = 1;
+
+ /* If this is a local variable with non-BLKmode and aggregate type,
+ and optimization isn't enabled, then force it in memory so that
+ a register won't be allocated to it with possible subparts left
+ uninitialized and reaching the register allocator. */
+ else if (TREE_CODE (gnu_decl) == VAR_DECL
+ && !DECL_EXTERNAL (gnu_decl)
+ && !TREE_STATIC (gnu_decl)
+ && DECL_MODE (gnu_decl) != BLKmode
+ && AGGREGATE_TYPE_P (TREE_TYPE (gnu_decl))
+ && !TYPE_IS_FAT_POINTER_P (TREE_TYPE (gnu_decl))
+ && !optimize)
+ TREE_ADDRESSABLE (gnu_decl) = 1;
+
+ /* If we are defining an object with variable size or an object with
+ fixed size that will be dynamically allocated, and we are using the
+ setjmp/longjmp exception mechanism, update the setjmp buffer. */
+ if (definition
+ && Exception_Mechanism == Setjmp_Longjmp
+ && get_block_jmpbuf_decl ()
+ && DECL_SIZE_UNIT (gnu_decl)
+ && (TREE_CODE (DECL_SIZE_UNIT (gnu_decl)) != INTEGER_CST
+ || (flag_stack_check == GENERIC_STACK_CHECK
+ && compare_tree_int (DECL_SIZE_UNIT (gnu_decl),
+ STACK_CHECK_MAX_VAR_SIZE) > 0)))
+ add_stmt_with_node (build_call_n_expr
+ (update_setjmp_buf_decl, 1,
+ build_unary_op (ADDR_EXPR, NULL_TREE,
+ get_block_jmpbuf_decl ())),
+ gnat_entity);
+
+ /* Back-annotate Esize and Alignment of the object if not already
+ known. Note that we pick the values of the type, not those of
+ the object, to shield ourselves from low-level platform-dependent
+ adjustments like alignment promotion. This is both consistent with
+ all the treatment above, where alignment and size are set on the
+ type of the object and not on the object directly, and makes it
+ possible to support all confirming representation clauses. */
+ annotate_object (gnat_entity, TREE_TYPE (gnu_decl), gnu_object_size,
+ used_by_ref);
+ }
+ break;
+
+ case E_Void:
+ /* Return a TYPE_DECL for "void" that we previously made. */
+ gnu_decl = TYPE_NAME (void_type_node);
+ break;
+
+ case E_Enumeration_Type:
+ /* A special case: for the types Character and Wide_Character in
+ Standard, we do not list all the literals. So if the literals
+ are not specified, make this an unsigned integer type. */
+ if (No (First_Literal (gnat_entity)))
+ {
+ gnu_type = make_unsigned_type (esize);
+ TYPE_NAME (gnu_type) = gnu_entity_name;
+
+ /* Set TYPE_STRING_FLAG for Character and Wide_Character types.
+ This is needed by the DWARF-2 back-end to distinguish between
+ unsigned integer types and character types. */
+ TYPE_STRING_FLAG (gnu_type) = 1;
+ }
+ else
+ {
+	  /* We have a list of enumeral constants in First_Literal.  We make a
+	     CONST_DECL for each one and build into GNU_LIST the list to be
+	     placed into TYPE_VALUES.  Each node is itself a TREE_LIST whose
+	     TREE_PURPOSE is the literal name and whose TREE_VALUE is the
+	     value of the literal.  But when we have a regular boolean type,
+	     we simplify this a little by using a BOOLEAN_TYPE.  */
+ const bool is_boolean = Is_Boolean_Type (gnat_entity)
+ && !Has_Non_Standard_Rep (gnat_entity);
+ const bool is_unsigned = Is_Unsigned_Type (gnat_entity);
+ tree gnu_list = NULL_TREE;
+ Entity_Id gnat_literal;
+
+ gnu_type = make_node (is_boolean ? BOOLEAN_TYPE : ENUMERAL_TYPE);
+ TYPE_PRECISION (gnu_type) = esize;
+ TYPE_UNSIGNED (gnu_type) = is_unsigned;
+ set_min_and_max_values_for_integral_type (gnu_type, esize,
+ is_unsigned);
+ process_attributes (&gnu_type, &attr_list, true, gnat_entity);
+ layout_type (gnu_type);
+
+ for (gnat_literal = First_Literal (gnat_entity);
+ Present (gnat_literal);
+ gnat_literal = Next_Literal (gnat_literal))
+ {
+ tree gnu_value
+ = UI_To_gnu (Enumeration_Rep (gnat_literal), gnu_type);
+ tree gnu_literal
+ = create_var_decl (get_entity_name (gnat_literal), NULL_TREE,
+ gnu_type, gnu_value, true, false, false,
+ false, NULL, gnat_literal);
+ /* Do not generate debug info for individual enumerators. */
+ DECL_IGNORED_P (gnu_literal) = 1;
+ save_gnu_tree (gnat_literal, gnu_literal, false);
+ gnu_list
+ = tree_cons (DECL_NAME (gnu_literal), gnu_value, gnu_list);
+ }
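+	  /* Illustrative sketch (hypothetical example, not from the original
+	     sources): a type such as
+
+	         type Color is (Red, Green, Blue);
+
+	     yields three CONST_DECLs with values 0, 1 and 2 (absent a
+	     representation clause setting Enumeration_Rep otherwise),
+	     chained into the TYPE_VALUES list set just below.  */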
+
+ if (!is_boolean)
+ TYPE_VALUES (gnu_type) = nreverse (gnu_list);
+
+ /* Note that the bounds are updated at the end of this function
+ to avoid an infinite recursion since they refer to the type. */
+ goto discrete_type;
+ }
+ break;
+
+ case E_Signed_Integer_Type:
+ case E_Ordinary_Fixed_Point_Type:
+ case E_Decimal_Fixed_Point_Type:
+ /* For integer types, just make a signed type the appropriate number
+ of bits. */
+ gnu_type = make_signed_type (esize);
+ goto discrete_type;
+
+ case E_Modular_Integer_Type:
+ {
+ /* For modular types, make the unsigned type of the proper number
+ of bits and then set up the modulus, if required. */
+ tree gnu_modulus, gnu_high = NULL_TREE;
+
+ /* Packed array types are supposed to be subtypes only. */
+ gcc_assert (!Is_Packed_Array_Type (gnat_entity));
+
+ gnu_type = make_unsigned_type (esize);
+
+ /* Get the modulus in this type. If it overflows, assume it is because
+ it is equal to 2**Esize. Note that there is no overflow checking
+	   done on unsigned types, so we detect the overflow by looking for
+ a modulus of zero, which is otherwise invalid. */
+ gnu_modulus = UI_To_gnu (Modulus (gnat_entity), gnu_type);
+
+ if (!integer_zerop (gnu_modulus))
+ {
+ TYPE_MODULAR_P (gnu_type) = 1;
+ SET_TYPE_MODULUS (gnu_type, gnu_modulus);
+ gnu_high = fold_build2 (MINUS_EXPR, gnu_type, gnu_modulus,
+ convert (gnu_type, integer_one_node));
+ }
+
+ /* If the upper bound is not maximal, make an extra subtype. */
+ if (gnu_high
+ && !tree_int_cst_equal (gnu_high, TYPE_MAX_VALUE (gnu_type)))
+ {
+ tree gnu_subtype = make_unsigned_type (esize);
+ SET_TYPE_RM_MAX_VALUE (gnu_subtype, gnu_high);
+ TREE_TYPE (gnu_subtype) = gnu_type;
+ TYPE_EXTRA_SUBTYPE_P (gnu_subtype) = 1;
+ TYPE_NAME (gnu_type) = create_concat_name (gnat_entity, "UMT");
+ gnu_type = gnu_subtype;
+ }
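+	  /* Illustrative sketch (hypothetical example): for
+
+	         type M is mod 5;
+
+	     the unsigned base type of Esize bits can hold values above 4,
+	     so the extra "UMT" subtype created above caps the RM maximum
+	     value at modulus - 1 = 4.  */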
+ }
+ goto discrete_type;
+
+ case E_Signed_Integer_Subtype:
+ case E_Enumeration_Subtype:
+ case E_Modular_Integer_Subtype:
+ case E_Ordinary_Fixed_Point_Subtype:
+ case E_Decimal_Fixed_Point_Subtype:
+
+ /* For integral subtypes, we make a new INTEGER_TYPE. Note that we do
+ not want to call create_range_type since we would like each subtype
+ node to be distinct. ??? Historically this was in preparation for
+ when memory aliasing is implemented, but that's obsolete now given
+ the call to relate_alias_sets below.
+
+ The TREE_TYPE field of the INTEGER_TYPE points to the base type;
+ this fact is used by the arithmetic conversion functions.
+
+ We elaborate the Ancestor_Subtype if it is not in the current unit
+ and one of our bounds is non-static. We do this to ensure consistent
+ naming in the case where several subtypes share the same bounds, by
+ elaborating the first such subtype first, thus using its name. */
+
+ if (!definition
+ && Present (Ancestor_Subtype (gnat_entity))
+ && !In_Extended_Main_Code_Unit (Ancestor_Subtype (gnat_entity))
+ && (!Compile_Time_Known_Value (Type_Low_Bound (gnat_entity))
+ || !Compile_Time_Known_Value (Type_High_Bound (gnat_entity))))
+ gnat_to_gnu_entity (Ancestor_Subtype (gnat_entity), gnu_expr, 0);
+
+	/* Use the Esize for the precision, except for bit-packed arrays,
+	   for which the RM size is used instead.  */
+ if (Is_Packed_Array_Type (gnat_entity)
+ && Is_Bit_Packed_Array (Original_Array_Type (gnat_entity)))
+ esize = UI_To_Int (RM_Size (gnat_entity));
+
+ /* This should be an unsigned type if the base type is unsigned or
+ if the lower bound is constant and non-negative or if the type
+ is biased. */
+ if (Is_Unsigned_Type (Etype (gnat_entity))
+ || Is_Unsigned_Type (gnat_entity)
+ || Has_Biased_Representation (gnat_entity))
+ gnu_type = make_unsigned_type (esize);
+ else
+ gnu_type = make_signed_type (esize);
+ TREE_TYPE (gnu_type) = get_unpadded_type (Etype (gnat_entity));
+
+ SET_TYPE_RM_MIN_VALUE
+ (gnu_type,
+ convert (TREE_TYPE (gnu_type),
+ elaborate_expression (Type_Low_Bound (gnat_entity),
+ gnat_entity, get_identifier ("L"),
+ definition, true,
+ Needs_Debug_Info (gnat_entity))));
+
+ SET_TYPE_RM_MAX_VALUE
+ (gnu_type,
+ convert (TREE_TYPE (gnu_type),
+ elaborate_expression (Type_High_Bound (gnat_entity),
+ gnat_entity, get_identifier ("U"),
+ definition, true,
+ Needs_Debug_Info (gnat_entity))));
+
+ TYPE_BIASED_REPRESENTATION_P (gnu_type)
+ = Has_Biased_Representation (gnat_entity);
+
+ /* Inherit our alias set from what we're a subtype of. Subtypes
+ are not different types and a pointer can designate any instance
+ within a subtype hierarchy. */
+ relate_alias_sets (gnu_type, TREE_TYPE (gnu_type), ALIAS_SET_COPY);
+
+ /* One of the above calls might have caused us to be elaborated,
+ so don't blow up if so. */
+ if (present_gnu_tree (gnat_entity))
+ {
+ maybe_present = true;
+ break;
+ }
+
+ /* Attach the TYPE_STUB_DECL in case we have a parallel type. */
+ TYPE_STUB_DECL (gnu_type)
+ = create_type_stub_decl (gnu_entity_name, gnu_type);
+
+ /* For a packed array, make the original array type a parallel type. */
+ if (debug_info_p
+ && Is_Packed_Array_Type (gnat_entity)
+ && present_gnu_tree (Original_Array_Type (gnat_entity)))
+ add_parallel_type (gnu_type,
+ gnat_to_gnu_type
+ (Original_Array_Type (gnat_entity)));
+
+ discrete_type:
+
+ /* We have to handle clauses that under-align the type specially. */
+ if ((Present (Alignment_Clause (gnat_entity))
+ || (Is_Packed_Array_Type (gnat_entity)
+ && Present
+ (Alignment_Clause (Original_Array_Type (gnat_entity)))))
+ && UI_Is_In_Int_Range (Alignment (gnat_entity)))
+ {
+ align = UI_To_Int (Alignment (gnat_entity)) * BITS_PER_UNIT;
+ if (align >= TYPE_ALIGN (gnu_type))
+ align = 0;
+ }
+
+ /* If the type we are dealing with represents a bit-packed array,
+ we need to have the bits left justified on big-endian targets
+ and right justified on little-endian targets. We also need to
+ ensure that when the value is read (e.g. for comparison of two
+ such values), we only get the good bits, since the unused bits
+ are uninitialized. Both goals are accomplished by wrapping up
+ the modular type in an enclosing record type. */
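+	/* Sketch of the resulting layout (hypothetical example): for
+
+	       type Flags is array (1 .. 5) of Boolean;
+	       pragma Pack (Flags);
+
+	   whose implementation type is a 5-bit modular integer, that
+	   integer is wrapped in a "JM" record with a single OBJECT field
+	   so that the 5 live bits stay justified and the unused bits
+	   never take part in comparisons.  */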
+ if (Is_Packed_Array_Type (gnat_entity)
+ && Is_Bit_Packed_Array (Original_Array_Type (gnat_entity)))
+ {
+ tree gnu_field_type, gnu_field;
+
+ /* Set the RM size before wrapping up the original type. */
+ SET_TYPE_RM_SIZE (gnu_type,
+ UI_To_gnu (RM_Size (gnat_entity), bitsizetype));
+ TYPE_PACKED_ARRAY_TYPE_P (gnu_type) = 1;
+
+ /* Create a stripped-down declaration, mainly for debugging. */
+ create_type_decl (gnu_entity_name, gnu_type, true, debug_info_p,
+ gnat_entity);
+
+ /* Now save it and build the enclosing record type. */
+ gnu_field_type = gnu_type;
+
+ gnu_type = make_node (RECORD_TYPE);
+ TYPE_NAME (gnu_type) = create_concat_name (gnat_entity, "JM");
+ TYPE_PACKED (gnu_type) = 1;
+ TYPE_SIZE (gnu_type) = TYPE_SIZE (gnu_field_type);
+ TYPE_SIZE_UNIT (gnu_type) = TYPE_SIZE_UNIT (gnu_field_type);
+ SET_TYPE_ADA_SIZE (gnu_type, TYPE_RM_SIZE (gnu_field_type));
+
+ /* Propagate the alignment of the modular type to the record type,
+ unless there is an alignment clause that under-aligns the type.
+ This means that bit-packed arrays are given "ceil" alignment for
+ their size by default, which may seem counter-intuitive but makes
+ it possible to overlay them on modular types easily. */
+ TYPE_ALIGN (gnu_type)
+ = align > 0 ? align : TYPE_ALIGN (gnu_field_type);
+
+ relate_alias_sets (gnu_type, gnu_field_type, ALIAS_SET_COPY);
+
+ /* Don't declare the field as addressable since we won't be taking
+ its address and this would prevent create_field_decl from making
+ a bitfield. */
+ gnu_field
+ = create_field_decl (get_identifier ("OBJECT"), gnu_field_type,
+ gnu_type, NULL_TREE, bitsize_zero_node, 1, 0);
+
+ /* Do not emit debug info until after the parallel type is added. */
+ finish_record_type (gnu_type, gnu_field, 2, false);
+ compute_record_mode (gnu_type);
+ TYPE_JUSTIFIED_MODULAR_P (gnu_type) = 1;
+
+ if (debug_info_p)
+ {
+ /* Make the original array type a parallel type. */
+ if (present_gnu_tree (Original_Array_Type (gnat_entity)))
+ add_parallel_type (gnu_type,
+ gnat_to_gnu_type
+ (Original_Array_Type (gnat_entity)));
+
+ rest_of_record_type_compilation (gnu_type);
+ }
+ }
+
+	/* If the type we are dealing with has a smaller alignment than the
+ natural one, we need to wrap it up in a record type and misalign the
+ latter; we reuse the padding machinery for this purpose. Note that,
+ even if the record type is marked as packed because of misalignment,
+ we don't pack the field so as to give it the size of the type. */
+ else if (align > 0)
+ {
+ tree gnu_field_type, gnu_field;
+
+ /* Set the RM size before wrapping up the type. */
+ SET_TYPE_RM_SIZE (gnu_type,
+ UI_To_gnu (RM_Size (gnat_entity), bitsizetype));
+
+ /* Create a stripped-down declaration, mainly for debugging. */
+ create_type_decl (gnu_entity_name, gnu_type, true, debug_info_p,
+ gnat_entity);
+
+ /* Now save it and build the enclosing record type. */
+ gnu_field_type = gnu_type;
+
+ gnu_type = make_node (RECORD_TYPE);
+ TYPE_NAME (gnu_type) = create_concat_name (gnat_entity, "PAD");
+ TYPE_PACKED (gnu_type) = 1;
+ TYPE_SIZE (gnu_type) = TYPE_SIZE (gnu_field_type);
+ TYPE_SIZE_UNIT (gnu_type) = TYPE_SIZE_UNIT (gnu_field_type);
+ SET_TYPE_ADA_SIZE (gnu_type, TYPE_RM_SIZE (gnu_field_type));
+ TYPE_ALIGN (gnu_type) = align;
+ relate_alias_sets (gnu_type, gnu_field_type, ALIAS_SET_COPY);
+
+ /* Don't declare the field as addressable since we won't be taking
+ its address and this would prevent create_field_decl from making
+ a bitfield. */
+ gnu_field
+ = create_field_decl (get_identifier ("F"), gnu_field_type,
+ gnu_type, TYPE_SIZE (gnu_field_type),
+ bitsize_zero_node, 0, 0);
+
+ finish_record_type (gnu_type, gnu_field, 2, debug_info_p);
+ compute_record_mode (gnu_type);
+ TYPE_PADDING_P (gnu_type) = 1;
+ }
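+	/* Sketch of the under-aligned case handled just above: given,
+	   say, a 4-byte integer type T with
+
+	       for T'Alignment use 1;
+
+	   the type is wrapped in a packed "PAD" record whose single
+	   field F carries the value, so that objects of the type can
+	   legitimately be byte-aligned.  */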
+
+ break;
+
+ case E_Floating_Point_Type:
+ /* If this is a VAX floating-point type, use an integer of the proper
+ size. All the operations will be handled with ASM statements. */
+ if (Vax_Float (gnat_entity))
+ {
+ gnu_type = make_signed_type (esize);
+ TYPE_VAX_FLOATING_POINT_P (gnu_type) = 1;
+ SET_TYPE_DIGITS_VALUE (gnu_type,
+ UI_To_gnu (Digits_Value (gnat_entity),
+ sizetype));
+ break;
+ }
+
+ /* The type of the Low and High bounds can be our type if this is
+ a type from Standard, so set them at the end of the function. */
+ gnu_type = make_node (REAL_TYPE);
+ TYPE_PRECISION (gnu_type) = fp_size_to_prec (esize);
+ layout_type (gnu_type);
+ break;
+
+ case E_Floating_Point_Subtype:
+ if (Vax_Float (gnat_entity))
+ {
+ gnu_type = gnat_to_gnu_type (Etype (gnat_entity));
+ break;
+ }
+
+ /* See the E_Signed_Integer_Subtype case for the rationale. */
+ if (!definition
+ && Present (Ancestor_Subtype (gnat_entity))
+ && !In_Extended_Main_Code_Unit (Ancestor_Subtype (gnat_entity))
+ && (!Compile_Time_Known_Value (Type_Low_Bound (gnat_entity))
+ || !Compile_Time_Known_Value (Type_High_Bound (gnat_entity))))
+ gnat_to_gnu_entity (Ancestor_Subtype (gnat_entity), gnu_expr, 0);
+
+ gnu_type = make_node (REAL_TYPE);
+ TREE_TYPE (gnu_type) = get_unpadded_type (Etype (gnat_entity));
+ TYPE_PRECISION (gnu_type) = fp_size_to_prec (esize);
+ TYPE_GCC_MIN_VALUE (gnu_type)
+ = TYPE_GCC_MIN_VALUE (TREE_TYPE (gnu_type));
+ TYPE_GCC_MAX_VALUE (gnu_type)
+ = TYPE_GCC_MAX_VALUE (TREE_TYPE (gnu_type));
+ layout_type (gnu_type);
+
+ SET_TYPE_RM_MIN_VALUE
+ (gnu_type,
+ convert (TREE_TYPE (gnu_type),
+ elaborate_expression (Type_Low_Bound (gnat_entity),
+ gnat_entity, get_identifier ("L"),
+ definition, true,
+ Needs_Debug_Info (gnat_entity))));
+
+ SET_TYPE_RM_MAX_VALUE
+ (gnu_type,
+ convert (TREE_TYPE (gnu_type),
+ elaborate_expression (Type_High_Bound (gnat_entity),
+ gnat_entity, get_identifier ("U"),
+ definition, true,
+ Needs_Debug_Info (gnat_entity))));
+
+ /* Inherit our alias set from what we're a subtype of, as for
+ integer subtypes. */
+ relate_alias_sets (gnu_type, TREE_TYPE (gnu_type), ALIAS_SET_COPY);
+
+ /* One of the above calls might have caused us to be elaborated,
+ so don't blow up if so. */
+ maybe_present = true;
+ break;
+
+ /* Array and String Types and Subtypes
+
+ Unconstrained array types are represented by E_Array_Type and
+ constrained array types are represented by E_Array_Subtype. There
+ are no actual objects of an unconstrained array type; all we have
+ are pointers to that type.
+
+ The following fields are defined on array types and subtypes:
+
+ Component_Type Component type of the array.
+ Number_Dimensions Number of dimensions (an int).
+ First_Index Type of first index. */
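+    /* As a simplified illustration (hypothetical example, not from the
+       original sources), for
+
+	   type Vec is array (Integer range <>) of Float;
+
+       the code below builds a bounds template record (name suffix
+       "XUB") with fields LB0 and UB0, a fat pointer record (suffix
+       "XUP") with fields P_ARRAY and P_BOUNDS, and the object record
+       type (suffix "XUT") designated by thin pointers, which contains
+       both the template and the array data.  */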
+
+ case E_String_Type:
+ case E_Array_Type:
+ {
+ const bool convention_fortran_p
+ = (Convention (gnat_entity) == Convention_Fortran);
+ const int ndim = Number_Dimensions (gnat_entity);
+ tree gnu_template_type;
+ tree gnu_ptr_template;
+ tree gnu_template_reference, gnu_template_fields, gnu_fat_type;
+ tree *gnu_index_types = XALLOCAVEC (tree, ndim);
+ tree *gnu_temp_fields = XALLOCAVEC (tree, ndim);
+ tree gnu_max_size = size_one_node, gnu_max_size_unit, tem, t;
+ Entity_Id gnat_index, gnat_name;
+ int index;
+ tree comp_type;
+
+ /* Create the type for the component now, as it simplifies breaking
+ type reference loops. */
+ comp_type
+ = gnat_to_gnu_component_type (gnat_entity, definition, debug_info_p);
+ if (present_gnu_tree (gnat_entity))
+ {
+ /* As a side effect, the type may have been translated. */
+ maybe_present = true;
+ break;
+ }
+
+ /* We complete an existing dummy fat pointer type in place. This both
+ avoids further complex adjustments in update_pointer_to and yields
+ better debugging information in DWARF by leveraging the support for
+ incomplete declarations of "tagged" types in the DWARF back-end. */
+ gnu_type = get_dummy_type (gnat_entity);
+ if (gnu_type && TYPE_POINTER_TO (gnu_type))
+ {
+ gnu_fat_type = TYPE_MAIN_VARIANT (TYPE_POINTER_TO (gnu_type));
+ TYPE_NAME (gnu_fat_type) = NULL_TREE;
+ /* Save the contents of the dummy type for update_pointer_to. */
+ TYPE_POINTER_TO (gnu_type) = copy_type (gnu_fat_type);
+ gnu_ptr_template =
+ TREE_TYPE (TREE_CHAIN (TYPE_FIELDS (gnu_fat_type)));
+ gnu_template_type = TREE_TYPE (gnu_ptr_template);
+ }
+ else
+ {
+ gnu_fat_type = make_node (RECORD_TYPE);
+ gnu_template_type = make_node (RECORD_TYPE);
+ gnu_ptr_template = build_pointer_type (gnu_template_type);
+ }
+
+	/* Make a node for the array.  If we are not defining the array,
+	   suppress expanding incomplete types.  */
+ gnu_type = make_node (UNCONSTRAINED_ARRAY_TYPE);
+
+ if (!definition)
+ {
+ defer_incomplete_level++;
+ this_deferred = true;
+ }
+
+ /* Build the fat pointer type. Use a "void *" object instead of
+ a pointer to the array type since we don't have the array type
+ yet (it will reference the fat pointer via the bounds). */
+ tem
+ = create_field_decl (get_identifier ("P_ARRAY"), ptr_void_type_node,
+ gnu_fat_type, NULL_TREE, NULL_TREE, 0, 0);
+ DECL_CHAIN (tem)
+ = create_field_decl (get_identifier ("P_BOUNDS"), gnu_ptr_template,
+ gnu_fat_type, NULL_TREE, NULL_TREE, 0, 0);
+
+ if (COMPLETE_TYPE_P (gnu_fat_type))
+ {
+ /* We are going to lay it out again so reset the alias set. */
+ alias_set_type alias_set = TYPE_ALIAS_SET (gnu_fat_type);
+ TYPE_ALIAS_SET (gnu_fat_type) = -1;
+ finish_fat_pointer_type (gnu_fat_type, tem);
+ TYPE_ALIAS_SET (gnu_fat_type) = alias_set;
+ for (t = gnu_fat_type; t; t = TYPE_NEXT_VARIANT (t))
+ {
+ TYPE_FIELDS (t) = tem;
+ SET_TYPE_UNCONSTRAINED_ARRAY (t, gnu_type);
+ }
+ }
+ else
+ {
+ finish_fat_pointer_type (gnu_fat_type, tem);
+ SET_TYPE_UNCONSTRAINED_ARRAY (gnu_fat_type, gnu_type);
+ }
+
+ /* Build a reference to the template from a PLACEHOLDER_EXPR that
+ is the fat pointer. This will be used to access the individual
+ fields once we build them. */
+ tem = build3 (COMPONENT_REF, gnu_ptr_template,
+ build0 (PLACEHOLDER_EXPR, gnu_fat_type),
+ DECL_CHAIN (TYPE_FIELDS (gnu_fat_type)), NULL_TREE);
+ gnu_template_reference
+ = build_unary_op (INDIRECT_REF, gnu_template_type, tem);
+ TREE_READONLY (gnu_template_reference) = 1;
+ TREE_THIS_NOTRAP (gnu_template_reference) = 1;
+
+ /* Now create the GCC type for each index and add the fields for that
+ index to the template. */
+ for (index = (convention_fortran_p ? ndim - 1 : 0),
+ gnat_index = First_Index (gnat_entity);
+ 0 <= index && index < ndim;
+ index += (convention_fortran_p ? - 1 : 1),
+ gnat_index = Next_Index (gnat_index))
+ {
+ char field_name[16];
+ tree gnu_index_base_type
+ = get_unpadded_type (Base_Type (Etype (gnat_index)));
+ tree gnu_lb_field, gnu_hb_field, gnu_orig_min, gnu_orig_max;
+ tree gnu_min, gnu_max, gnu_high;
+
+ /* Make the FIELD_DECLs for the low and high bounds of this
+ type and then make extractions of these fields from the
+ template. */
+ sprintf (field_name, "LB%d", index);
+ gnu_lb_field = create_field_decl (get_identifier (field_name),
+ gnu_index_base_type,
+ gnu_template_type, NULL_TREE,
+ NULL_TREE, 0, 0);
+ Sloc_to_locus (Sloc (gnat_entity),
+ &DECL_SOURCE_LOCATION (gnu_lb_field));
+
+ field_name[0] = 'U';
+ gnu_hb_field = create_field_decl (get_identifier (field_name),
+ gnu_index_base_type,
+ gnu_template_type, NULL_TREE,
+ NULL_TREE, 0, 0);
+ Sloc_to_locus (Sloc (gnat_entity),
+ &DECL_SOURCE_LOCATION (gnu_hb_field));
+
+ gnu_temp_fields[index] = chainon (gnu_lb_field, gnu_hb_field);
+
+ /* We can't use build_component_ref here since the template type
+ isn't complete yet. */
+ gnu_orig_min = build3 (COMPONENT_REF, gnu_index_base_type,
+ gnu_template_reference, gnu_lb_field,
+ NULL_TREE);
+ gnu_orig_max = build3 (COMPONENT_REF, gnu_index_base_type,
+ gnu_template_reference, gnu_hb_field,
+ NULL_TREE);
+ TREE_READONLY (gnu_orig_min) = TREE_READONLY (gnu_orig_max) = 1;
+
+ gnu_min = convert (sizetype, gnu_orig_min);
+ gnu_max = convert (sizetype, gnu_orig_max);
+
+ /* Compute the size of this dimension. See the E_Array_Subtype
+ case below for the rationale. */
+ gnu_high
+ = build3 (COND_EXPR, sizetype,
+ build2 (GE_EXPR, boolean_type_node,
+ gnu_orig_max, gnu_orig_min),
+ gnu_max,
+ size_binop (MINUS_EXPR, gnu_min, size_one_node));
+
+ /* Make a range type with the new range in the Ada base type.
+ Then make an index type with the size range in sizetype. */
+ gnu_index_types[index]
+ = create_index_type (gnu_min, gnu_high,
+ create_range_type (gnu_index_base_type,
+ gnu_orig_min,
+ gnu_orig_max),
+ gnat_entity);
+
+ /* Update the maximum size of the array in elements. */
+ if (gnu_max_size)
+ {
+ tree gnu_index_type = get_unpadded_type (Etype (gnat_index));
+ tree gnu_min
+ = convert (sizetype, TYPE_MIN_VALUE (gnu_index_type));
+ tree gnu_max
+ = convert (sizetype, TYPE_MAX_VALUE (gnu_index_type));
+ tree gnu_this_max
+ = size_binop (MAX_EXPR,
+ size_binop (PLUS_EXPR, size_one_node,
+ size_binop (MINUS_EXPR,
+ gnu_max, gnu_min)),
+ size_zero_node);
+
+ if (TREE_CODE (gnu_this_max) == INTEGER_CST
+ && TREE_OVERFLOW (gnu_this_max))
+ gnu_max_size = NULL_TREE;
+ else
+ gnu_max_size
+ = size_binop (MULT_EXPR, gnu_max_size, gnu_this_max);
+ }
+
+ TYPE_NAME (gnu_index_types[index])
+ = create_concat_name (gnat_entity, field_name);
+ }
+
+ /* Install all the fields into the template. */
+ TYPE_NAME (gnu_template_type)
+ = create_concat_name (gnat_entity, "XUB");
+ gnu_template_fields = NULL_TREE;
+ for (index = 0; index < ndim; index++)
+ gnu_template_fields
+ = chainon (gnu_template_fields, gnu_temp_fields[index]);
+ finish_record_type (gnu_template_type, gnu_template_fields, 0,
+ debug_info_p);
+ TYPE_READONLY (gnu_template_type) = 1;
+
+ /* If Component_Size is not already specified, annotate it with the
+ size of the component. */
+ if (Unknown_Component_Size (gnat_entity))
+ Set_Component_Size (gnat_entity,
+ annotate_value (TYPE_SIZE (comp_type)));
+
+ /* Compute the maximum size of the array in units and bits. */
+ if (gnu_max_size)
+ {
+ gnu_max_size_unit = size_binop (MULT_EXPR, gnu_max_size,
+ TYPE_SIZE_UNIT (comp_type));
+ gnu_max_size = size_binop (MULT_EXPR,
+ convert (bitsizetype, gnu_max_size),
+ TYPE_SIZE (comp_type));
+ }
+ else
+ gnu_max_size_unit = NULL_TREE;
+
+ /* Now build the array type. */
+ tem = comp_type;
+ for (index = ndim - 1; index >= 0; index--)
+ {
+ tem = build_nonshared_array_type (tem, gnu_index_types[index]);
+ if (Reverse_Storage_Order (gnat_entity))
+ sorry ("non-default Scalar_Storage_Order");
+ TYPE_MULTI_ARRAY_P (tem) = (index > 0);
+ if (array_type_has_nonaliased_component (tem, gnat_entity))
+ TYPE_NONALIASED_COMPONENT (tem) = 1;
+
+ /* If it is passed by reference, force BLKmode to ensure that
+ objects of this type will always be put in memory. */
+ if (TYPE_MODE (tem) != BLKmode
+ && Is_By_Reference_Type (gnat_entity))
+ SET_TYPE_MODE (tem, BLKmode);
+ }
+
+ /* If an alignment is specified, use it if valid. But ignore it
+ for the original type of packed array types. If the alignment
+ was requested with an explicit alignment clause, state so. */
+ if (No (Packed_Array_Type (gnat_entity))
+ && Known_Alignment (gnat_entity))
+ {
+ TYPE_ALIGN (tem)
+ = validate_alignment (Alignment (gnat_entity), gnat_entity,
+ TYPE_ALIGN (tem));
+ if (Present (Alignment_Clause (gnat_entity)))
+ TYPE_USER_ALIGN (tem) = 1;
+ }
+
+ TYPE_CONVENTION_FORTRAN_P (tem) = convention_fortran_p;
+
+ /* Adjust the type of the pointer-to-array field of the fat pointer
+ and record the aliasing relationships if necessary. */
+ TREE_TYPE (TYPE_FIELDS (gnu_fat_type)) = build_pointer_type (tem);
+ if (TYPE_ALIAS_SET_KNOWN_P (gnu_fat_type))
+ record_component_aliases (gnu_fat_type);
+
+ /* The result type is an UNCONSTRAINED_ARRAY_TYPE that indicates the
+ corresponding fat pointer. */
+ TREE_TYPE (gnu_type) = gnu_fat_type;
+ TYPE_POINTER_TO (gnu_type) = gnu_fat_type;
+ TYPE_REFERENCE_TO (gnu_type) = gnu_fat_type;
+ SET_TYPE_MODE (gnu_type, BLKmode);
+ TYPE_ALIGN (gnu_type) = TYPE_ALIGN (tem);
+
+ /* If the maximum size doesn't overflow, use it. */
+ if (gnu_max_size
+ && TREE_CODE (gnu_max_size) == INTEGER_CST
+ && !TREE_OVERFLOW (gnu_max_size)
+ && TREE_CODE (gnu_max_size_unit) == INTEGER_CST
+ && !TREE_OVERFLOW (gnu_max_size_unit))
+ {
+ TYPE_SIZE (tem) = size_binop (MIN_EXPR, gnu_max_size,
+ TYPE_SIZE (tem));
+ TYPE_SIZE_UNIT (tem) = size_binop (MIN_EXPR, gnu_max_size_unit,
+ TYPE_SIZE_UNIT (tem));
+ }
+
+ create_type_decl (create_concat_name (gnat_entity, "XUA"), tem,
+ !Comes_From_Source (gnat_entity), debug_info_p,
+ gnat_entity);
+
+ /* Give the fat pointer type a name. If this is a packed type, tell
+ the debugger how to interpret the underlying bits. */
+ if (Present (Packed_Array_Type (gnat_entity)))
+ gnat_name = Packed_Array_Type (gnat_entity);
+ else
+ gnat_name = gnat_entity;
+ create_type_decl (create_concat_name (gnat_name, "XUP"), gnu_fat_type,
+ !Comes_From_Source (gnat_entity), debug_info_p,
+ gnat_entity);
+
+ /* Create the type to be designated by thin pointers: a record type for
+ the array and its template. We used to shift the fields to have the
+ template at a negative offset, but this was somewhat of a kludge; we
+ now shift thin pointer values explicitly but only those which have a
+ TYPE_UNCONSTRAINED_ARRAY attached to the designated RECORD_TYPE. */
+ tem = build_unc_object_type (gnu_template_type, tem,
+ create_concat_name (gnat_name, "XUT"),
+ debug_info_p);
+
+ SET_TYPE_UNCONSTRAINED_ARRAY (tem, gnu_type);
+ TYPE_OBJECT_RECORD_TYPE (gnu_type) = tem;
+ }
+ break;
+
+ case E_String_Subtype:
+ case E_Array_Subtype:
+
+ /* This is the actual data type for array variables. Multidimensional
+ arrays are implemented as arrays of arrays. Note that arrays which
+ have sparse enumeration subtypes as index components create sparse
+ arrays, which is obviously space inefficient but so much easier to
+ code for now.
+
+ Also note that the subtype never refers to the unconstrained array
+ type, which is somewhat at variance with Ada semantics.
+
+ First check to see if this is simply a renaming of the array type.
+ If so, the result is the array type. */
+
+ gnu_type = gnat_to_gnu_type (Etype (gnat_entity));
+ if (!Is_Constrained (gnat_entity))
+ ;
+ else
+ {
+ Entity_Id gnat_index, gnat_base_index;
+ const bool convention_fortran_p
+ = (Convention (gnat_entity) == Convention_Fortran);
+ const int ndim = Number_Dimensions (gnat_entity);
+ tree gnu_base_type = gnu_type;
+ tree *gnu_index_types = XALLOCAVEC (tree, ndim);
+ tree gnu_max_size = size_one_node, gnu_max_size_unit;
+ bool need_index_type_struct = false;
+ int index;
+
+ /* First create the GCC type for each index and find out whether
+ special types are needed for debugging information. */
+ for (index = (convention_fortran_p ? ndim - 1 : 0),
+ gnat_index = First_Index (gnat_entity),
+ gnat_base_index
+ = First_Index (Implementation_Base_Type (gnat_entity));
+ 0 <= index && index < ndim;
+ index += (convention_fortran_p ? - 1 : 1),
+ gnat_index = Next_Index (gnat_index),
+ gnat_base_index = Next_Index (gnat_base_index))
+ {
+ tree gnu_index_type = get_unpadded_type (Etype (gnat_index));
+ tree gnu_orig_min = TYPE_MIN_VALUE (gnu_index_type);
+ tree gnu_orig_max = TYPE_MAX_VALUE (gnu_index_type);
+ tree gnu_min = convert (sizetype, gnu_orig_min);
+ tree gnu_max = convert (sizetype, gnu_orig_max);
+ tree gnu_base_index_type
+ = get_unpadded_type (Etype (gnat_base_index));
+ tree gnu_base_orig_min = TYPE_MIN_VALUE (gnu_base_index_type);
+ tree gnu_base_orig_max = TYPE_MAX_VALUE (gnu_base_index_type);
+ tree gnu_high;
+
+ /* See if the base array type is already flat. If it is, we
+ are probably compiling an ACATS test but it will cause the
+ code below to malfunction if we don't handle it specially. */
+ if (TREE_CODE (gnu_base_orig_min) == INTEGER_CST
+ && TREE_CODE (gnu_base_orig_max) == INTEGER_CST
+ && tree_int_cst_lt (gnu_base_orig_max, gnu_base_orig_min))
+ {
+ gnu_min = size_one_node;
+ gnu_max = size_zero_node;
+ gnu_high = gnu_max;
+ }
+
+ /* Similarly, if one of the values overflows in sizetype and the
+ range is null, use 1..0 for the sizetype bounds. */
+ else if (TREE_CODE (gnu_min) == INTEGER_CST
+ && TREE_CODE (gnu_max) == INTEGER_CST
+ && (TREE_OVERFLOW (gnu_min) || TREE_OVERFLOW (gnu_max))
+ && tree_int_cst_lt (gnu_orig_max, gnu_orig_min))
+ {
+ gnu_min = size_one_node;
+ gnu_max = size_zero_node;
+ gnu_high = gnu_max;
+ }
+
+ /* If the minimum and maximum values both overflow in sizetype,
+ but the difference in the original type does not overflow in
+ sizetype, ignore the overflow indication. */
+ else if (TREE_CODE (gnu_min) == INTEGER_CST
+ && TREE_CODE (gnu_max) == INTEGER_CST
+ && TREE_OVERFLOW (gnu_min) && TREE_OVERFLOW (gnu_max)
+ && !TREE_OVERFLOW
+ (convert (sizetype,
+ fold_build2 (MINUS_EXPR, gnu_index_type,
+ gnu_orig_max,
+ gnu_orig_min))))
+ {
+ TREE_OVERFLOW (gnu_min) = 0;
+ TREE_OVERFLOW (gnu_max) = 0;
+ gnu_high = gnu_max;
+ }
+
+ /* Compute the size of this dimension in the general case. We
+ need to provide GCC with an upper bound to use but have to
+ deal with the "superflat" case. There are three ways to do
+ this. If we can prove that the array can never be superflat,
+ we can just use the high bound of the index type. */
+ else if ((Nkind (gnat_index) == N_Range
+ && cannot_be_superflat_p (gnat_index))
+ /* Packed Array Types are never superflat. */
+ || Is_Packed_Array_Type (gnat_entity))
+ gnu_high = gnu_max;
+
+ /* Otherwise, if the high bound is constant but the low bound is
+ not, we use the expression (hb >= lb) ? lb : hb + 1 for the
+ lower bound. Note that the comparison must be done in the
+ original type to avoid any overflow during the conversion. */
+ else if (TREE_CODE (gnu_max) == INTEGER_CST
+ && TREE_CODE (gnu_min) != INTEGER_CST)
+ {
+ gnu_high = gnu_max;
+ gnu_min
+ = build_cond_expr (sizetype,
+ build_binary_op (GE_EXPR,
+ boolean_type_node,
+ gnu_orig_max,
+ gnu_orig_min),
+ gnu_min,
+ int_const_binop (PLUS_EXPR, gnu_max,
+ size_one_node));
+ }
+
+ /* Finally we use (hb >= lb) ? hb : lb - 1 for the upper bound
+ in all the other cases. Note that, here as well as above,
+ the condition used in the comparison must be equivalent to
+ the condition (length != 0). This is relied upon in order
+ to optimize array comparisons in compare_arrays. Moreover
+ we use int_const_binop for the shift by 1 if the bound is
+ constant to avoid any unwanted overflow. */
+ else
+ gnu_high
+ = build_cond_expr (sizetype,
+ build_binary_op (GE_EXPR,
+ boolean_type_node,
+ gnu_orig_max,
+ gnu_orig_min),
+ gnu_max,
+ TREE_CODE (gnu_min) == INTEGER_CST
+ ? int_const_binop (MINUS_EXPR, gnu_min,
+ size_one_node)
+ : size_binop (MINUS_EXPR, gnu_min,
+ size_one_node));
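+	      /* Numeric illustration: for "superflat" bounds 5 .. 0, the
+		 expression above yields 5 - 1 = 4 as the upper bound, so
+		 the length evaluates to 4 - 5 + 1 = 0 instead of the
+		 negative value the raw bounds would give.  */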
+
+ /* Reuse the index type for the range type. Then make an index
+ type with the size range in sizetype. */
+ gnu_index_types[index]
+ = create_index_type (gnu_min, gnu_high, gnu_index_type,
+ gnat_entity);
+
+ /* Update the maximum size of the array in elements. Here we
+ see if any constraint on the index type of the base type
+ can be used in the case of self-referential bound on the
+ index type of the subtype. We look for a non-"infinite"
+ and non-self-referential bound from any type involved and
+ handle each bound separately. */
+ if (gnu_max_size)
+ {
+ tree gnu_base_min = convert (sizetype, gnu_base_orig_min);
+ tree gnu_base_max = convert (sizetype, gnu_base_orig_max);
+ tree gnu_base_index_base_type
+ = get_base_type (gnu_base_index_type);
+ tree gnu_base_base_min
+ = convert (sizetype,
+ TYPE_MIN_VALUE (gnu_base_index_base_type));
+ tree gnu_base_base_max
+ = convert (sizetype,
+ TYPE_MAX_VALUE (gnu_base_index_base_type));
+
+ if (!CONTAINS_PLACEHOLDER_P (gnu_min)
+ || !(TREE_CODE (gnu_base_min) == INTEGER_CST
+ && !TREE_OVERFLOW (gnu_base_min)))
+ gnu_base_min = gnu_min;
+
+ if (!CONTAINS_PLACEHOLDER_P (gnu_max)
+ || !(TREE_CODE (gnu_base_max) == INTEGER_CST
+ && !TREE_OVERFLOW (gnu_base_max)))
+ gnu_base_max = gnu_max;
+
+ if ((TREE_CODE (gnu_base_min) == INTEGER_CST
+ && TREE_OVERFLOW (gnu_base_min))
+ || operand_equal_p (gnu_base_min, gnu_base_base_min, 0)
+ || (TREE_CODE (gnu_base_max) == INTEGER_CST
+ && TREE_OVERFLOW (gnu_base_max))
+ || operand_equal_p (gnu_base_max, gnu_base_base_max, 0))
+ gnu_max_size = NULL_TREE;
+ else
+ {
+ tree gnu_this_max
+ = size_binop (MAX_EXPR,
+ size_binop (PLUS_EXPR, size_one_node,
+ size_binop (MINUS_EXPR,
+ gnu_base_max,
+ gnu_base_min)),
+ size_zero_node);
+
+ if (TREE_CODE (gnu_this_max) == INTEGER_CST
+ && TREE_OVERFLOW (gnu_this_max))
+ gnu_max_size = NULL_TREE;
+ else
+ gnu_max_size
+ = size_binop (MULT_EXPR, gnu_max_size, gnu_this_max);
+ }
+ }
+
+ /* We need special types for debugging information to point to
+ the index types if they have variable bounds, are not integer
+ types, are biased or are wider than sizetype. */
+ if (!integer_onep (gnu_orig_min)
+ || TREE_CODE (gnu_orig_max) != INTEGER_CST
+ || TREE_CODE (gnu_index_type) != INTEGER_TYPE
+ || (TREE_TYPE (gnu_index_type)
+ && TREE_CODE (TREE_TYPE (gnu_index_type))
+ != INTEGER_TYPE)
+ || TYPE_BIASED_REPRESENTATION_P (gnu_index_type)
+ || compare_tree_int (rm_size (gnu_index_type),
+ TYPE_PRECISION (sizetype)) > 0)
+ need_index_type_struct = true;
+ }
+
+ /* Then flatten: create the array of arrays. For an array type
+ used to implement a packed array, get the component type from
+ the original array type since the representation clauses that
+ can affect it are on the latter. */
+ if (Is_Packed_Array_Type (gnat_entity)
+ && !Is_Bit_Packed_Array (Original_Array_Type (gnat_entity)))
+ {
+ gnu_type = gnat_to_gnu_type (Original_Array_Type (gnat_entity));
+ for (index = ndim - 1; index >= 0; index--)
+ gnu_type = TREE_TYPE (gnu_type);
+
+ /* One of the above calls might have caused us to be elaborated,
+ so don't blow up if so. */
+ if (present_gnu_tree (gnat_entity))
+ {
+ maybe_present = true;
+ break;
+ }
+ }
+ else
+ {
+ gnu_type = gnat_to_gnu_component_type (gnat_entity, definition,
+ debug_info_p);
+
+ /* One of the above calls might have caused us to be elaborated,
+ so don't blow up if so. */
+ if (present_gnu_tree (gnat_entity))
+ {
+ maybe_present = true;
+ break;
+ }
+ }
+
+ /* Compute the maximum size of the array in units and bits. */
+ if (gnu_max_size)
+ {
+ gnu_max_size_unit = size_binop (MULT_EXPR, gnu_max_size,
+ TYPE_SIZE_UNIT (gnu_type));
+ gnu_max_size = size_binop (MULT_EXPR,
+ convert (bitsizetype, gnu_max_size),
+ TYPE_SIZE (gnu_type));
+ }
+ else
+ gnu_max_size_unit = NULL_TREE;
+
+ /* Now build the array type. */
+ for (index = ndim - 1; index >= 0; index --)
+ {
+ gnu_type = build_nonshared_array_type (gnu_type,
+ gnu_index_types[index]);
+ TYPE_MULTI_ARRAY_P (gnu_type) = (index > 0);
+ if (array_type_has_nonaliased_component (gnu_type, gnat_entity))
+ TYPE_NONALIASED_COMPONENT (gnu_type) = 1;
+
+ /* See the E_Array_Type case for the rationale. */
+ if (TYPE_MODE (gnu_type) != BLKmode
+ && Is_By_Reference_Type (gnat_entity))
+ SET_TYPE_MODE (gnu_type, BLKmode);
+ }
+
+ /* Attach the TYPE_STUB_DECL in case we have a parallel type. */
+ TYPE_STUB_DECL (gnu_type)
+ = create_type_stub_decl (gnu_entity_name, gnu_type);
+
+ /* If we are at file level and this is a multi-dimensional array,
+ we need to make a variable corresponding to the stride of the
+ inner dimensions. */
+ if (global_bindings_p () && ndim > 1)
+ {
+ tree gnu_st_name = get_identifier ("ST");
+ tree gnu_arr_type;
+
+ for (gnu_arr_type = TREE_TYPE (gnu_type);
+ TREE_CODE (gnu_arr_type) == ARRAY_TYPE;
+ gnu_arr_type = TREE_TYPE (gnu_arr_type),
+ gnu_st_name = concat_name (gnu_st_name, "ST"))
+ {
+ tree eltype = TREE_TYPE (gnu_arr_type);
+
+ TYPE_SIZE (gnu_arr_type)
+ = elaborate_expression_1 (TYPE_SIZE (gnu_arr_type),
+ gnat_entity, gnu_st_name,
+ definition, false);
+
+ /* ??? For now, store the size as a multiple of the
+ alignment of the element type in bytes so that we
+ can see the alignment from the tree. */
+ TYPE_SIZE_UNIT (gnu_arr_type)
+ = elaborate_expression_2 (TYPE_SIZE_UNIT (gnu_arr_type),
+ gnat_entity,
+ concat_name (gnu_st_name, "A_U"),
+ definition, false,
+ TYPE_ALIGN (eltype));
+
+ /* ??? create_type_decl is not invoked on the inner types so
+ the MULT_EXPR node built above will never be marked. */
+ MARK_VISITED (TYPE_SIZE_UNIT (gnu_arr_type));
+ }
+ }
+
+ /* If we need to write out a record type giving the names of the
+ bounds for debugging purposes, do it now and make the record
+ type a parallel type. This is not needed for a packed array
+ since the bounds are conveyed by the original array type. */
+ if (need_index_type_struct
+ && debug_info_p
+ && !Is_Packed_Array_Type (gnat_entity))
+ {
+ tree gnu_bound_rec = make_node (RECORD_TYPE);
+ tree gnu_field_list = NULL_TREE;
+ tree gnu_field;
+
+ TYPE_NAME (gnu_bound_rec)
+ = create_concat_name (gnat_entity, "XA");
+
+ for (index = ndim - 1; index >= 0; index--)
+ {
+ tree gnu_index = TYPE_INDEX_TYPE (gnu_index_types[index]);
+ tree gnu_index_name = TYPE_NAME (gnu_index);
+
+ if (TREE_CODE (gnu_index_name) == TYPE_DECL)
+ gnu_index_name = DECL_NAME (gnu_index_name);
+
+ /* Make sure to reference the types themselves, and not just
+ their names, as the debugger may fall back on them. */
+ gnu_field = create_field_decl (gnu_index_name, gnu_index,
+ gnu_bound_rec, NULL_TREE,
+ NULL_TREE, 0, 0);
+ DECL_CHAIN (gnu_field) = gnu_field_list;
+ gnu_field_list = gnu_field;
+ }
+
+ finish_record_type (gnu_bound_rec, gnu_field_list, 0, true);
+ add_parallel_type (gnu_type, gnu_bound_rec);
+ }
+
+ /* If this is a packed array type, make the original array type a
+ parallel type. Otherwise, do it for the base array type if it
+ isn't artificial to make sure it is kept in the debug info. */
+ if (debug_info_p)
+ {
+ if (Is_Packed_Array_Type (gnat_entity)
+ && present_gnu_tree (Original_Array_Type (gnat_entity)))
+ add_parallel_type (gnu_type,
+ gnat_to_gnu_type
+ (Original_Array_Type (gnat_entity)));
+ else
+ {
+ tree gnu_base_decl
+ = gnat_to_gnu_entity (Etype (gnat_entity), NULL_TREE, 0);
+ if (!DECL_ARTIFICIAL (gnu_base_decl))
+ add_parallel_type (gnu_type,
+ TREE_TYPE (TREE_TYPE (gnu_base_decl)));
+ }
+ }
+
+ TYPE_CONVENTION_FORTRAN_P (gnu_type) = convention_fortran_p;
+ TYPE_PACKED_ARRAY_TYPE_P (gnu_type)
+ = (Is_Packed_Array_Type (gnat_entity)
+ && Is_Bit_Packed_Array (Original_Array_Type (gnat_entity)));
+
+ /* If the size is self-referential and the maximum size doesn't
+ overflow, use it. */
+ if (CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_type))
+ && gnu_max_size
+ && !(TREE_CODE (gnu_max_size) == INTEGER_CST
+ && TREE_OVERFLOW (gnu_max_size))
+ && !(TREE_CODE (gnu_max_size_unit) == INTEGER_CST
+ && TREE_OVERFLOW (gnu_max_size_unit)))
+ {
+ TYPE_SIZE (gnu_type) = size_binop (MIN_EXPR, gnu_max_size,
+ TYPE_SIZE (gnu_type));
+ TYPE_SIZE_UNIT (gnu_type)
+ = size_binop (MIN_EXPR, gnu_max_size_unit,
+ TYPE_SIZE_UNIT (gnu_type));
+ }
+
+ /* Set our alias set to that of our base type. This gives all
+ array subtypes the same alias set. */
+ relate_alias_sets (gnu_type, gnu_base_type, ALIAS_SET_COPY);
+
+ /* If this is a packed type, make this type the same as the packed
+ array type, but do some adjusting in the type first. */
+ if (Present (Packed_Array_Type (gnat_entity)))
+ {
+ Entity_Id gnat_index;
+ tree gnu_inner;
+
+ /* First finish the type we had been making so that we output
+ debugging information for it. */
+ process_attributes (&gnu_type, &attr_list, false, gnat_entity);
+ if (Treat_As_Volatile (gnat_entity))
+ gnu_type
+ = build_qualified_type (gnu_type,
+ TYPE_QUALS (gnu_type)
+ | TYPE_QUAL_VOLATILE);
+ /* Make it artificial only if the base type was artificial too.
+ That's sort of "morally" true and will make it possible for
+ the debugger to look it up by name in DWARF, which is needed
+ in order to decode the packed array type. */
+ gnu_decl
+ = create_type_decl (gnu_entity_name, gnu_type,
+ !Comes_From_Source (Etype (gnat_entity))
+ && !Comes_From_Source (gnat_entity),
+ debug_info_p, gnat_entity);
+
+ /* Save it as our equivalent in case the call below elaborates
+ this type again. */
+ save_gnu_tree (gnat_entity, gnu_decl, false);
+
+ gnu_decl = gnat_to_gnu_entity (Packed_Array_Type (gnat_entity),
+ NULL_TREE, 0);
+ this_made_decl = true;
+ gnu_type = TREE_TYPE (gnu_decl);
+ save_gnu_tree (gnat_entity, NULL_TREE, false);
+
+ gnu_inner = gnu_type;
+ while (TREE_CODE (gnu_inner) == RECORD_TYPE
+ && (TYPE_JUSTIFIED_MODULAR_P (gnu_inner)
+ || TYPE_PADDING_P (gnu_inner)))
+ gnu_inner = TREE_TYPE (TYPE_FIELDS (gnu_inner));
+
+ /* We need to attach the index type to the type we just made so
+ that the actual bounds can later be put into a template. */
+ if ((TREE_CODE (gnu_inner) == ARRAY_TYPE
+ && !TYPE_ACTUAL_BOUNDS (gnu_inner))
+ || (TREE_CODE (gnu_inner) == INTEGER_TYPE
+ && !TYPE_HAS_ACTUAL_BOUNDS_P (gnu_inner)))
+ {
+ if (TREE_CODE (gnu_inner) == INTEGER_TYPE)
+ {
+ /* The TYPE_ACTUAL_BOUNDS field is overloaded with the
+ TYPE_MODULUS for modular types so we make an extra
+ subtype if necessary. */
+ if (TYPE_MODULAR_P (gnu_inner))
+ {
+ tree gnu_subtype
+ = make_unsigned_type (TYPE_PRECISION (gnu_inner));
+ TREE_TYPE (gnu_subtype) = gnu_inner;
+ TYPE_EXTRA_SUBTYPE_P (gnu_subtype) = 1;
+ SET_TYPE_RM_MIN_VALUE (gnu_subtype,
+ TYPE_MIN_VALUE (gnu_inner));
+ SET_TYPE_RM_MAX_VALUE (gnu_subtype,
+ TYPE_MAX_VALUE (gnu_inner));
+ gnu_inner = gnu_subtype;
+ }
+
+ TYPE_HAS_ACTUAL_BOUNDS_P (gnu_inner) = 1;
+
+#ifdef ENABLE_CHECKING
+ /* Check for other cases of overloading. */
+ gcc_assert (!TYPE_ACTUAL_BOUNDS (gnu_inner));
+#endif
+ }
+
+ for (gnat_index = First_Index (gnat_entity);
+ Present (gnat_index);
+ gnat_index = Next_Index (gnat_index))
+ SET_TYPE_ACTUAL_BOUNDS
+ (gnu_inner,
+ tree_cons (NULL_TREE,
+ get_unpadded_type (Etype (gnat_index)),
+ TYPE_ACTUAL_BOUNDS (gnu_inner)));
+
+ if (Convention (gnat_entity) != Convention_Fortran)
+ SET_TYPE_ACTUAL_BOUNDS
+ (gnu_inner, nreverse (TYPE_ACTUAL_BOUNDS (gnu_inner)));
+
+ if (TREE_CODE (gnu_type) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (gnu_type))
+ TREE_TYPE (TYPE_FIELDS (gnu_type)) = gnu_inner;
+ }
+ }
+
+ else
+ /* Abort if packed array with no Packed_Array_Type field set. */
+ gcc_assert (!Is_Packed (gnat_entity));
+ }
+ break;
+
+ case E_String_Literal_Subtype:
+ /* Create the type for a string literal. */
+ {
+ Entity_Id gnat_full_type
+ = (IN (Ekind (Etype (gnat_entity)), Private_Kind)
+ && Present (Full_View (Etype (gnat_entity)))
+ ? Full_View (Etype (gnat_entity)) : Etype (gnat_entity));
+ tree gnu_string_type = get_unpadded_type (gnat_full_type);
+ tree gnu_string_array_type
+ = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_string_type))));
+ tree gnu_string_index_type
+ = get_base_type (TREE_TYPE (TYPE_INDEX_TYPE
+ (TYPE_DOMAIN (gnu_string_array_type))));
+ tree gnu_lower_bound
+ = convert (gnu_string_index_type,
+ gnat_to_gnu (String_Literal_Low_Bound (gnat_entity)));
+ tree gnu_length
+ = UI_To_gnu (String_Literal_Length (gnat_entity),
+ gnu_string_index_type);
+ tree gnu_upper_bound
+ = build_binary_op (PLUS_EXPR, gnu_string_index_type,
+ gnu_lower_bound,
+ int_const_binop (MINUS_EXPR, gnu_length,
+ integer_one_node));
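+	/* E.g. a 3-character literal with low bound 1 gets the upper
+	   bound 1 + (3 - 1) = 3, i.e. the index range 1 .. 3.  */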
+ tree gnu_index_type
+ = create_index_type (convert (sizetype, gnu_lower_bound),
+ convert (sizetype, gnu_upper_bound),
+ create_range_type (gnu_string_index_type,
+ gnu_lower_bound,
+ gnu_upper_bound),
+ gnat_entity);
+
+ gnu_type
+ = build_nonshared_array_type (gnat_to_gnu_type
+ (Component_Type (gnat_entity)),
+ gnu_index_type);
+ if (array_type_has_nonaliased_component (gnu_type, gnat_entity))
+ TYPE_NONALIASED_COMPONENT (gnu_type) = 1;
+ relate_alias_sets (gnu_type, gnu_string_type, ALIAS_SET_COPY);
+ }
+ break;
+
+ /* Record Types and Subtypes
+
+ The following fields are defined on record types:
+
+ Has_Discriminants True if the record has discriminants
+ First_Discriminant Points to head of list of discriminants
+ First_Entity Points to head of list of fields
+ Is_Tagged_Type True if the record is tagged
+
+ Implementation of Ada records and discriminated records:
+
+      A record type definition is transformed into the equivalent of a C
+      struct definition.  The fields are the discriminants, which are
+      found in the Full_Type_Declaration node, and the elements of the
+      Component_List found in the Record_Type_Definition node.  The
+      Component_List can be a recursive structure since each Variant of
+      the Variant_Part of the Component_List has a Component_List.
+
+      Processing of a record type definition comprises starting the list
+      of field declarations here from the discriminants and then calling
+      the function components_to_record to add the rest of the fields
+      from the component list and return the gnu type node.  The function
+      components_to_record will call itself recursively as it traverses
+      the tree.  */
+
+ case E_Record_Type:
+ if (Has_Complex_Representation (gnat_entity))
+ {
+ gnu_type
+ = build_complex_type
+ (get_unpadded_type
+ (Etype (Defining_Entity
+ (First (Component_Items
+ (Component_List
+ (Type_Definition
+ (Declaration_Node (gnat_entity)))))))));
+
+ break;
+ }
+
+ {
+ Node_Id full_definition = Declaration_Node (gnat_entity);
+ Node_Id record_definition = Type_Definition (full_definition);
+ Node_Id gnat_constr;
+ Entity_Id gnat_field;
+ tree gnu_field, gnu_field_list = NULL_TREE;
+ tree gnu_get_parent;
+ /* Set PACKED in keeping with gnat_to_gnu_field. */
+ const int packed
+ = Is_Packed (gnat_entity)
+ ? 1
+ : Component_Alignment (gnat_entity) == Calign_Storage_Unit
+ ? -1
+ : (Known_Alignment (gnat_entity)
+ || (Strict_Alignment (gnat_entity)
+ && Known_RM_Size (gnat_entity)))
+ ? -2
+ : 0;
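+	/* To recap the encoding expected by gnat_to_gnu_field (roughly):
+	   1 means that the record is packed, -1 that its components are
+	   to be byte-aligned (Component_Alignment of Storage_Unit), -2
+	   that the layout is constrained by an alignment or size
+	   requirement, and 0 that no packing applies.  */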
+ const bool has_discr = Has_Discriminants (gnat_entity);
+ const bool has_rep = Has_Specified_Layout (gnat_entity);
+ const bool is_extension
+ = (Is_Tagged_Type (gnat_entity)
+ && Nkind (record_definition) == N_Derived_Type_Definition);
+ const bool is_unchecked_union = Is_Unchecked_Union (gnat_entity);
+ bool all_rep = has_rep;
+
+ /* See if all fields have a rep clause. Stop when we find one
+ that doesn't. */
+ if (all_rep)
+ for (gnat_field = First_Entity (gnat_entity);
+ Present (gnat_field);
+ gnat_field = Next_Entity (gnat_field))
+ if ((Ekind (gnat_field) == E_Component
+ || Ekind (gnat_field) == E_Discriminant)
+ && No (Component_Clause (gnat_field)))
+ {
+ all_rep = false;
+ break;
+ }
+
+ /* If this is a record extension, go a level further to find the
+ record definition. Also, verify we have a Parent_Subtype. */
+ if (is_extension)
+ {
+ if (!type_annotate_only
+ || Present (Record_Extension_Part (record_definition)))
+ record_definition = Record_Extension_Part (record_definition);
+
+ gcc_assert (type_annotate_only
+ || Present (Parent_Subtype (gnat_entity)));
+ }
+
+ /* Make a node for the record. If we are not defining the record,
+ suppress expanding incomplete types. */
+ gnu_type = make_node (tree_code_for_record_type (gnat_entity));
+ TYPE_NAME (gnu_type) = gnu_entity_name;
+ TYPE_PACKED (gnu_type) = (packed != 0) || has_rep;
+ if (Reverse_Storage_Order (gnat_entity))
+ sorry ("non-default Scalar_Storage_Order");
+ process_attributes (&gnu_type, &attr_list, true, gnat_entity);
+
+ if (!definition)
+ {
+ defer_incomplete_level++;
+ this_deferred = true;
+ }
+
+	/* If both a size and a rep clause were specified, put the size in
+ the record type now so that it can get the proper mode. */
+ if (has_rep && Known_RM_Size (gnat_entity))
+ TYPE_SIZE (gnu_type)
+ = UI_To_gnu (RM_Size (gnat_entity), bitsizetype);
+
+ /* Always set the alignment here so that it can be used to
+	   set the mode, if doing so makes the alignment stricter.  If
+ it is invalid, it will be checked again below. If this is to
+ be Atomic, choose a default alignment of a word unless we know
+ the size and it's smaller. */
+ if (Known_Alignment (gnat_entity))
+ TYPE_ALIGN (gnu_type)
+ = validate_alignment (Alignment (gnat_entity), gnat_entity, 0);
+ else if (Is_Atomic (gnat_entity) && Known_Esize (gnat_entity))
+ {
+ unsigned int size = UI_To_Int (Esize (gnat_entity));
+ TYPE_ALIGN (gnu_type)
+ = size >= BITS_PER_WORD ? BITS_PER_WORD : ceil_pow2 (size);
+ }
+ /* If a type needs strict alignment, the minimum size will be the
+ type size instead of the RM size (see validate_size). Cap the
+	   alignment, lest it cause the type size to become too large. */
+ else if (Strict_Alignment (gnat_entity) && Known_RM_Size (gnat_entity))
+ {
+ unsigned int raw_size = UI_To_Int (RM_Size (gnat_entity));
+ unsigned int raw_align = raw_size & -raw_size;
+ if (raw_align < BIGGEST_ALIGNMENT)
+ TYPE_ALIGN (gnu_type) = raw_align;
+ }
+ else
+ TYPE_ALIGN (gnu_type) = 0;
+
+ /* If we have a Parent_Subtype, make a field for the parent. If
+ this record has rep clauses, force the position to zero. */
+ if (Present (Parent_Subtype (gnat_entity)))
+ {
+ Entity_Id gnat_parent = Parent_Subtype (gnat_entity);
+ tree gnu_dummy_parent_type = make_node (RECORD_TYPE);
+ tree gnu_parent;
+
+ /* A major complexity here is that the parent subtype will
+ reference our discriminants in its Stored_Constraint list.
+ But those must reference the parent component of this record
+ which is precisely of the parent subtype we have not built yet!
+ To break the circle we first build a dummy COMPONENT_REF which
+ represents the "get to the parent" operation and initialize
+ each of those discriminants to a COMPONENT_REF of the above
+ dummy parent referencing the corresponding discriminant of the
+ base type of the parent subtype. */
+ gnu_get_parent = build3 (COMPONENT_REF, gnu_dummy_parent_type,
+ build0 (PLACEHOLDER_EXPR, gnu_type),
+ build_decl (input_location,
+ FIELD_DECL, NULL_TREE,
+ gnu_dummy_parent_type),
+ NULL_TREE);
+
+ if (has_discr)
+ for (gnat_field = First_Stored_Discriminant (gnat_entity);
+ Present (gnat_field);
+ gnat_field = Next_Stored_Discriminant (gnat_field))
+ if (Present (Corresponding_Discriminant (gnat_field)))
+ {
+ tree gnu_field
+ = gnat_to_gnu_field_decl (Corresponding_Discriminant
+ (gnat_field));
+ save_gnu_tree
+ (gnat_field,
+ build3 (COMPONENT_REF, TREE_TYPE (gnu_field),
+ gnu_get_parent, gnu_field, NULL_TREE),
+ true);
+ }
+
+ /* Then we build the parent subtype. If it has discriminants but
+ the type itself has unknown discriminants, this means that it
+ doesn't contain information about how the discriminants are
+ derived from those of the ancestor type, so it cannot be used
+ directly. Instead it is built by cloning the parent subtype
+ of the underlying record view of the type, for which the above
+ derivation of discriminants has been made explicit. */
+ if (Has_Discriminants (gnat_parent)
+ && Has_Unknown_Discriminants (gnat_entity))
+ {
+ Entity_Id gnat_uview = Underlying_Record_View (gnat_entity);
+
+ /* If we are defining the type, the underlying record
+ view must already have been elaborated at this point.
+ Otherwise do it now as its parent subtype cannot be
+ technically elaborated on its own. */
+ if (definition)
+ gcc_assert (present_gnu_tree (gnat_uview));
+ else
+ gnat_to_gnu_entity (gnat_uview, NULL_TREE, 0);
+
+ gnu_parent = gnat_to_gnu_type (Parent_Subtype (gnat_uview));
+
+ /* Substitute the "get to the parent" of the type for that
+ of its underlying record view in the cloned type. */
+ for (gnat_field = First_Stored_Discriminant (gnat_uview);
+ Present (gnat_field);
+ gnat_field = Next_Stored_Discriminant (gnat_field))
+ if (Present (Corresponding_Discriminant (gnat_field)))
+ {
+ tree gnu_field = gnat_to_gnu_field_decl (gnat_field);
+ tree gnu_ref
+ = build3 (COMPONENT_REF, TREE_TYPE (gnu_field),
+ gnu_get_parent, gnu_field, NULL_TREE);
+ gnu_parent
+ = substitute_in_type (gnu_parent, gnu_field, gnu_ref);
+ }
+ }
+ else
+ gnu_parent = gnat_to_gnu_type (gnat_parent);
+
+ /* Finally we fix up both kinds of twisted COMPONENT_REF we have
+ initially built. The discriminants must reference the fields
+ of the parent subtype and not those of its base type for the
+ placeholder machinery to properly work. */
+ if (has_discr)
+ {
+ /* The actual parent subtype is the full view. */
+ if (IN (Ekind (gnat_parent), Private_Kind))
+ {
+ if (Present (Full_View (gnat_parent)))
+ gnat_parent = Full_View (gnat_parent);
+ else
+ gnat_parent = Underlying_Full_View (gnat_parent);
+ }
+
+ for (gnat_field = First_Stored_Discriminant (gnat_entity);
+ Present (gnat_field);
+ gnat_field = Next_Stored_Discriminant (gnat_field))
+ if (Present (Corresponding_Discriminant (gnat_field)))
+ {
+ Entity_Id field = Empty;
+ for (field = First_Stored_Discriminant (gnat_parent);
+ Present (field);
+ field = Next_Stored_Discriminant (field))
+ if (same_discriminant_p (gnat_field, field))
+ break;
+ gcc_assert (Present (field));
+ TREE_OPERAND (get_gnu_tree (gnat_field), 1)
+ = gnat_to_gnu_field_decl (field);
+ }
+ }
+
+ /* The "get to the parent" COMPONENT_REF must be given its
+ proper type... */
+ TREE_TYPE (gnu_get_parent) = gnu_parent;
+
+ /* ...and reference the _Parent field of this record. */
+ gnu_field
+ = create_field_decl (parent_name_id,
+ gnu_parent, gnu_type,
+ has_rep
+ ? TYPE_SIZE (gnu_parent) : NULL_TREE,
+ has_rep
+ ? bitsize_zero_node : NULL_TREE,
+ 0, 1);
+ DECL_INTERNAL_P (gnu_field) = 1;
+ TREE_OPERAND (gnu_get_parent, 1) = gnu_field;
+ TYPE_FIELDS (gnu_type) = gnu_field;
+ }
+
+ /* Make the fields for the discriminants and put them into the record
+ unless it's an Unchecked_Union. */
+ if (has_discr)
+ for (gnat_field = First_Stored_Discriminant (gnat_entity);
+ Present (gnat_field);
+ gnat_field = Next_Stored_Discriminant (gnat_field))
+ {
+ /* If this is a record extension and this discriminant is the
+ renaming of another discriminant, we've handled it above. */
+ if (Present (Parent_Subtype (gnat_entity))
+ && Present (Corresponding_Discriminant (gnat_field)))
+ continue;
+
+ gnu_field
+ = gnat_to_gnu_field (gnat_field, gnu_type, packed, definition,
+ debug_info_p);
+
+ /* Make an expression using a PLACEHOLDER_EXPR from the
+ FIELD_DECL node just created and link that with the
+ corresponding GNAT defining identifier. */
+ save_gnu_tree (gnat_field,
+ build3 (COMPONENT_REF, TREE_TYPE (gnu_field),
+ build0 (PLACEHOLDER_EXPR, gnu_type),
+ gnu_field, NULL_TREE),
+ true);
+
+ if (!is_unchecked_union)
+ {
+ DECL_CHAIN (gnu_field) = gnu_field_list;
+ gnu_field_list = gnu_field;
+ }
+ }
+
+ /* If we have a derived untagged type that renames discriminants in
+ the root type, the (stored) discriminants are just a copy of the
+ discriminants of the root type. This means that any constraints
+ added by the renaming in the derivation are disregarded as far
+ as the layout of the derived type is concerned. To rescue them,
+ we change the type of the (stored) discriminants to a subtype
+ with the bounds of the type of the visible discriminants. */
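+
+ /* A hypothetical example:
+
+ type Root (D : Integer) is record ... end record;
+ type Deriv (E : Positive) is new Root (D => E);
+
+ The stored discriminant of Deriv is a copy of D, of type Integer;
+ the code below narrows its type to a subtype carrying the bounds
+ of Positive, the type of the visible discriminant E.  */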
+ if (has_discr
+ && !is_extension
+ && Stored_Constraint (gnat_entity) != No_Elist)
+ for (gnat_constr = First_Elmt (Stored_Constraint (gnat_entity));
+ gnat_constr != No_Elmt;
+ gnat_constr = Next_Elmt (gnat_constr))
+ if (Nkind (Node (gnat_constr)) == N_Identifier
+ /* Ignore access discriminants. */
+ && !Is_Access_Type (Etype (Node (gnat_constr)))
+ && Ekind (Entity (Node (gnat_constr))) == E_Discriminant)
+ {
+ Entity_Id gnat_discr = Entity (Node (gnat_constr));
+ tree gnu_discr_type = gnat_to_gnu_type (Etype (gnat_discr));
+ tree gnu_ref
+ = gnat_to_gnu_entity (Original_Record_Component (gnat_discr),
+ NULL_TREE, 0);
+
+ /* GNU_REF must be an expression using a PLACEHOLDER_EXPR built
+ just above for one of the stored discriminants. */
+ gcc_assert (TREE_TYPE (TREE_OPERAND (gnu_ref, 0)) == gnu_type);
+
+ if (gnu_discr_type != TREE_TYPE (gnu_ref))
+ {
+ const unsigned prec = TYPE_PRECISION (TREE_TYPE (gnu_ref));
+ tree gnu_subtype
+ = TYPE_UNSIGNED (TREE_TYPE (gnu_ref))
+ ? make_unsigned_type (prec) : make_signed_type (prec);
+ TREE_TYPE (gnu_subtype) = TREE_TYPE (gnu_ref);
+ TYPE_EXTRA_SUBTYPE_P (gnu_subtype) = 1;
+ SET_TYPE_RM_MIN_VALUE (gnu_subtype,
+ TYPE_MIN_VALUE (gnu_discr_type));
+ SET_TYPE_RM_MAX_VALUE (gnu_subtype,
+ TYPE_MAX_VALUE (gnu_discr_type));
+ TREE_TYPE (gnu_ref)
+ = TREE_TYPE (TREE_OPERAND (gnu_ref, 1)) = gnu_subtype;
+ }
+ }
+
+ /* Add the fields into the record type and finish it up. */
+ components_to_record (gnu_type, Component_List (record_definition),
+ gnu_field_list, packed, definition, false,
+ all_rep, is_unchecked_union,
+ !Comes_From_Source (gnat_entity), debug_info_p,
+ false, OK_To_Reorder_Components (gnat_entity),
+ all_rep ? NULL_TREE : bitsize_zero_node, NULL);
+
+ /* If it is passed by reference, force BLKmode to ensure that objects
+ of this type will always be put in memory. */
+ if (TYPE_MODE (gnu_type) != BLKmode
+ && Is_By_Reference_Type (gnat_entity))
+ SET_TYPE_MODE (gnu_type, BLKmode);
+
+ /* We used to remove the associations of the discriminants and _Parent
+ for validity checking but we may need them if there's a Freeze_Node
+ for a subtype used in this record. */
+ TYPE_VOLATILE (gnu_type) = Treat_As_Volatile (gnat_entity);
+
+ /* Fill in locations of fields. */
+ annotate_rep (gnat_entity, gnu_type);
+
+ /* If there are any entities in the chain corresponding to components
+ that we did not elaborate, ensure we elaborate their types if they
+ are Itypes. */
+ for (gnat_temp = First_Entity (gnat_entity);
+ Present (gnat_temp);
+ gnat_temp = Next_Entity (gnat_temp))
+ if ((Ekind (gnat_temp) == E_Component
+ || Ekind (gnat_temp) == E_Discriminant)
+ && Is_Itype (Etype (gnat_temp))
+ && !present_gnu_tree (gnat_temp))
+ gnat_to_gnu_entity (Etype (gnat_temp), NULL_TREE, 0);
+
+ /* If this is a record type associated with an exception definition,
+ equate its fields to those of the standard exception type. This
+ will make it possible to convert between them. */
+ if (gnu_entity_name == exception_data_name_id)
+ {
+ tree gnu_std_field;
+ for (gnu_field = TYPE_FIELDS (gnu_type),
+ gnu_std_field = TYPE_FIELDS (except_type_node);
+ gnu_field;
+ gnu_field = DECL_CHAIN (gnu_field),
+ gnu_std_field = DECL_CHAIN (gnu_std_field))
+ SET_DECL_ORIGINAL_FIELD_TO_FIELD (gnu_field, gnu_std_field);
+ gcc_assert (!gnu_std_field);
+ }
+ }
+ break;
+
+ case E_Class_Wide_Subtype:
+ /* If an equivalent type is present, that is what we should use.
+ Otherwise, fall through to handle this like a record subtype
+ since it may have constraints. */
+ if (gnat_equiv_type != gnat_entity)
+ {
+ gnu_decl = gnat_to_gnu_entity (gnat_equiv_type, NULL_TREE, 0);
+ maybe_present = true;
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case E_Record_Subtype:
+ /* If Cloned_Subtype is Present, it means that this record subtype
+ has a layout identical to that of the cloned (sub)type, so we
+ should use the latter's GCC type for this one. The front end
+ guarantees that the component list is shared. */
+ if (Present (Cloned_Subtype (gnat_entity)))
+ {
+ gnu_decl = gnat_to_gnu_entity (Cloned_Subtype (gnat_entity),
+ NULL_TREE, 0);
+ maybe_present = true;
+ break;
+ }
+
+ /* Otherwise, first ensure the base type is elaborated. Then, if we are
+ changing the type, make a new type with each field having the type of
+ the field in the new subtype but the position computed by transforming
+ every discriminant reference according to the constraints. We don't
+ see any difference between private and non-private type here since
+ derivations from types should have been deferred until the completion
+ of the private type. */
+ else
+ {
+ Entity_Id gnat_base_type = Implementation_Base_Type (gnat_entity);
+ tree gnu_base_type;
+
+ if (!definition)
+ {
+ defer_incomplete_level++;
+ this_deferred = true;
+ }
+
+ gnu_base_type = gnat_to_gnu_type (gnat_base_type);
+
+ if (present_gnu_tree (gnat_entity))
+ {
+ maybe_present = true;
+ break;
+ }
+
+ /* If this is a record subtype associated with a dispatch table,
+ strip the suffix. This is necessary to make sure two different
+ subtypes associated with the imported and exported views of a
+ dispatch table are properly merged in LTO mode. */
+ if (Is_Dispatch_Table_Entity (gnat_entity))
+ {
+ char *p;
+ Get_Encoded_Name (gnat_entity);
+ p = strchr (Name_Buffer, '_');
+ gcc_assert (p);
+ strcpy (p+2, "dtS");
+ gnu_entity_name = get_identifier (Name_Buffer);
+ }
+
+ /* When the subtype has discriminants and these discriminants affect
+ the initial shape it has inherited, factor them in. But for an
+ Unchecked_Union (it must be an Itype), just return the type.
+ We can't simply test Is_Constrained, because a private subtype
+ without discriminants of a type with defaulted discriminants is
+ Is_Constrained but isn't actually constrained! */
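+
+ /* A hypothetical example of such a constrained subtype:
+
+ type Rec (D : Boolean := False) is record ... end record;
+ subtype Rec_True is Rec (True);
+
+ Each field of Rec_True gets the type it has in the subtype, with
+ its size and position recomputed under the substitution
+ D => True.  */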
+ if (IN (Ekind (gnat_base_type), Record_Kind)
+ && !Is_Unchecked_Union (gnat_base_type)
+ && !Is_For_Access_Subtype (gnat_entity)
+ && Has_Discriminants (gnat_entity)
+ && Is_Constrained (gnat_entity)
+ && Stored_Constraint (gnat_entity) != No_Elist)
+ {
+ vec<subst_pair> gnu_subst_list
+ = build_subst_list (gnat_entity, gnat_base_type, definition);
+ tree gnu_unpad_base_type, gnu_rep_part, gnu_variant_part;
+ tree gnu_pos_list, gnu_field_list = NULL_TREE;
+ bool selected_variant = false, all_constant_pos = true;
+ Entity_Id gnat_field;
+ vec<variant_desc> gnu_variant_list;
+
+ gnu_type = make_node (RECORD_TYPE);
+ TYPE_NAME (gnu_type) = gnu_entity_name;
+ TYPE_PACKED (gnu_type) = TYPE_PACKED (gnu_base_type);
+ process_attributes (&gnu_type, &attr_list, true, gnat_entity);
+
+ /* Set the size, alignment and alias set of the new type to
+ match that of the old one, doing required substitutions. */
+ copy_and_substitute_in_size (gnu_type, gnu_base_type,
+ gnu_subst_list);
+
+ if (TYPE_IS_PADDING_P (gnu_base_type))
+ gnu_unpad_base_type = TREE_TYPE (TYPE_FIELDS (gnu_base_type));
+ else
+ gnu_unpad_base_type = gnu_base_type;
+
+ /* Look for REP and variant parts in the base type. */
+ gnu_rep_part = get_rep_part (gnu_unpad_base_type);
+ gnu_variant_part = get_variant_part (gnu_unpad_base_type);
+
+ /* If there is a variant part, we must compute whether the
+ constraints statically select a particular variant. If
+ so, we simply drop the qualified union and flatten the
+ list of fields. Otherwise we'll build a new qualified
+ union for the variants that are still relevant. */
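+
+ /* E.g. for a variant part "case D is when True => ... when False
+ => ... end case" and a subtype constrained by D => True, the True
+ alternative is statically selected and its fields are flattened
+ into the subtype; a constraint that does not statically select
+ one alternative yields a pruned qualified union instead.  */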
+ if (gnu_variant_part)
+ {
+ variant_desc *v;
+ unsigned int i;
+
+ gnu_variant_list
+ = build_variant_list (TREE_TYPE (gnu_variant_part),
+ gnu_subst_list,
+ vNULL);
+
+ /* If all the qualifiers are unconditionally true, the
+ innermost variant is statically selected. */
+ selected_variant = true;
+ FOR_EACH_VEC_ELT (gnu_variant_list, i, v)
+ if (!integer_onep (v->qual))
+ {
+ selected_variant = false;
+ break;
+ }
+
+ /* Otherwise, create the new variants. */
+ if (!selected_variant)
+ FOR_EACH_VEC_ELT (gnu_variant_list, i, v)
+ {
+ tree old_variant = v->type;
+ tree new_variant = make_node (RECORD_TYPE);
+ tree suffix
+ = concat_name (DECL_NAME (gnu_variant_part),
+ IDENTIFIER_POINTER
+ (DECL_NAME (v->field)));
+ TYPE_NAME (new_variant)
+ = concat_name (TYPE_NAME (gnu_type),
+ IDENTIFIER_POINTER (suffix));
+ copy_and_substitute_in_size (new_variant, old_variant,
+ gnu_subst_list);
+ v->new_type = new_variant;
+ }
+ }
+ else
+ {
+ gnu_variant_list.create (0);
+ selected_variant = false;
+ }
+
+ /* Make a list of fields and their position in the base type. */
+ gnu_pos_list
+ = build_position_list (gnu_unpad_base_type,
+ gnu_variant_list.exists ()
+ && !selected_variant,
+ size_zero_node, bitsize_zero_node,
+ BIGGEST_ALIGNMENT, NULL_TREE);
+
+ /* Now go down every component in the subtype and compute its
+ size and position from those of the component in the base
+ type and from the constraints of the subtype. */
+ for (gnat_field = First_Entity (gnat_entity);
+ Present (gnat_field);
+ gnat_field = Next_Entity (gnat_field))
+ if ((Ekind (gnat_field) == E_Component
+ || Ekind (gnat_field) == E_Discriminant)
+ && !(Present (Corresponding_Discriminant (gnat_field))
+ && Is_Tagged_Type (gnat_base_type))
+ && Underlying_Type
+ (Scope (Original_Record_Component (gnat_field)))
+ == gnat_base_type)
+ {
+ Name_Id gnat_name = Chars (gnat_field);
+ Entity_Id gnat_old_field
+ = Original_Record_Component (gnat_field);
+ tree gnu_old_field
+ = gnat_to_gnu_field_decl (gnat_old_field);
+ tree gnu_context = DECL_CONTEXT (gnu_old_field);
+ tree gnu_field, gnu_field_type, gnu_size, gnu_pos;
+ tree gnu_cont_type, gnu_last = NULL_TREE;
+
+ /* If the type is the same, retrieve the GCC type from the
+ old field to take into account possible adjustments. */
+ if (Etype (gnat_field) == Etype (gnat_old_field))
+ gnu_field_type = TREE_TYPE (gnu_old_field);
+ else
+ gnu_field_type = gnat_to_gnu_type (Etype (gnat_field));
+
+ /* If there was a component clause, the field types must be
+ the same for the type and subtype, so copy the data from
+ the old field to avoid recomputation here. Also if the
+ field is justified modular and the optimization in
+ gnat_to_gnu_field was applied. */
+ if (Present (Component_Clause (gnat_old_field))
+ || (TREE_CODE (gnu_field_type) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (gnu_field_type)
+ && TREE_TYPE (TYPE_FIELDS (gnu_field_type))
+ == TREE_TYPE (gnu_old_field)))
+ {
+ gnu_size = DECL_SIZE (gnu_old_field);
+ gnu_field_type = TREE_TYPE (gnu_old_field);
+ }
+
+ /* If the old field was packed and of constant size, we
+ have to get the old size here, as it might differ from
+ what the Etype conveys and the latter might overlap
+ onto the following field. Try to arrange the type for
+ possible better packing along the way. */
+ else if (DECL_PACKED (gnu_old_field)
+ && TREE_CODE (DECL_SIZE (gnu_old_field))
+ == INTEGER_CST)
+ {
+ gnu_size = DECL_SIZE (gnu_old_field);
+ if (RECORD_OR_UNION_TYPE_P (gnu_field_type)
+ && !TYPE_FAT_POINTER_P (gnu_field_type)
+ && tree_fits_uhwi_p (TYPE_SIZE (gnu_field_type)))
+ gnu_field_type
+ = make_packable_type (gnu_field_type, true);
+ }
+
+ else
+ gnu_size = TYPE_SIZE (gnu_field_type);
+
+ /* If the context of the old field is the base type or its
+ REP part (if any), put the field directly in the new
+ type; otherwise look up the context in the variant list
+ and put the field either in the new type if there is a
+ selected variant or in one of the new variants. */
+ if (gnu_context == gnu_unpad_base_type
+ || (gnu_rep_part
+ && gnu_context == TREE_TYPE (gnu_rep_part)))
+ gnu_cont_type = gnu_type;
+ else
+ {
+ variant_desc *v;
+ unsigned int i;
+ tree rep_part;
+
+ FOR_EACH_VEC_ELT (gnu_variant_list, i, v)
+ if (gnu_context == v->type
+ || ((rep_part = get_rep_part (v->type))
+ && gnu_context == TREE_TYPE (rep_part)))
+ break;
+ if (v)
+ {
+ if (selected_variant)
+ gnu_cont_type = gnu_type;
+ else
+ gnu_cont_type = v->new_type;
+ }
+ else
+ /* The front-end may pass us "ghost" components if
+ it fails to recognize that a constrained subtype
+ is statically constrained. Discard them. */
+ continue;
+ }
+
+ /* Now create the new field modeled on the old one. */
+ gnu_field
+ = create_field_decl_from (gnu_old_field, gnu_field_type,
+ gnu_cont_type, gnu_size,
+ gnu_pos_list, gnu_subst_list);
+ gnu_pos = DECL_FIELD_OFFSET (gnu_field);
+
+ /* Put it in one of the new variants directly. */
+ if (gnu_cont_type != gnu_type)
+ {
+ DECL_CHAIN (gnu_field) = TYPE_FIELDS (gnu_cont_type);
+ TYPE_FIELDS (gnu_cont_type) = gnu_field;
+ }
+
+ /* To match the layout crafted in components_to_record,
+ if this is the _Tag or _Parent field, put it before
+ any other fields. */
+ else if (gnat_name == Name_uTag
+ || gnat_name == Name_uParent)
+ gnu_field_list = chainon (gnu_field_list, gnu_field);
+
+ /* Similarly, if this is the _Controller field, put
+ it before the other fields except for the _Tag or
+ _Parent field. */
+ else if (gnat_name == Name_uController && gnu_last)
+ {
+ DECL_CHAIN (gnu_field) = DECL_CHAIN (gnu_last);
+ DECL_CHAIN (gnu_last) = gnu_field;
+ }
+
+ /* Otherwise, if this is a regular field, put it after
+ the other fields. */
+ else
+ {
+ DECL_CHAIN (gnu_field) = gnu_field_list;
+ gnu_field_list = gnu_field;
+ if (!gnu_last)
+ gnu_last = gnu_field;
+ if (TREE_CODE (gnu_pos) != INTEGER_CST)
+ all_constant_pos = false;
+ }
+
+ save_gnu_tree (gnat_field, gnu_field, false);
+ }
+
+ /* If there is a variant list, a selected variant and the fields
+ all have a constant position, put them in order of increasing
+ position to match that of constant CONSTRUCTORs. Likewise if
+ there is no variant list but a REP part, since the latter has
+ been flattened in the process. */
+ if (((gnu_variant_list.exists () && selected_variant)
+ || (!gnu_variant_list.exists () && gnu_rep_part))
+ && all_constant_pos)
+ {
+ const int len = list_length (gnu_field_list);
+ tree *field_arr = XALLOCAVEC (tree, len), t;
+ int i;
+
+ for (t = gnu_field_list, i = 0; t; t = DECL_CHAIN (t), i++)
+ field_arr[i] = t;
+
+ qsort (field_arr, len, sizeof (tree), compare_field_bitpos);
+
+ gnu_field_list = NULL_TREE;
+ for (i = 0; i < len; i++)
+ {
+ DECL_CHAIN (field_arr[i]) = gnu_field_list;
+ gnu_field_list = field_arr[i];
+ }
+ }
+
+ /* If there is a variant list and no selected variant, we need
+ to create the nest of variant parts from the old nest. */
+ else if (gnu_variant_list.exists () && !selected_variant)
+ {
+ tree new_variant_part
+ = create_variant_part_from (gnu_variant_part,
+ gnu_variant_list, gnu_type,
+ gnu_pos_list, gnu_subst_list);
+ DECL_CHAIN (new_variant_part) = gnu_field_list;
+ gnu_field_list = new_variant_part;
+ }
+
+ /* Now go through the entities again looking for Itypes that
+ we have not elaborated but should (e.g., Etypes of fields
+ that have Original_Components). */
+ for (gnat_field = First_Entity (gnat_entity);
+ Present (gnat_field); gnat_field = Next_Entity (gnat_field))
+ if ((Ekind (gnat_field) == E_Discriminant
+ || Ekind (gnat_field) == E_Component)
+ && !present_gnu_tree (Etype (gnat_field)))
+ gnat_to_gnu_entity (Etype (gnat_field), NULL_TREE, 0);
+
+ /* Do not emit debug info for the type yet since we're going to
+ modify it below. */
+ finish_record_type (gnu_type, nreverse (gnu_field_list), 2,
+ false);
+ compute_record_mode (gnu_type);
+
+ /* See the E_Record_Type case for the rationale. */
+ if (TYPE_MODE (gnu_type) != BLKmode
+ && Is_By_Reference_Type (gnat_entity))
+ SET_TYPE_MODE (gnu_type, BLKmode);
+
+ TYPE_VOLATILE (gnu_type) = Treat_As_Volatile (gnat_entity);
+
+ /* Fill in locations of fields. */
+ annotate_rep (gnat_entity, gnu_type);
+
+ /* If debugging information is being written for the type, write
+ a record that shows what we are a subtype of and also make a
+ variable that indicates our size, if still variable. */
+ if (debug_info_p)
+ {
+ tree gnu_subtype_marker = make_node (RECORD_TYPE);
+ tree gnu_unpad_base_name = TYPE_NAME (gnu_unpad_base_type);
+ tree gnu_size_unit = TYPE_SIZE_UNIT (gnu_type);
+
+ if (TREE_CODE (gnu_unpad_base_name) == TYPE_DECL)
+ gnu_unpad_base_name = DECL_NAME (gnu_unpad_base_name);
+
+ TYPE_NAME (gnu_subtype_marker)
+ = create_concat_name (gnat_entity, "XVS");
+ finish_record_type (gnu_subtype_marker,
+ create_field_decl (gnu_unpad_base_name,
+ build_reference_type
+ (gnu_unpad_base_type),
+ gnu_subtype_marker,
+ NULL_TREE, NULL_TREE,
+ 0, 0),
+ 0, true);
+
+ add_parallel_type (gnu_type, gnu_subtype_marker);
+
+ if (definition
+ && TREE_CODE (gnu_size_unit) != INTEGER_CST
+ && !CONTAINS_PLACEHOLDER_P (gnu_size_unit))
+ TYPE_SIZE_UNIT (gnu_subtype_marker)
+ = create_var_decl (create_concat_name (gnat_entity,
+ "XVZ"),
+ NULL_TREE, sizetype, gnu_size_unit,
+ false, false, false, false, NULL,
+ gnat_entity);
+ }
+
+ gnu_variant_list.release ();
+ gnu_subst_list.release ();
+
+ /* Now we can finalize it. */
+ rest_of_record_type_compilation (gnu_type);
+ }
+
+ /* Otherwise, go down all the components in the new type and make
+ them equivalent to those in the base type. */
+ else
+ {
+ gnu_type = gnu_base_type;
+
+ for (gnat_temp = First_Entity (gnat_entity);
+ Present (gnat_temp);
+ gnat_temp = Next_Entity (gnat_temp))
+ if ((Ekind (gnat_temp) == E_Discriminant
+ && !Is_Unchecked_Union (gnat_base_type))
+ || Ekind (gnat_temp) == E_Component)
+ save_gnu_tree (gnat_temp,
+ gnat_to_gnu_field_decl
+ (Original_Record_Component (gnat_temp)),
+ false);
+ }
+ }
+ break;
+
+ case E_Access_Subprogram_Type:
+ /* Use the special descriptor type for dispatch tables if needed,
+ that is to say for the Prim_Ptr of a-tags.ads and its clones.
+ Note that we are only required to do so for static tables in
+ order to be compatible with the C++ ABI, but Ada 2005 allows
+ extending library-level tagged types at the local level, so
+ we do it in the non-static case as well. */
+ if (TARGET_VTABLE_USES_DESCRIPTORS
+ && Is_Dispatch_Table_Entity (gnat_entity))
+ {
+ gnu_type = fdesc_type_node;
+ gnu_size = TYPE_SIZE (gnu_type);
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case E_Anonymous_Access_Subprogram_Type:
+ /* If we are not defining this entity, and we have incomplete
+ entities being processed above us, make a dummy type and
+ fill it in later. */
+ if (!definition && defer_incomplete_level != 0)
+ {
+ struct incomplete *p = XNEW (struct incomplete);
+
+ gnu_type
+ = build_pointer_type
+ (make_dummy_type (Directly_Designated_Type (gnat_entity)));
+ gnu_decl = create_type_decl (gnu_entity_name, gnu_type,
+ !Comes_From_Source (gnat_entity),
+ debug_info_p, gnat_entity);
+ this_made_decl = true;
+ gnu_type = TREE_TYPE (gnu_decl);
+ save_gnu_tree (gnat_entity, gnu_decl, false);
+ saved = true;
+
+ p->old_type = TREE_TYPE (gnu_type);
+ p->full_type = Directly_Designated_Type (gnat_entity);
+ p->next = defer_incomplete_list;
+ defer_incomplete_list = p;
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case E_Allocator_Type:
+ case E_Access_Type:
+ case E_Access_Attribute_Type:
+ case E_Anonymous_Access_Type:
+ case E_General_Access_Type:
+ {
+ /* The designated type and its equivalent type for gigi. */
+ Entity_Id gnat_desig_type = Directly_Designated_Type (gnat_entity);
+ Entity_Id gnat_desig_equiv = Gigi_Equivalent_Type (gnat_desig_type);
+ /* Whether it comes from a limited with. */
+ bool is_from_limited_with
+ = (IN (Ekind (gnat_desig_equiv), Incomplete_Kind)
+ && From_Limited_With (gnat_desig_equiv));
+ /* The "full view" of the designated type. If this is an incomplete
+ entity from a limited with, treat its non-limited view as the full
+ view. Otherwise, if this is an incomplete or private type, use the
+ full view. In the former case, we might point to a private type,
+ in which case we need its full view. Also, we want to look at the
+ actual type used for the representation, so this takes a total of
+ three steps. */
+ Entity_Id gnat_desig_full_direct_first
+ = (is_from_limited_with
+ ? Non_Limited_View (gnat_desig_equiv)
+ : (IN (Ekind (gnat_desig_equiv), Incomplete_Or_Private_Kind)
+ ? Full_View (gnat_desig_equiv) : Empty));
+ Entity_Id gnat_desig_full_direct
+ = ((is_from_limited_with
+ && Present (gnat_desig_full_direct_first)
+ && IN (Ekind (gnat_desig_full_direct_first), Private_Kind))
+ ? Full_View (gnat_desig_full_direct_first)
+ : gnat_desig_full_direct_first);
+ Entity_Id gnat_desig_full
+ = Gigi_Equivalent_Type (gnat_desig_full_direct);
+ /* The type actually used to represent the designated type, either
+ gnat_desig_full or gnat_desig_equiv. */
+ Entity_Id gnat_desig_rep;
+ /* True if this is a pointer to an unconstrained array. */
+ bool is_unconstrained_array;
+ /* We want to know if we'll be seeing the freeze node for any
+ incomplete type we may be pointing to. */
+ bool in_main_unit
+ = (Present (gnat_desig_full)
+ ? In_Extended_Main_Code_Unit (gnat_desig_full)
+ : In_Extended_Main_Code_Unit (gnat_desig_type));
+ /* True if we make a dummy type here. */
+ bool made_dummy = false;
+ /* The mode to be used for the pointer type. */
+ enum machine_mode p_mode = mode_for_size (esize, MODE_INT, 0);
+ /* The GCC type used for the designated type. */
+ tree gnu_desig_type = NULL_TREE;
+
+ if (!targetm.valid_pointer_mode (p_mode))
+ p_mode = ptr_mode;
+
+ /* If either the designated type or its full view is an unconstrained
+ array subtype, replace it with the type it's a subtype of. This
+ avoids problems with multiple copies of unconstrained array types.
+ Likewise, if the designated type is a subtype of an incomplete
+ record type, use the parent type to avoid order of elaboration
+ issues. This can lose some code efficiency, but there is no
+ alternative. */
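+
+ /* E.g. (hypothetically) if the designated type is an anonymous
+ unconstrained subtype of "type Vec is array (Positive range <>)
+ of Float", the access type is built as if it designated Vec
+ itself, so that all accesses to Vec share a single fat pointer
+ type.  */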
+ if (Ekind (gnat_desig_equiv) == E_Array_Subtype
+ && !Is_Constrained (gnat_desig_equiv))
+ gnat_desig_equiv = Etype (gnat_desig_equiv);
+ if (Present (gnat_desig_full)
+ && ((Ekind (gnat_desig_full) == E_Array_Subtype
+ && !Is_Constrained (gnat_desig_full))
+ || (Ekind (gnat_desig_full) == E_Record_Subtype
+ && Ekind (Etype (gnat_desig_full)) == E_Record_Type)))
+ gnat_desig_full = Etype (gnat_desig_full);
+
+ /* Set the type that's actually the representation of the designated
+ type and also flag whether we have an unconstrained array. */
+ gnat_desig_rep
+ = Present (gnat_desig_full) ? gnat_desig_full : gnat_desig_equiv;
+ is_unconstrained_array
+ = Is_Array_Type (gnat_desig_rep) && !Is_Constrained (gnat_desig_rep);
+
+ /* If we are pointing to an incomplete type whose completion is an
+ unconstrained array, make dummy fat and thin pointer types to it.
+ Likewise if the type itself is dummy or an unconstrained array. */
+ if (is_unconstrained_array
+ && (Present (gnat_desig_full)
+ || (present_gnu_tree (gnat_desig_equiv)
+ && TYPE_IS_DUMMY_P
+ (TREE_TYPE (get_gnu_tree (gnat_desig_equiv))))
+ || (!in_main_unit
+ && defer_incomplete_level != 0
+ && !present_gnu_tree (gnat_desig_equiv))
+ || (in_main_unit
+ && is_from_limited_with
+ && Present (Freeze_Node (gnat_desig_equiv)))))
+ {
+ if (present_gnu_tree (gnat_desig_rep))
+ gnu_desig_type = TREE_TYPE (get_gnu_tree (gnat_desig_rep));
+ else
+ {
+ gnu_desig_type = make_dummy_type (gnat_desig_rep);
+ made_dummy = true;
+ }
+
+ /* If the type we got back already has a pointer type, that pointer
+ is our type. This could have happened either because the type
+ was already elaborated or because somebody else executed this
+ code before us. */
+ if (!TYPE_POINTER_TO (gnu_desig_type))
+ build_dummy_unc_pointer_types (gnat_desig_equiv, gnu_desig_type);
+ gnu_type = TYPE_POINTER_TO (gnu_desig_type);
+ }
+
+ /* If we already know what the full type is, use it. */
+ else if (Present (gnat_desig_full)
+ && present_gnu_tree (gnat_desig_full))
+ gnu_desig_type = TREE_TYPE (get_gnu_tree (gnat_desig_full));
+
+ /* Get the type of the thing we are to point to and build a pointer to
+ it. If it is a reference to an incomplete or private type with a
+ full view that is a record, make a dummy type node and get the
+ actual type later when we have verified it is safe. */
+ else if ((!in_main_unit
+ && !present_gnu_tree (gnat_desig_equiv)
+ && Present (gnat_desig_full)
+ && !present_gnu_tree (gnat_desig_full)
+ && Is_Record_Type (gnat_desig_full))
+ /* Likewise if we are pointing to a record or array and we are
+ to defer elaborating incomplete types. We do this as this
+ access type may be the full view of a private type. Note
+ that the unconstrained array case is handled above. */
+ || ((!in_main_unit || imported_p)
+ && defer_incomplete_level != 0
+ && !present_gnu_tree (gnat_desig_equiv)
+ && (Is_Record_Type (gnat_desig_rep)
+ || Is_Array_Type (gnat_desig_rep)))
+ /* If this is a reference from a limited_with type back to our
+ main unit and there's a freeze node for it, either we have
+ already processed the declaration and made the dummy type,
+ in which case we just reuse the latter, or we have not yet,
+ in which case we make the dummy type and it will be reused
+ when the declaration is finally processed. In both cases,
+ the pointer eventually created below will be automatically
+ adjusted when the freeze node is processed. Note that the
+ unconstrained array case is handled above. */
+ || (in_main_unit
+ && is_from_limited_with
+ && Present (Freeze_Node (gnat_desig_rep))))
+ {
+ gnu_desig_type = make_dummy_type (gnat_desig_equiv);
+ made_dummy = true;
+ }
+
+ /* Otherwise handle the case of a pointer to itself. */
+ else if (gnat_desig_equiv == gnat_entity)
+ {
+ gnu_type
+ = build_pointer_type_for_mode (void_type_node, p_mode,
+ No_Strict_Aliasing (gnat_entity));
+ TREE_TYPE (gnu_type) = TYPE_POINTER_TO (gnu_type) = gnu_type;
+ }
+
+ /* If expansion is disabled, the equivalent type of a concurrent type
+ is absent, so build a dummy pointer type. */
+ else if (type_annotate_only && No (gnat_desig_equiv))
+ gnu_type = ptr_void_type_node;
+
+ /* Finally, handle the default case where we can just elaborate our
+ designated type. */
+ else
+ gnu_desig_type = gnat_to_gnu_type (gnat_desig_equiv);
+
+ /* It is possible that a call to gnat_to_gnu_type above resolved our
+ type. If so, just return it. */
+ if (present_gnu_tree (gnat_entity))
+ {
+ maybe_present = true;
+ break;
+ }
+
+ /* If we haven't done it yet, build the pointer type the usual way. */
+ if (!gnu_type)
+ {
+ /* Modify the designated type if we are pointing only to constant
+ objects, but don't do it for unconstrained arrays. */
+ if (Is_Access_Constant (gnat_entity)
+ && TREE_CODE (gnu_desig_type) != UNCONSTRAINED_ARRAY_TYPE)
+ {
+ gnu_desig_type
+ = build_qualified_type
+ (gnu_desig_type,
+ TYPE_QUALS (gnu_desig_type) | TYPE_QUAL_CONST);
+
+ /* Some extra processing is required if we are building a
+ pointer to an incomplete type (in the GCC sense). We might
+ have such a type if we just made a dummy, or directly out
+ of the call to gnat_to_gnu_type above if we are processing
+ an access type for a record component designating the
+ record type itself. */
+ if (TYPE_MODE (gnu_desig_type) == VOIDmode)
+ {
+ /* We must ensure that the pointer to variant we make will
+ be processed by update_pointer_to when the initial type
+ is completed. Pretend we made a dummy and let further
+ processing act as usual. */
+ made_dummy = true;
+
+ /* We must ensure that update_pointer_to will not retrieve
+ the dummy variant when building a properly qualified
+ version of the complete type. We take advantage of the
+ fact that get_qualified_type is requiring TYPE_NAMEs to
+ match to influence build_qualified_type and then also
+ update_pointer_to here. */
+ TYPE_NAME (gnu_desig_type)
+ = create_concat_name (gnat_desig_type, "INCOMPLETE_CST");
+ }
+ }
+
+ gnu_type
+ = build_pointer_type_for_mode (gnu_desig_type, p_mode,
+ No_Strict_Aliasing (gnat_entity));
+ }
+
+ /* If we are not defining this object and we have made a dummy pointer,
+ save our current definition, evaluate the actual type, and replace
+ the tentative type we made with the actual one. If we are to defer
+ actually looking up the actual type, make an entry in the deferred
+ list. If this is from a limited with, we may have to defer to the
+ end of the current unit. */
+ if ((!in_main_unit || is_from_limited_with) && made_dummy)
+ {
+ tree gnu_old_desig_type;
+
+ if (TYPE_IS_FAT_POINTER_P (gnu_type))
+ {
+ gnu_old_desig_type = TYPE_UNCONSTRAINED_ARRAY (gnu_type);
+ if (esize == POINTER_SIZE)
+ gnu_type = build_pointer_type
+ (TYPE_OBJECT_RECORD_TYPE (gnu_old_desig_type));
+ }
+ else
+ gnu_old_desig_type = TREE_TYPE (gnu_type);
+
+ process_attributes (&gnu_type, &attr_list, false, gnat_entity);
+ gnu_decl = create_type_decl (gnu_entity_name, gnu_type,
+ !Comes_From_Source (gnat_entity),
+ debug_info_p, gnat_entity);
+ this_made_decl = true;
+ gnu_type = TREE_TYPE (gnu_decl);
+ save_gnu_tree (gnat_entity, gnu_decl, false);
+ saved = true;
+
+ /* Note that the call to gnat_to_gnu_type on gnat_desig_equiv might
+ update gnu_old_desig_type directly, in which case it will not be
+ a dummy type any more when we get into update_pointer_to.
+
+ This can happen e.g. when the designated type is a record type,
+ because the elaboration of record types starts with an initial
+ node from make_dummy_type, which may be the same node as the one
+ we got.
+
+ Besides, variants of this non-dummy type might have been created
+ along the way. update_pointer_to is expected to properly take
+ care of those situations. */
+ if (defer_incomplete_level == 0 && !is_from_limited_with)
+ {
+ update_pointer_to (TYPE_MAIN_VARIANT (gnu_old_desig_type),
+ gnat_to_gnu_type (gnat_desig_equiv));
+ }
+ else
+ {
+ struct incomplete *p = XNEW (struct incomplete);
+ struct incomplete **head
+ = (is_from_limited_with
+ ? &defer_limited_with : &defer_incomplete_list);
+ p->old_type = gnu_old_desig_type;
+ p->full_type = gnat_desig_equiv;
+ p->next = *head;
+ *head = p;
+ }
+ }
+ }
+ break;
+
+ case E_Access_Protected_Subprogram_Type:
+ case E_Anonymous_Access_Protected_Subprogram_Type:
+ if (type_annotate_only && No (gnat_equiv_type))
+ gnu_type = ptr_void_type_node;
+ else
+ {
+ /* The run-time representation is the equivalent type. */
+ gnu_type = gnat_to_gnu_type (gnat_equiv_type);
+ maybe_present = true;
+ }
+
+ if (Is_Itype (Directly_Designated_Type (gnat_entity))
+ && !present_gnu_tree (Directly_Designated_Type (gnat_entity))
+ && No (Freeze_Node (Directly_Designated_Type (gnat_entity)))
+ && !Is_Record_Type (Scope (Directly_Designated_Type (gnat_entity))))
+ gnat_to_gnu_entity (Directly_Designated_Type (gnat_entity),
+ NULL_TREE, 0);
+
+ break;
+
+ case E_Access_Subtype:
+
+ /* We treat this as identical to its base type; any constraint is
+ meaningful only to the front-end.
+
+ The designated type must be elaborated as well, if it does
+ not have its own freeze node. Designated (sub)types created
+ for constrained components of records with discriminants are
+ not frozen by the front-end and thus not elaborated by gigi,
+ because their use may appear before the base type is frozen,
+ and because it is not clear that they are needed anywhere in
+ gigi. With the current model, there is no correct place where
+ they could be elaborated. */
+
+ gnu_type = gnat_to_gnu_type (Etype (gnat_entity));
+ if (Is_Itype (Directly_Designated_Type (gnat_entity))
+ && !present_gnu_tree (Directly_Designated_Type (gnat_entity))
+ && Is_Frozen (Directly_Designated_Type (gnat_entity))
+ && No (Freeze_Node (Directly_Designated_Type (gnat_entity))))
+ {
+ /* If we are not defining this entity, and we have incomplete
+ entities being processed above us, make a dummy type and
+ elaborate it later. */
+ if (!definition && defer_incomplete_level != 0)
+ {
+ struct incomplete *p = XNEW (struct incomplete);
+
+ p->old_type
+ = make_dummy_type (Directly_Designated_Type (gnat_entity));
+ p->full_type = Directly_Designated_Type (gnat_entity);
+ p->next = defer_incomplete_list;
+ defer_incomplete_list = p;
+ }
+ else if (!IN (Ekind (Base_Type
+ (Directly_Designated_Type (gnat_entity))),
+ Incomplete_Or_Private_Kind))
+ gnat_to_gnu_entity (Directly_Designated_Type (gnat_entity),
+ NULL_TREE, 0);
+ }
+
+ maybe_present = true;
+ break;
+
+ /* Subprogram Entities
+
+ The following access functions are defined for subprograms:
+
+ Etype           Return type or Standard_Void_Type.
+ First_Formal    The first formal parameter.
+ Is_Imported     Indicates that the subprogram has appeared in
+                 an INTERFACE or IMPORT pragma. For now we
+                 assume that the external language is C.
+ Is_Exported     Likewise but for an EXPORT pragma.
+ Is_Inlined      True if the subprogram is to be inlined.
+
+ Each parameter is first checked by calling must_pass_by_ref on its
+ type to determine if it is passed by reference. For parameters that
+ are copied in, if they are Ada In Out or Out parameters, their value
+ on return becomes part of a record which becomes the return type of
+ the function (a C-level function; note that this applies only to Ada
+ procedures, so there is no Ada return type). Additional code to store
+ the parameters back is generated on the caller side. This
+ transformation is done here, not in the front-end.
+
+ The intended result of the transformation can be seen from the
+ equivalent source rewritings that follow:
+
+ Ada source:                          Generated C equivalent:
+
+                                        struct temp {int a,b};
+ procedure P (A,B: In Out ...) is      temp P (int A,B)
+ begin                                 {
+ ..                                      ..
+ end P;                                  return {A,B};
+                                       }
+
+                                       temp t;
+ P(X,Y);                               t = P(X,Y);
+                                       X = t.a , Y = t.b;
+
+ For subprogram types we need to perform mainly the same conversions to
+ GCC form that are needed for procedures and function declarations. The
+ only difference is that at the end, we make a type declaration instead
+ of a function declaration. */
+
+ case E_Subprogram_Type:
+ case E_Function:
+ case E_Procedure:
+ {
+ /* The type returned by a function or else Standard_Void_Type for a
+ procedure. */
+ Entity_Id gnat_return_type = Etype (gnat_entity);
+ tree gnu_return_type;
+ /* The first GCC parameter declaration (a PARM_DECL node). The
+ PARM_DECL nodes are chained through the DECL_CHAIN field, so this
+ actually is the head of this parameter list. */
+ tree gnu_param_list = NULL_TREE;
+ /* Likewise for the stub associated with an exported procedure. */
+ tree gnu_stub_param_list = NULL_TREE;
+ /* Non-null for subprograms containing parameters passed by copy-in
+ copy-out (Ada In Out or Out parameters not passed by reference),
+ in which case it is the list of nodes used to specify the values
+ of the In Out/Out parameters that are returned as a record upon
+ procedure return. The TREE_PURPOSE of an element of this list is
+ a field of the record and the TREE_VALUE is the PARM_DECL
+ corresponding to that field. This list will be saved in the
+ TYPE_CI_CO_LIST field of the FUNCTION_TYPE node we create. */
+ tree gnu_cico_list = NULL_TREE;
+ /* List of fields in return type of procedure with copy-in copy-out
+ parameters. */
+ tree gnu_field_list = NULL_TREE;
+ /* If an import pragma asks to map this subprogram to a GCC builtin,
+ this is the builtin DECL node. */
+ tree gnu_builtin_decl = NULL_TREE;
+ /* For the stub associated with an exported procedure. */
+ tree gnu_stub_type = NULL_TREE, gnu_stub_name = NULL_TREE;
+ tree gnu_ext_name = create_concat_name (gnat_entity, NULL);
+ Entity_Id gnat_param;
+ enum inline_status_t inline_status
+ = Has_Pragma_No_Inline (gnat_entity)
+ ? is_suppressed
+ : (Is_Inlined (gnat_entity) ? is_enabled : is_disabled);
+ bool public_flag = Is_Public (gnat_entity) || imported_p;
+ bool extern_flag
+ = (Is_Public (gnat_entity) && !definition) || imported_p;
+ bool artificial_flag = !Comes_From_Source (gnat_entity);
+ /* The semantics of "pure" in Ada essentially matches that of "const"
+ in the back-end. In particular, both properties are orthogonal to
+ the "nothrow" property if the EH circuitry is explicit in the
+ internal representation of the back-end. If we are to completely
+ hide the EH circuitry from it, we need to declare that calls to pure
+ Ada subprograms that can throw have side effects since they can
+ trigger an "abnormal" transfer of control flow; thus they can be
+ neither "const" nor "pure" in the back-end sense. */
+ bool const_flag
+ = (Exception_Mechanism == Back_End_Exceptions
+ && Is_Pure (gnat_entity));
+ bool volatile_flag = No_Return (gnat_entity);
+ bool return_by_direct_ref_p = false;
+ bool return_by_invisi_ref_p = false;
+ bool return_unconstrained_p = false;
+ bool has_stub = false;
+ int parmnum;
+
+ /* A parameter may refer to this type, so defer completion of any
+ incomplete types. */
+ if (kind == E_Subprogram_Type && !definition)
+ {
+ defer_incomplete_level++;
+ this_deferred = true;
+ }
+
+ /* If the subprogram has an alias, it is probably inherited, so
+ we can use the original one. If the original "subprogram"
+ is actually an enumeration literal, it may be the first use
+ of its type, so we must elaborate that type now. */
+ if (Present (Alias (gnat_entity)))
+ {
+ if (Ekind (Alias (gnat_entity)) == E_Enumeration_Literal)
+ gnat_to_gnu_entity (Etype (Alias (gnat_entity)), NULL_TREE, 0);
+
+ gnu_decl = gnat_to_gnu_entity (Alias (gnat_entity), gnu_expr, 0);
+
+ /* Elaborate any Itypes in the parameters of this entity. */
+ for (gnat_temp = First_Formal_With_Extras (gnat_entity);
+ Present (gnat_temp);
+ gnat_temp = Next_Formal_With_Extras (gnat_temp))
+ if (Is_Itype (Etype (gnat_temp)))
+ gnat_to_gnu_entity (Etype (gnat_temp), NULL_TREE, 0);
+
+ break;
+ }
+
+ /* If this subprogram is expected to be bound to a GCC builtin, fetch
+ the corresponding DECL node. Proper generation of calls later on
+ needs proper parameter associations, so we don't "break;" here. */
+ if (Convention (gnat_entity) == Convention_Intrinsic
+ && Present (Interface_Name (gnat_entity)))
+ {
+ gnu_builtin_decl = builtin_decl_for (gnu_ext_name);
+
+ /* Inability to find the builtin decl most often indicates a
+ genuine mistake, but imports of unregistered intrinsics are
+ sometimes issued on purpose to allow hooking in alternate
+ bodies. We post a warning conditioned on Wshadow in this case,
+ to let developers be notified on demand without risking false
+ positives with common default sets of options. */
+
+ if (gnu_builtin_decl == NULL_TREE && warn_shadow)
+ post_error ("?gcc intrinsic not found for&!", gnat_entity);
+ }
+
+ /* ??? What if we don't find the builtin node above? Warn? Err?
+ In the current state we neither warn nor err, and calls will just
+ be handled as for regular subprograms. */
+
+ /* Look into the return type and get its associated GCC tree. If it
+ is not void, compute various flags for the subprogram type. */
+ if (Ekind (gnat_return_type) == E_Void)
+ gnu_return_type = void_type_node;
+ else
+ {
+ /* Ada 2012 (AI05-0151): Incomplete types coming from a limited
+ context may now appear in parameter and result profiles. If
+ we are only annotating types, break circularities here. */
+ if (type_annotate_only
+ && IN (Ekind (gnat_return_type), Incomplete_Kind)
+ && From_Limited_With (gnat_return_type)
+ && In_Extended_Main_Code_Unit
+ (Non_Limited_View (gnat_return_type))
+ && !present_gnu_tree (Non_Limited_View (gnat_return_type)))
+ gnu_return_type = ptr_void_type_node;
+ else
+ gnu_return_type = gnat_to_gnu_type (gnat_return_type);
+
+ /* If this function returns by reference, make the actual return
+ type the pointer type and make a note of that. */
+ if (Returns_By_Ref (gnat_entity))
+ {
+ gnu_return_type = build_pointer_type (gnu_return_type);
+ return_by_direct_ref_p = true;
+ }
+
+ /* If we are supposed to return an unconstrained array type, make
+ the actual return type the fat pointer type. */
+ else if (TREE_CODE (gnu_return_type) == UNCONSTRAINED_ARRAY_TYPE)
+ {
+ gnu_return_type = TREE_TYPE (gnu_return_type);
+ return_unconstrained_p = true;
+ }
+
+ /* Likewise, if the return type requires a transient scope, the
+ return value will be allocated on the secondary stack so the
+ actual return type is the pointer type. */
+ else if (Requires_Transient_Scope (gnat_return_type))
+ {
+ gnu_return_type = build_pointer_type (gnu_return_type);
+ return_unconstrained_p = true;
+ }
+
+ /* If the Mechanism is By_Reference, ensure this function uses the
+ target's by-invisible-reference mechanism, which may not be the
+ same as above (e.g. it might be passing an extra parameter). */
+ else if (kind == E_Function
+ && Mechanism (gnat_entity) == By_Reference)
+ return_by_invisi_ref_p = true;
+
+ /* Likewise, if the return type is itself By_Reference. */
+ else if (TYPE_IS_BY_REFERENCE_P (gnu_return_type))
+ return_by_invisi_ref_p = true;
+
+ /* If the type is a padded type and the underlying type would not
+ be passed by reference or the function has a foreign convention,
+ return the underlying type. */
+ else if (TYPE_IS_PADDING_P (gnu_return_type)
+ && (!default_pass_by_ref
+ (TREE_TYPE (TYPE_FIELDS (gnu_return_type)))
+ || Has_Foreign_Convention (gnat_entity)))
+ gnu_return_type = TREE_TYPE (TYPE_FIELDS (gnu_return_type));
+
+ /* If the return type is unconstrained, that means it must have a
+ maximum size. Use the padded type as the effective return type.
+ And ensure the function uses the target's by-invisible-reference
+ mechanism to avoid copying too much data when it returns. */
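+
+ /* This arises e.g. (hypothetically) for a record type whose size
+ depends on a discriminant and which is nevertheless returned
+ directly: its GCC size contains a PLACEHOLDER_EXPR, so the result
+ is returned as a padded object of the maximum size.  */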
+ if (CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_return_type)))
+ {
+ tree orig_type = gnu_return_type;
+
+ gnu_return_type
+ = maybe_pad_type (gnu_return_type,
+ max_size (TYPE_SIZE (gnu_return_type),
+ true),
+ 0, gnat_entity, false, false, false, true);
+
+ /* Declare it now since it will never be declared otherwise.
+ This is necessary to ensure that its subtrees are properly
+ marked. */
+ if (gnu_return_type != orig_type
+ && !DECL_P (TYPE_NAME (gnu_return_type)))
+ create_type_decl (TYPE_NAME (gnu_return_type),
+ gnu_return_type, true, debug_info_p,
+ gnat_entity);
+
+ return_by_invisi_ref_p = true;
+ }
+
+ /* If the return type has a size that overflows, we cannot have
+ a function that returns that type. This usage doesn't make
+ sense anyway, so give an error here. */
+ if (TYPE_SIZE_UNIT (gnu_return_type)
+ && TREE_CODE (TYPE_SIZE_UNIT (gnu_return_type)) == INTEGER_CST
+ && !valid_constant_size_p (TYPE_SIZE_UNIT (gnu_return_type)))
+ {
+ post_error ("cannot return type whose size overflows",
+ gnat_entity);
+ gnu_return_type = copy_node (gnu_return_type);
+ TYPE_SIZE (gnu_return_type) = bitsize_zero_node;
+ TYPE_SIZE_UNIT (gnu_return_type) = size_zero_node;
+ TYPE_MAIN_VARIANT (gnu_return_type) = gnu_return_type;
+ TYPE_NEXT_VARIANT (gnu_return_type) = NULL_TREE;
+ }
+ }
+
+ /* Loop over the parameters and get their associated GCC tree. While
+ doing this, build a copy-in copy-out structure if we need one. */
+ for (gnat_param = First_Formal_With_Extras (gnat_entity), parmnum = 0;
+ Present (gnat_param);
+ gnat_param = Next_Formal_With_Extras (gnat_param), parmnum++)
+ {
+ Entity_Id gnat_param_type = Etype (gnat_param);
+ tree gnu_param_name = get_entity_name (gnat_param);
+ tree gnu_param_type, gnu_param, gnu_field;
+ Mechanism_Type mech = Mechanism (gnat_param);
+ bool copy_in_copy_out = false, fake_param_type;
+
+ /* Ada 2012 (AI05-0151): Incomplete types coming from a limited
+ context may now appear in parameter and result profiles. If
+ we are only annotating types, break circularities here. */
+ if (type_annotate_only
+ && IN (Ekind (gnat_param_type), Incomplete_Kind)
+ && From_Limited_With (Etype (gnat_param_type))
+ && In_Extended_Main_Code_Unit
+ (Non_Limited_View (gnat_param_type))
+ && !present_gnu_tree (Non_Limited_View (gnat_param_type)))
+ {
+ gnu_param_type = ptr_void_type_node;
+ fake_param_type = true;
+ }
+ else
+ {
+ gnu_param_type = gnat_to_gnu_type (gnat_param_type);
+ fake_param_type = false;
+ }
+
+ /* Builtins are expanded inline and there is no real call sequence
+ involved. So the type expected by the underlying expander is
+ always the type of each argument "as is". */
+ if (gnu_builtin_decl)
+ mech = By_Copy;
+ /* Handle the first parameter of a valued procedure specially. */
+ else if (Is_Valued_Procedure (gnat_entity) && parmnum == 0)
+ mech = By_Copy_Return;
+ /* Otherwise, see if a Mechanism was supplied that forced this
+ parameter to be passed one way or another. */
+ else if (mech == Default
+ || mech == By_Copy || mech == By_Reference)
+ ;
+ else if (By_Descriptor_Last <= mech && mech <= By_Descriptor)
+ mech = By_Descriptor;
+ else if (By_Short_Descriptor_Last <= mech
+ && mech <= By_Short_Descriptor)
+ mech = By_Short_Descriptor;
+ else if (mech > 0)
+ {
+ if (TREE_CODE (gnu_param_type) == UNCONSTRAINED_ARRAY_TYPE
+ || TREE_CODE (TYPE_SIZE (gnu_param_type)) != INTEGER_CST
+ || 0 < compare_tree_int (TYPE_SIZE (gnu_param_type),
+ mech))
+ mech = By_Reference;
+ else
+ mech = By_Copy;
+ }
+ else
+ {
+ post_error ("unsupported mechanism for&", gnat_param);
+ mech = Default;
+ }
+
+ /* Do not call gnat_to_gnu_param for a fake parameter type since
+ it will try to use the real type again. */
+ if (fake_param_type)
+ {
+ if (Ekind (gnat_param) == E_Out_Parameter)
+ gnu_param = NULL_TREE;
+ else
+ {
+ gnu_param
+ = create_param_decl (gnu_param_name, gnu_param_type,
+ false);
+ Set_Mechanism (gnat_param,
+ mech == Default ? By_Copy : mech);
+ if (Ekind (gnat_param) == E_In_Out_Parameter)
+ copy_in_copy_out = true;
+ }
+ }
+ else
+ gnu_param
+ = gnat_to_gnu_param (gnat_param, mech, gnat_entity,
+ Has_Foreign_Convention (gnat_entity),
+ &copy_in_copy_out);
+
+ /* The call above returns either a PARM_DECL or, if no parameter
+ needs to be passed, a type; in either case, adjust the type. */
+ if (DECL_P (gnu_param))
+ gnu_param_type = TREE_TYPE (gnu_param);
+ else
+ {
+ gnu_param_type = gnu_param;
+ gnu_param = NULL_TREE;
+ }
+
+ /* The failure of this assertion will very likely come from an
+ order of elaboration issue for the type of the parameter. */
+ gcc_assert (kind == E_Subprogram_Type
+ || !TYPE_IS_DUMMY_P (gnu_param_type)
+ || type_annotate_only);
+
+ if (gnu_param)
+ {
+ /* If it's an exported subprogram, we build a parameter list
+ in parallel, in case we need to emit a stub for it. */
+ if (Is_Exported (gnat_entity))
+ {
+ gnu_stub_param_list
+ = chainon (gnu_param, gnu_stub_param_list);
+ /* Change By_Descriptor parameter to By_Reference for
+ the internal version of an exported subprogram. */
+ if (mech == By_Descriptor || mech == By_Short_Descriptor)
+ {
+ gnu_param
+ = gnat_to_gnu_param (gnat_param, By_Reference,
+ gnat_entity, false,
+ &copy_in_copy_out);
+ has_stub = true;
+ }
+ else
+ gnu_param = copy_node (gnu_param);
+ }
+
+ gnu_param_list = chainon (gnu_param, gnu_param_list);
+ Sloc_to_locus (Sloc (gnat_param),
+ &DECL_SOURCE_LOCATION (gnu_param));
+ save_gnu_tree (gnat_param, gnu_param, false);
+
+ /* If a parameter is a pointer, this function may modify
+ memory through it and thus shouldn't be considered
+ a const function. Also, the memory may be modified
+ between two calls, so they can't be CSE'ed. The latter
+ case also handles by-ref parameters. */
+ if (POINTER_TYPE_P (gnu_param_type)
+ || TYPE_IS_FAT_POINTER_P (gnu_param_type))
+ const_flag = false;
+ }
+
+ if (copy_in_copy_out)
+ {
+ if (!gnu_cico_list)
+ {
+ tree gnu_new_ret_type = make_node (RECORD_TYPE);
+
+ /* If this is a function, we also need a field for the
+ return value to be placed. */
+ if (TREE_CODE (gnu_return_type) != VOID_TYPE)
+ {
+ gnu_field
+ = create_field_decl (get_identifier ("RETVAL"),
+ gnu_return_type,
+ gnu_new_ret_type, NULL_TREE,
+ NULL_TREE, 0, 0);
+ Sloc_to_locus (Sloc (gnat_entity),
+ &DECL_SOURCE_LOCATION (gnu_field));
+ gnu_field_list = gnu_field;
+ gnu_cico_list
+ = tree_cons (gnu_field, void_type_node, NULL_TREE);
+ }
+
+ gnu_return_type = gnu_new_ret_type;
+ TYPE_NAME (gnu_return_type) = get_identifier ("RETURN");
+ /* Set a default alignment to speed up accesses. But we
+ shouldn't increase the size of the structure too much,
+ lest it no longer fit in return registers. */
+ TYPE_ALIGN (gnu_return_type)
+ = get_mode_alignment (ptr_mode);
+ }
+
+ gnu_field
+ = create_field_decl (gnu_param_name, gnu_param_type,
+ gnu_return_type, NULL_TREE, NULL_TREE,
+ 0, 0);
+ Sloc_to_locus (Sloc (gnat_param),
+ &DECL_SOURCE_LOCATION (gnu_field));
+ DECL_CHAIN (gnu_field) = gnu_field_list;
+ gnu_field_list = gnu_field;
+ gnu_cico_list
+ = tree_cons (gnu_field, gnu_param, gnu_cico_list);
+ }
+ }
+
+ if (gnu_cico_list)
+ {
+ /* If we have a CICO list but it has only one entry, we convert
+ this function into a function that returns this object. */
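+ /* E.g. a procedure with a single Out Integer parameter becomes,
+ at the GCC level, simply a function returning an integer.  */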
+ if (list_length (gnu_cico_list) == 1)
+ gnu_return_type = TREE_TYPE (TREE_PURPOSE (gnu_cico_list));
+
+ /* Do not finalize the return type if the subprogram is stubbed
+ since structures are incomplete for the back-end. */
+ else if (Convention (gnat_entity) != Convention_Stubbed)
+ {
+ finish_record_type (gnu_return_type, nreverse (gnu_field_list),
+ 0, false);
+
+ /* Try to promote the mode of the return type if it is passed
+ in registers, again to speed up accesses. */
+ if (TYPE_MODE (gnu_return_type) == BLKmode
+ && !targetm.calls.return_in_memory (gnu_return_type,
+ NULL_TREE))
+ {
+ unsigned int size
+ = TREE_INT_CST_LOW (TYPE_SIZE (gnu_return_type));
+ unsigned int i = BITS_PER_UNIT;
+ enum machine_mode mode;
+
+ while (i < size)
+ i <<= 1;
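+ /* i is now the size rounded up to a power of 2 of at least
+ BITS_PER_UNIT bits, e.g. 32 for a 3-byte (24-bit) record.  */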
+ mode = mode_for_size (i, MODE_INT, 0);
+ if (mode != BLKmode)
+ {
+ SET_TYPE_MODE (gnu_return_type, mode);
+ TYPE_ALIGN (gnu_return_type)
+ = GET_MODE_ALIGNMENT (mode);
+ TYPE_SIZE (gnu_return_type)
+ = bitsize_int (GET_MODE_BITSIZE (mode));
+ TYPE_SIZE_UNIT (gnu_return_type)
+ = size_int (GET_MODE_SIZE (mode));
+ }
+ }
+
+ if (debug_info_p)
+ rest_of_record_type_compilation (gnu_return_type);
+ }
+ }
+
+ /* Deal with platform-specific calling conventions. */
+ if (Has_Stdcall_Convention (gnat_entity))
+ prepend_one_attribute
+ (&attr_list, ATTR_MACHINE_ATTRIBUTE,
+ get_identifier ("stdcall"), NULL_TREE,
+ gnat_entity);
+ else if (Has_Thiscall_Convention (gnat_entity))
+ prepend_one_attribute
+ (&attr_list, ATTR_MACHINE_ATTRIBUTE,
+ get_identifier ("thiscall"), NULL_TREE,
+ gnat_entity);
+
+ /* If we should request stack realignment for a foreign convention
+ subprogram, do so. Note that this applies to task entry points
+ in particular. */
+ if (FOREIGN_FORCE_REALIGN_STACK
+ && Has_Foreign_Convention (gnat_entity))
+ prepend_one_attribute
+ (&attr_list, ATTR_MACHINE_ATTRIBUTE,
+ get_identifier ("force_align_arg_pointer"), NULL_TREE,
+ gnat_entity);
+
+ /* Deal with a pragma Linker_Section on a subprogram. */
+ if ((kind == E_Function || kind == E_Procedure)
+ && Present (Linker_Section_Pragma (gnat_entity)))
+ prepend_one_attribute_pragma (&attr_list,
+ Linker_Section_Pragma (gnat_entity));
+
+ /* The lists have been built in reverse. */
+ gnu_param_list = nreverse (gnu_param_list);
+ if (has_stub)
+ gnu_stub_param_list = nreverse (gnu_stub_param_list);
+ gnu_cico_list = nreverse (gnu_cico_list);
+
+ if (kind == E_Function)
+ Set_Mechanism (gnat_entity, return_unconstrained_p
+ || return_by_direct_ref_p
+ || return_by_invisi_ref_p
+ ? By_Reference : By_Copy);
+ gnu_type
+ = create_subprog_type (gnu_return_type, gnu_param_list,
+ gnu_cico_list, return_unconstrained_p,
+ return_by_direct_ref_p,
+ return_by_invisi_ref_p);
+
+ if (has_stub)
+ gnu_stub_type
+ = create_subprog_type (gnu_return_type, gnu_stub_param_list,
+ gnu_cico_list, return_unconstrained_p,
+ return_by_direct_ref_p,
+ return_by_invisi_ref_p);
+
+ /* A subprogram that doesn't return anything shouldn't be considered
+ const: the only observable effect of a const function is its return
+ value, so a const call returning nothing could be deleted outright.
+ Note that procedures with Out (or In Out) parameters have already
+ been converted into a function with a return type. */
+ if (TREE_CODE (gnu_return_type) == VOID_TYPE)
+ const_flag = false;
+
+ gnu_type
+ = build_qualified_type (gnu_type,
+ TYPE_QUALS (gnu_type)
+ | (TYPE_QUAL_CONST * const_flag)
+ | (TYPE_QUAL_VOLATILE * volatile_flag));
+
+ if (has_stub)
+ gnu_stub_type
+ = build_qualified_type (gnu_stub_type,
+ TYPE_QUALS (gnu_stub_type)
+ | (TYPE_QUAL_CONST * const_flag)
+ | (TYPE_QUAL_VOLATILE * volatile_flag));
+
+ /* If we have a builtin decl for that function, use it. Check if the
+ profiles are compatible and warn if they are not. The checker is
+ expected to post extra diagnostics in this case. */
+ if (gnu_builtin_decl)
+ {
+ intrin_binding_t inb;
+
+ inb.gnat_entity = gnat_entity;
+ inb.ada_fntype = gnu_type;
+ inb.btin_fntype = TREE_TYPE (gnu_builtin_decl);
+
+ if (!intrin_profiles_compatible_p (&inb))
+ post_error
+ ("?profile of& doesn''t match the builtin it binds!",
+ gnat_entity);
+
+ gnu_decl = gnu_builtin_decl;
+ gnu_type = TREE_TYPE (gnu_builtin_decl);
+ break;
+ }
+
+ /* If there was no specified Interface_Name and the external and
+ internal names of the subprogram are the same, only use the
+ internal name to allow disambiguation of nested subprograms. */
+ if (No (Interface_Name (gnat_entity))
+ && gnu_ext_name == gnu_entity_name)
+ gnu_ext_name = NULL_TREE;
+
+ /* If we are defining the subprogram and it has an Address clause,
+ we must get the address expression from the saved GCC tree for the
+ subprogram if it has a Freeze_Node. Otherwise, we elaborate
+ the address expression here, since the front-end has guaranteed
+ in that case that the elaboration has no effects. If there is
+ an Address clause and we are not defining the subprogram, just
+ make it a constant. */
+ if (Present (Address_Clause (gnat_entity)))
+ {
+ tree gnu_address = NULL_TREE;
+
+ if (definition)
+ gnu_address
+ = (present_gnu_tree (gnat_entity)
+ ? get_gnu_tree (gnat_entity)
+ : gnat_to_gnu (Expression (Address_Clause (gnat_entity))));
+
+ save_gnu_tree (gnat_entity, NULL_TREE, false);
+
+ /* Convert the type of the object to a reference type that can
+ alias everything as per 13.3(19). */
+ gnu_type
+ = build_reference_type_for_mode (gnu_type, ptr_mode, true);
+ if (gnu_address)
+ gnu_address = convert (gnu_type, gnu_address);
+
+ gnu_decl
+ = create_var_decl (gnu_entity_name, gnu_ext_name, gnu_type,
+ gnu_address, false, Is_Public (gnat_entity),
+ extern_flag, false, NULL, gnat_entity);
+ DECL_BY_REF_P (gnu_decl) = 1;
+ }
+
+ else if (kind == E_Subprogram_Type)
+ {
+ process_attributes (&gnu_type, &attr_list, false, gnat_entity);
+ gnu_decl
+ = create_type_decl (gnu_entity_name, gnu_type, artificial_flag,
+ debug_info_p, gnat_entity);
+ }
+ else
+ {
+ if (has_stub)
+ {
+ gnu_stub_name = gnu_ext_name;
+ gnu_ext_name = create_concat_name (gnat_entity, "internal");
+ public_flag = false;
+ artificial_flag = true;
+ }
+
+ gnu_decl
+ = create_subprog_decl (gnu_entity_name, gnu_ext_name, gnu_type,
+ gnu_param_list, inline_status,
+ public_flag, extern_flag, artificial_flag,
+ attr_list, gnat_entity);
+ if (has_stub)
+ {
+ tree gnu_stub_decl
+ = create_subprog_decl (gnu_entity_name, gnu_stub_name,
+ gnu_stub_type, gnu_stub_param_list,
+ inline_status, true, extern_flag,
+ false, attr_list, gnat_entity);
+ SET_DECL_FUNCTION_STUB (gnu_decl, gnu_stub_decl);
+ }
+
+ /* This is unrelated to the stub built right above. */
+ DECL_STUBBED_P (gnu_decl)
+ = Convention (gnat_entity) == Convention_Stubbed;
+ }
+ }
+ break;
+
+ case E_Incomplete_Type:
+ case E_Incomplete_Subtype:
+ case E_Private_Type:
+ case E_Private_Subtype:
+ case E_Limited_Private_Type:
+ case E_Limited_Private_Subtype:
+ case E_Record_Type_With_Private:
+ case E_Record_Subtype_With_Private:
+ {
+ /* Get the "full view" of this entity. If this is an incomplete
+ entity from a limited with, treat its non-limited view as the
+ full view. Otherwise, use either the full view or the underlying
+ full view, whichever is present. This is used in all the tests
+ below. */
+ Entity_Id full_view
+ = (IN (kind, Incomplete_Kind) && From_Limited_With (gnat_entity))
+ ? Non_Limited_View (gnat_entity)
+ : Present (Full_View (gnat_entity))
+ ? Full_View (gnat_entity)
+ : Underlying_Full_View (gnat_entity);
+
+ /* If this is an incomplete type with no full view, it must be a Taft
+ Amendment type, in which case we return a dummy type. Otherwise,
+ just get the type from its Etype. */
+ if (No (full_view))
+ {
+ if (kind == E_Incomplete_Type)
+ {
+ gnu_type = make_dummy_type (gnat_entity);
+ gnu_decl = TYPE_STUB_DECL (gnu_type);
+ }
+ else
+ {
+ gnu_decl = gnat_to_gnu_entity (Etype (gnat_entity),
+ NULL_TREE, 0);
+ maybe_present = true;
+ }
+ break;
+ }
+
+ /* If we already made a type for the full view, reuse it. */
+ else if (present_gnu_tree (full_view))
+ {
+ gnu_decl = get_gnu_tree (full_view);
+ break;
+ }
+
+ /* Otherwise, if we are not defining the type now, get the type
+ from the full view. But always get the type from the full view
+ for define-on-use types, since otherwise we won't see them! */
+ else if (!definition
+ || (Is_Itype (full_view) && No (Freeze_Node (gnat_entity)))
+ || (Is_Itype (gnat_entity) && No (Freeze_Node (full_view))))
+ {
+ gnu_decl = gnat_to_gnu_entity (full_view, NULL_TREE, 0);
+ maybe_present = true;
+ break;
+ }
+
+ /* For incomplete types, make a dummy type entry which will be
+ replaced later. Save it as the full declaration's type so
+ we can do any needed updates when we see it. */
+ gnu_type = make_dummy_type (gnat_entity);
+ gnu_decl = TYPE_STUB_DECL (gnu_type);
+ if (Has_Completion_In_Body (gnat_entity))
+ DECL_TAFT_TYPE_P (gnu_decl) = 1;
+ save_gnu_tree (full_view, gnu_decl, 0);
+ break;
+ }
+
+ case E_Class_Wide_Type:
+ /* Class-wide types are always transformed into their root type. */
+ gnu_decl = gnat_to_gnu_entity (gnat_equiv_type, NULL_TREE, 0);
+ maybe_present = true;
+ break;
+
+ case E_Task_Type:
+ case E_Task_Subtype:
+ case E_Protected_Type:
+ case E_Protected_Subtype:
+ /* Concurrent types are always transformed into their record type. */
+ if (type_annotate_only && No (gnat_equiv_type))
+ gnu_type = void_type_node;
+ else
+ gnu_decl = gnat_to_gnu_entity (gnat_equiv_type, NULL_TREE, 0);
+ maybe_present = true;
+ break;
+
+ case E_Label:
+ gnu_decl = create_label_decl (gnu_entity_name, gnat_entity);
+ break;
+
+ case E_Block:
+ case E_Loop:
+ /* Nothing at all to do here, so just return an ERROR_MARK and claim
+ we've already saved it, so we don't try to save it again. */
+ gnu_decl = error_mark_node;
+ saved = true;
+ break;
+
+ case E_Abstract_State:
+ /* This is a SPARK annotation that only reaches here when compiling in
+ ASIS mode and has no characteristics to annotate. */
+ gcc_assert (type_annotate_only);
+ return error_mark_node;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* If we had a case where we evaluated another type and it might have
+ defined this one, handle it here. */
+ if (maybe_present && present_gnu_tree (gnat_entity))
+ {
+ gnu_decl = get_gnu_tree (gnat_entity);
+ saved = true;
+ }
+
+ /* If we are processing a type and there is either no decl for it or
+ we just made one, do some common processing for the type, such as
+ handling alignment and possible padding. */
+ if (is_type && (!gnu_decl || this_made_decl))
+ {
+ /* Process the attributes, if not already done. Note that the type is
+ already defined so we cannot pass true for IN_PLACE here. */
+ process_attributes (&gnu_type, &attr_list, false, gnat_entity);
+
+ /* Tell the middle-end that objects of tagged types are guaranteed to
+ be properly aligned. This is necessary because conversions to the
+ class-wide type are translated into conversions to the root type,
+ which can be less aligned than some of its derived types. */
+ if (Is_Tagged_Type (gnat_entity)
+ || Is_Class_Wide_Equivalent_Type (gnat_entity))
+ TYPE_ALIGN_OK (gnu_type) = 1;
+
+ /* Record whether the type is passed by reference. */
+ if (!VOID_TYPE_P (gnu_type) && Is_By_Reference_Type (gnat_entity))
+ TYPE_BY_REFERENCE_P (gnu_type) = 1;
+
+ /* ??? Don't set the size for a String_Literal since it is either
+ confirming or we don't handle it properly (if the low bound is
+ non-constant). */
+ if (!gnu_size && kind != E_String_Literal_Subtype)
+ {
+ Uint gnat_size = Known_Esize (gnat_entity)
+ ? Esize (gnat_entity) : RM_Size (gnat_entity);
+ gnu_size
+ = validate_size (gnat_size, gnu_type, gnat_entity, TYPE_DECL,
+ false, Has_Size_Clause (gnat_entity));
+ }
+
+ /* If a size was specified, see if we can make a new type of that size
+ by rearranging the type, for example from a fat to a thin pointer. */
+ if (gnu_size)
+ {
+ gnu_type
+ = make_type_from_size (gnu_type, gnu_size,
+ Has_Biased_Representation (gnat_entity));
+
+ if (operand_equal_p (TYPE_SIZE (gnu_type), gnu_size, 0)
+ && operand_equal_p (rm_size (gnu_type), gnu_size, 0))
+ gnu_size = NULL_TREE;
+ }
+
+ /* If the alignment hasn't already been processed and this is
+ not an unconstrained array, see if an alignment is specified.
+ If not, we pick a default alignment for atomic objects. */
+ if (align != 0 || TREE_CODE (gnu_type) == UNCONSTRAINED_ARRAY_TYPE)
+ ;
+ else if (Known_Alignment (gnat_entity))
+ {
+ align = validate_alignment (Alignment (gnat_entity), gnat_entity,
+ TYPE_ALIGN (gnu_type));
+
+ /* Warn on suspiciously large alignments. This should catch
+ errors about the (alignment,byte)/(size,bit) discrepancy. */
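+ /* E.g. "for T'Alignment use 64;" written with bits in mind would
+ actually request a 64-byte alignment, since the Alignment
+ attribute counts bytes whereas Size counts bits. */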
+ if (align > BIGGEST_ALIGNMENT && Has_Alignment_Clause (gnat_entity))
+ {
+ tree size;
+
+ /* If a size was specified, take it into account. Otherwise
+ use the RM size for records or unions as the type size has
+ already been adjusted to the alignment. */
+ if (gnu_size)
+ size = gnu_size;
+ else if (RECORD_OR_UNION_TYPE_P (gnu_type)
+ && !TYPE_FAT_POINTER_P (gnu_type))
+ size = rm_size (gnu_type);
+ else
+ size = TYPE_SIZE (gnu_type);
+
+ /* Consider an alignment as suspicious if the alignment/size
+ ratio is greater than or equal to the byte/bit ratio. */
+ if (tree_fits_uhwi_p (size)
+ && align >= tree_to_uhwi (size) * BITS_PER_UNIT)
+ post_error_ne ("?suspiciously large alignment specified for&",
+ Expression (Alignment_Clause (gnat_entity)),
+ gnat_entity);
+ }
+ }
+ else if (Is_Atomic (gnat_entity) && !gnu_size
+ && tree_fits_uhwi_p (TYPE_SIZE (gnu_type))
+ && integer_pow2p (TYPE_SIZE (gnu_type)))
+ align = MIN (BIGGEST_ALIGNMENT,
+ tree_to_uhwi (TYPE_SIZE (gnu_type)));
+ else if (Is_Atomic (gnat_entity) && gnu_size
+ && tree_fits_uhwi_p (gnu_size)
+ && integer_pow2p (gnu_size))
+ align = MIN (BIGGEST_ALIGNMENT, tree_to_uhwi (gnu_size));
+
+ /* See if we need to pad the type. If we did, and made a record,
+ the name of the new type may be changed. So get it back for
+ us when we make the new TYPE_DECL below. */
+ if (gnu_size || align > 0)
+ gnu_type = maybe_pad_type (gnu_type, gnu_size, align, gnat_entity,
+ false, !gnu_decl, definition, false);
+
+ if (TYPE_IS_PADDING_P (gnu_type))
+ {
+ gnu_entity_name = TYPE_NAME (gnu_type);
+ if (TREE_CODE (gnu_entity_name) == TYPE_DECL)
+ gnu_entity_name = DECL_NAME (gnu_entity_name);
+ }
+
+ /* Now set the RM size of the type. We cannot do it before padding
+ because we need to accept arbitrary RM sizes on integral types. */
+ set_rm_size (RM_Size (gnat_entity), gnu_type, gnat_entity);
+
+ /* If we are at global level, GCC will have applied variable_size to
+ the type, but that won't have done anything. So, if it's not
+ a constant or self-referential, call elaborate_expression_1 to
+ make a variable for the size rather than calculating it each time.
+ Handle both the RM size and the actual size. */
+ if (global_bindings_p ()
+ && TYPE_SIZE (gnu_type)
+ && !TREE_CONSTANT (TYPE_SIZE (gnu_type))
+ && !CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_type)))
+ {
+ tree size = TYPE_SIZE (gnu_type);
+
+ TYPE_SIZE (gnu_type)
+ = elaborate_expression_1 (size, gnat_entity,
+ get_identifier ("SIZE"),
+ definition, false);
+
+ /* ??? For now, store the size as a multiple of the alignment in
+ bytes so that we can see the alignment from the tree. */
+ TYPE_SIZE_UNIT (gnu_type)
+ = elaborate_expression_2 (TYPE_SIZE_UNIT (gnu_type), gnat_entity,
+ get_identifier ("SIZE_A_UNIT"),
+ definition, false,
+ TYPE_ALIGN (gnu_type));
+
+ /* ??? gnu_type may come from an existing type so the MULT_EXPR node
+ may not be marked by the call to create_type_decl below. */
+ MARK_VISITED (TYPE_SIZE_UNIT (gnu_type));
+
+ if (TREE_CODE (gnu_type) == RECORD_TYPE)
+ {
+ tree variant_part = get_variant_part (gnu_type);
+ tree ada_size = TYPE_ADA_SIZE (gnu_type);
+
+ if (variant_part)
+ {
+ tree union_type = TREE_TYPE (variant_part);
+ tree offset = DECL_FIELD_OFFSET (variant_part);
+
+ /* If the position of the variant part is constant, subtract
+ it from the size of the type of the parent to get the new
+ size. This manual CSE reduces the data size. */
+ if (TREE_CODE (offset) == INTEGER_CST)
+ {
+ tree bitpos = DECL_FIELD_BIT_OFFSET (variant_part);
+ TYPE_SIZE (union_type)
+ = size_binop (MINUS_EXPR, TYPE_SIZE (gnu_type),
+ bit_from_pos (offset, bitpos));
+ TYPE_SIZE_UNIT (union_type)
+ = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (gnu_type),
+ byte_from_pos (offset, bitpos));
+ }
+ else
+ {
+ TYPE_SIZE (union_type)
+ = elaborate_expression_1 (TYPE_SIZE (union_type),
+ gnat_entity,
+ get_identifier ("VSIZE"),
+ definition, false);
+
+ /* ??? For now, store the size as a multiple of the
+ alignment in bytes so that we can see the alignment
+ from the tree. */
+ TYPE_SIZE_UNIT (union_type)
+ = elaborate_expression_2 (TYPE_SIZE_UNIT (union_type),
+ gnat_entity,
+ get_identifier
+ ("VSIZE_A_UNIT"),
+ definition, false,
+ TYPE_ALIGN (union_type));
+
+ /* ??? For now, store the offset as a multiple of the
+ alignment in bytes so that we can see the alignment
+ from the tree. */
+ DECL_FIELD_OFFSET (variant_part)
+ = elaborate_expression_2 (offset,
+ gnat_entity,
+ get_identifier ("VOFFSET"),
+ definition, false,
+ DECL_OFFSET_ALIGN
+ (variant_part));
+ }
+
+ DECL_SIZE (variant_part) = TYPE_SIZE (union_type);
+ DECL_SIZE_UNIT (variant_part) = TYPE_SIZE_UNIT (union_type);
+ }
+
+ if (operand_equal_p (ada_size, size, 0))
+ ada_size = TYPE_SIZE (gnu_type);
+ else
+ ada_size
+ = elaborate_expression_1 (ada_size, gnat_entity,
+ get_identifier ("RM_SIZE"),
+ definition, false);
+ SET_TYPE_ADA_SIZE (gnu_type, ada_size);
+ }
+ }
+
+ /* If this is a record type or subtype, call elaborate_expression_2 on
+ any field position. Do this for both global and local types.
+ Skip any fields that we haven't made trees for to avoid problems with
+ class-wide types. */
+ if (IN (kind, Record_Kind))
+ for (gnat_temp = First_Entity (gnat_entity); Present (gnat_temp);
+ gnat_temp = Next_Entity (gnat_temp))
+ if (Ekind (gnat_temp) == E_Component && present_gnu_tree (gnat_temp))
+ {
+ tree gnu_field = get_gnu_tree (gnat_temp);
+
+ /* ??? For now, store the offset as a multiple of the alignment
+ in bytes so that we can see the alignment from the tree. */
+ if (!CONTAINS_PLACEHOLDER_P (DECL_FIELD_OFFSET (gnu_field)))
+ {
+ DECL_FIELD_OFFSET (gnu_field)
+ = elaborate_expression_2 (DECL_FIELD_OFFSET (gnu_field),
+ gnat_temp,
+ get_identifier ("OFFSET"),
+ definition, false,
+ DECL_OFFSET_ALIGN (gnu_field));
+
+ /* ??? The context of gnu_field is not necessarily gnu_type
+ so the MULT_EXPR node built above may not be marked by
+ the call to create_type_decl below. */
+ if (global_bindings_p ())
+ MARK_VISITED (DECL_FIELD_OFFSET (gnu_field));
+ }
+ }
+
+ if (Treat_As_Volatile (gnat_entity))
+ gnu_type
+ = build_qualified_type (gnu_type,
+ TYPE_QUALS (gnu_type) | TYPE_QUAL_VOLATILE);
+
+ if (Is_Atomic (gnat_entity))
+ check_ok_for_atomic (gnu_type, gnat_entity, false);
+
+ if (Present (Alignment_Clause (gnat_entity)))
+ TYPE_USER_ALIGN (gnu_type) = 1;
+
+ if (Universal_Aliasing (gnat_entity))
+ TYPE_UNIVERSAL_ALIASING_P (TYPE_MAIN_VARIANT (gnu_type)) = 1;
+
+ if (!gnu_decl)
+ gnu_decl = create_type_decl (gnu_entity_name, gnu_type,
+ !Comes_From_Source (gnat_entity),
+ debug_info_p, gnat_entity);
+ else
+ {
+ TREE_TYPE (gnu_decl) = gnu_type;
+ TYPE_STUB_DECL (gnu_type) = gnu_decl;
+ }
+ }
+
+ if (is_type && !TYPE_IS_DUMMY_P (TREE_TYPE (gnu_decl)))
+ {
+ gnu_type = TREE_TYPE (gnu_decl);
+
+ /* If this is a derived type, relate its alias set to that of its parent
+ to avoid troubles when a call to an inherited primitive is inlined in
+ a context where a derived object is accessed. The inlined code works
+ on the parent view so the resulting code may access the same object
+ using both the parent and the derived alias sets, which thus have to
+ conflict. As the same issue arises with component references, the
+ parent alias set also has to conflict with composite types enclosing
+ derived components. For instance, if we have:
+
+ type D is new T;
+ type R is record
+ Component : D;
+ end record;
+
+ we want T to conflict with both D and R, in addition to R being a
+ superset of D by record/component construction.
+
+ One way to achieve this is to perform an alias set copy from the
+ parent to the derived type. This is not quite appropriate, though,
+ as we don't want separate derived types to conflict with each other:
+
+ type I1 is new Integer;
+ type I2 is new Integer;
+
+ We want I1 and I2 to both conflict with Integer but we do not want
+ I1 to conflict with I2, and an alias set copy on derivation would
+ have that effect.
+
+ The option chosen is to make the alias set of the derived type a
+ superset of that of its parent type. It trivially fulfills the
+ simple requirement for the Integer derivation example above, and
+ the component case as well by superset transitivity:
+
+ superset superset
+ R ----------> D ----------> T
+
+ However, for composite types, conversions between derived types are
+ translated into VIEW_CONVERT_EXPRs so a sequence like:
+
+ type Comp1 is new Comp;
+ type Comp2 is new Comp;
+ procedure Proc (C : Comp1);
+
+ C : Comp2;
+ Proc (Comp1 (C));
+
+ is translated into:
+
+ C : Comp2;
+ Proc ((Comp1 &) &VIEW_CONVERT_EXPR <Comp1> (C));
+
+ and gimplified into:
+
+ C : Comp2;
+ Comp1 *C.0;
+ C.0 = (Comp1 *) &C;
+ Proc (C.0);
+
+ i.e. generates code involving type punning. Therefore, Comp1 needs
+ to conflict with Comp2 and an alias set copy is required.
+
+ The language rules ensure the parent type is already frozen here. */
+ if (Is_Derived_Type (gnat_entity) && !type_annotate_only)
+ {
+ tree gnu_parent_type = gnat_to_gnu_type (Etype (gnat_entity));
+ relate_alias_sets (gnu_type, gnu_parent_type,
+ Is_Composite_Type (gnat_entity)
+ ? ALIAS_SET_COPY : ALIAS_SET_SUPERSET);
+ }
+
+ /* Back-annotate the Alignment of the type if not already in the
+ tree. Likewise for sizes. */
+ if (Unknown_Alignment (gnat_entity))
+ {
+ unsigned int double_align, align;
+ bool is_capped_double, align_clause;
+
+ /* If the default alignment of "double" or larger scalar types is
+ specifically capped and this is not an array with an alignment
+ clause on the component type, return the cap. */
+ if ((double_align = double_float_alignment) > 0)
+ is_capped_double
+ = is_double_float_or_array (gnat_entity, &align_clause);
+ else if ((double_align = double_scalar_alignment) > 0)
+ is_capped_double
+ = is_double_scalar_or_array (gnat_entity, &align_clause);
+ else
+ is_capped_double = align_clause = false;
+
+ if (is_capped_double && !align_clause)
+ align = double_align;
+ else
+ align = TYPE_ALIGN (gnu_type) / BITS_PER_UNIT;
+
+ Set_Alignment (gnat_entity, UI_From_Int (align));
+ }
+
+ if (Unknown_Esize (gnat_entity) && TYPE_SIZE (gnu_type))
+ {
+ tree gnu_size = TYPE_SIZE (gnu_type);
+
+ /* If the size is self-referential, annotate the maximum value. */
+ if (CONTAINS_PLACEHOLDER_P (gnu_size))
+ gnu_size = max_size (gnu_size, true);
+
+ /* If we are just annotating types and the type is tagged, the tag
+ and the parent components are not generated by the front-end so
+ sizes must be adjusted if there is no representation clause. */
+ if (type_annotate_only
+ && Is_Tagged_Type (gnat_entity)
+ && !VOID_TYPE_P (gnu_type)
+ && (!TYPE_FIELDS (gnu_type)
+ || integer_zerop (bit_position (TYPE_FIELDS (gnu_type)))))
+ {
+ tree pointer_size = bitsize_int (POINTER_SIZE), offset;
+ Uint uint_size;
+
+ if (Is_Derived_Type (gnat_entity))
+ {
+ Entity_Id gnat_parent = Etype (Base_Type (gnat_entity));
+ offset = UI_To_gnu (Esize (gnat_parent), bitsizetype);
+ Set_Alignment (gnat_entity, Alignment (gnat_parent));
+ }
+ else
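+ /* Only the tag is missing here, which takes up one pointer. */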
+ offset = pointer_size;
+
+ if (TYPE_FIELDS (gnu_type))
+ offset
+ = round_up (offset, DECL_ALIGN (TYPE_FIELDS (gnu_type)));
+
+ gnu_size = size_binop (PLUS_EXPR, gnu_size, offset);
+ gnu_size = round_up (gnu_size, POINTER_SIZE);
+ uint_size = annotate_value (gnu_size);
+ Set_Esize (gnat_entity, uint_size);
+ Set_RM_Size (gnat_entity, uint_size);
+ }
+ else
+ Set_Esize (gnat_entity, annotate_value (gnu_size));
+ }
+
+ if (Unknown_RM_Size (gnat_entity) && rm_size (gnu_type))
+ Set_RM_Size (gnat_entity, annotate_value (rm_size (gnu_type)));
+ }
+
+ /* If we really have a ..._DECL node, set a couple of flags on it. But we
+ cannot do so if we are reusing the ..._DECL node made for an equivalent
+ type or an alias or a renamed object as the predicates don't apply to it
+ but to GNAT_ENTITY. */
+ if (DECL_P (gnu_decl)
+ && !(is_type && gnat_equiv_type != gnat_entity)
+ && !Present (Alias (gnat_entity))
+ && !(Present (Renamed_Object (gnat_entity)) && saved))
+ {
+ if (!Comes_From_Source (gnat_entity))
+ DECL_ARTIFICIAL (gnu_decl) = 1;
+
+ if (!debug_info_p)
+ DECL_IGNORED_P (gnu_decl) = 1;
+ }
+
+ /* If we haven't already, associate the ..._DECL node that we just made with
+ the input GNAT entity node. */
+ if (!saved)
+ save_gnu_tree (gnat_entity, gnu_decl, false);
+
+ /* If this is an enumeration or floating-point type, we were not able to set
+ the bounds since they refer to the type. These are always static. */
+ if ((kind == E_Enumeration_Type && Present (First_Literal (gnat_entity)))
+ || (kind == E_Floating_Point_Type && !Vax_Float (gnat_entity)))
+ {
+ tree gnu_scalar_type = gnu_type;
+ tree gnu_low_bound, gnu_high_bound;
+
+ /* If this is a padded type, we need to use the underlying type. */
+ if (TYPE_IS_PADDING_P (gnu_scalar_type))
+ gnu_scalar_type = TREE_TYPE (TYPE_FIELDS (gnu_scalar_type));
+
+ /* If this is a floating-point type and we haven't yet set the longest
+ float type, use this type in the evaluation of the bounds. */
+ if (!longest_float_type_node && kind == E_Floating_Point_Type)
+ longest_float_type_node = gnu_scalar_type;
+
+ gnu_low_bound = gnat_to_gnu (Type_Low_Bound (gnat_entity));
+ gnu_high_bound = gnat_to_gnu (Type_High_Bound (gnat_entity));
+
+ if (kind == E_Enumeration_Type)
+ {
+ /* Enumeration types have specific RM bounds. */
+ SET_TYPE_RM_MIN_VALUE (gnu_scalar_type, gnu_low_bound);
+ SET_TYPE_RM_MAX_VALUE (gnu_scalar_type, gnu_high_bound);
+ }
+ else
+ {
+ /* Floating-point types don't have specific RM bounds. */
+ TYPE_GCC_MIN_VALUE (gnu_scalar_type) = gnu_low_bound;
+ TYPE_GCC_MAX_VALUE (gnu_scalar_type) = gnu_high_bound;
+ }
+ }
+
+ /* If we deferred processing of incomplete types, re-enable it. If there
+ were no other disables and we have deferred types to process, do so. */
+ if (this_deferred
+ && --defer_incomplete_level == 0
+ && defer_incomplete_list)
+ {
+ struct incomplete *p, *next;
+
+ /* We are back to level 0 for the deferring of incomplete types.
+ But processing these incomplete types below may itself require
+ deferring, so preserve what we have and restart from scratch. */
+ p = defer_incomplete_list;
+ defer_incomplete_list = NULL;
+
+ for (; p; p = next)
+ {
+ next = p->next;
+
+ if (p->old_type)
+ update_pointer_to (TYPE_MAIN_VARIANT (p->old_type),
+ gnat_to_gnu_type (p->full_type));
+ free (p);
+ }
+ }
+
+ /* If we are not defining this type, see if it's on one of the lists of
+ incomplete types. If so, handle the list entry now. */
+ if (is_type && !definition)
+ {
+ struct incomplete *p;
+
+ for (p = defer_incomplete_list; p; p = p->next)
+ if (p->old_type && p->full_type == gnat_entity)
+ {
+ update_pointer_to (TYPE_MAIN_VARIANT (p->old_type),
+ TREE_TYPE (gnu_decl));
+ p->old_type = NULL_TREE;
+ }
+
+ for (p = defer_limited_with; p; p = p->next)
+ if (p->old_type && Non_Limited_View (p->full_type) == gnat_entity)
+ {
+ update_pointer_to (TYPE_MAIN_VARIANT (p->old_type),
+ TREE_TYPE (gnu_decl));
+ p->old_type = NULL_TREE;
+ }
+ }
+
+ if (this_global)
+ force_global--;
+
+ /* If this is a packed array type whose original array type is itself
+ an Itype without freeze node, make sure the latter is processed. */
+ if (Is_Packed_Array_Type (gnat_entity)
+ && Is_Itype (Original_Array_Type (gnat_entity))
+ && No (Freeze_Node (Original_Array_Type (gnat_entity)))
+ && !present_gnu_tree (Original_Array_Type (gnat_entity)))
+ gnat_to_gnu_entity (Original_Array_Type (gnat_entity), NULL_TREE, 0);
+
+ return gnu_decl;
+}
+
+/* Similar, but if the returned value is a COMPONENT_REF, return the
+ FIELD_DECL. */
+
+tree
+gnat_to_gnu_field_decl (Entity_Id gnat_entity)
+{
+ tree gnu_field = gnat_to_gnu_entity (gnat_entity, NULL_TREE, 0);
+
+ if (TREE_CODE (gnu_field) == COMPONENT_REF)
+ gnu_field = TREE_OPERAND (gnu_field, 1);
+
+ return gnu_field;
+}
+
+/* Similar, but GNAT_ENTITY is assumed to refer to a GNAT type. Return
+ the GCC type corresponding to that entity. */
+
+tree
+gnat_to_gnu_type (Entity_Id gnat_entity)
+{
+ tree gnu_decl;
+
+ /* The back end never attempts to annotate generic types. */
+ if (Is_Generic_Type (gnat_entity) && type_annotate_only)
+ return void_type_node;
+
+ gnu_decl = gnat_to_gnu_entity (gnat_entity, NULL_TREE, 0);
+ gcc_assert (TREE_CODE (gnu_decl) == TYPE_DECL);
+
+ return TREE_TYPE (gnu_decl);
+}
+
+/* Similar, but GNAT_ENTITY is assumed to refer to a GNAT type. Return
+ the unpadded version of the GCC type corresponding to that entity. */
+
+tree
+get_unpadded_type (Entity_Id gnat_entity)
+{
+ tree type = gnat_to_gnu_type (gnat_entity);
+
+ if (TYPE_IS_PADDING_P (type))
+ type = TREE_TYPE (TYPE_FIELDS (type));
+
+ return type;
+}
+
+/* Return the DECL associated with the public subprogram GNAT_ENTITY, but with
+ its type changed to that of a parameterless procedure, unless an alias is
+ already present, in which case the alias is returned instead. */
+
+tree
+get_minimal_subprog_decl (Entity_Id gnat_entity)
+{
+ tree gnu_entity_name, gnu_ext_name;
+ struct attrib *attr_list = NULL;
+
+ /* See the E_Function/E_Procedure case of gnat_to_gnu_entity for the model
+ of the handling applied here. */
+
+ while (Present (Alias (gnat_entity)))
+ {
+ gnat_entity = Alias (gnat_entity);
+ if (present_gnu_tree (gnat_entity))
+ return get_gnu_tree (gnat_entity);
+ }
+
+ gnu_entity_name = get_entity_name (gnat_entity);
+ gnu_ext_name = create_concat_name (gnat_entity, NULL);
+
+ if (Has_Stdcall_Convention (gnat_entity))
+ prepend_one_attribute (&attr_list, ATTR_MACHINE_ATTRIBUTE,
+ get_identifier ("stdcall"), NULL_TREE,
+ gnat_entity);
+ else if (Has_Thiscall_Convention (gnat_entity))
+ prepend_one_attribute (&attr_list, ATTR_MACHINE_ATTRIBUTE,
+ get_identifier ("thiscall"), NULL_TREE,
+ gnat_entity);
+
+ if (No (Interface_Name (gnat_entity)) && gnu_ext_name == gnu_entity_name)
+ gnu_ext_name = NULL_TREE;
+
+ return
+ create_subprog_decl (gnu_entity_name, gnu_ext_name, void_ftype, NULL_TREE,
+ is_disabled, true, true, true, attr_list, gnat_entity);
+}
+
+/* Return whether the E_Subprogram_Type/E_Function/E_Procedure GNAT_ENTITY is
+ a C++ imported method or equivalent.
+
+ We use the predicate on 32-bit x86/Windows to find out whether we need to
+ use the "thiscall" calling convention for GNAT_ENTITY. This convention is
+ used for C++ methods (functions with METHOD_TYPE) by the back-end. */
+
+bool
+is_cplusplus_method (Entity_Id gnat_entity)
+{
+ if (Convention (gnat_entity) != Convention_CPP)
+ return false;
+
+ /* This is the main case: C++ method imported as a primitive operation. */
+ if (Is_Dispatching_Operation (gnat_entity))
+ return true;
+
+ /* A thunk needs to be handled like its associated primitive operation. */
+ if (Is_Subprogram (gnat_entity) && Is_Thunk (gnat_entity))
+ return true;
+
+ /* C++ classes with no virtual functions can be imported as limited
+ record types, but we need to return true for the constructors. */
+ if (Is_Constructor (gnat_entity))
+ return true;
+
+ /* This is set on the E_Subprogram_Type built for a dispatching call. */
+ if (Is_Dispatch_Table_Entity (gnat_entity))
+ return true;
+
+ return false;
+}
+
+/* Finalize the processing of From_Limited_With incomplete types. */
+
+void
+finalize_from_limited_with (void)
+{
+ struct incomplete *p, *next;
+
+ p = defer_limited_with;
+ defer_limited_with = NULL;
+
+ for (; p; p = next)
+ {
+ next = p->next;
+
+ if (p->old_type)
+ update_pointer_to (TYPE_MAIN_VARIANT (p->old_type),
+ gnat_to_gnu_type (p->full_type));
+ free (p);
+ }
+}
+
+/* Return the equivalent type to be used for GNAT_ENTITY, if it's a
+ kind of type (such as E_Task_Type) that has a different type which Gigi
+ uses for its representation. If the type does not have a special type
+ for its representation, return GNAT_ENTITY. If a type is supposed to
+ exist, but does not, abort unless annotating types, in which case
+ return Empty. If GNAT_ENTITY is Empty, return Empty. */
+
+Entity_Id
+Gigi_Equivalent_Type (Entity_Id gnat_entity)
+{
+ Entity_Id gnat_equiv = gnat_entity;
+
+ if (No (gnat_entity))
+ return gnat_entity;
+
+ switch (Ekind (gnat_entity))
+ {
+ case E_Class_Wide_Subtype:
+ if (Present (Equivalent_Type (gnat_entity)))
+ gnat_equiv = Equivalent_Type (gnat_entity);
+ break;
+
+ case E_Access_Protected_Subprogram_Type:
+ case E_Anonymous_Access_Protected_Subprogram_Type:
+ gnat_equiv = Equivalent_Type (gnat_entity);
+ break;
+
+ case E_Class_Wide_Type:
+ gnat_equiv = Root_Type (gnat_entity);
+ break;
+
+ case E_Task_Type:
+ case E_Task_Subtype:
+ case E_Protected_Type:
+ case E_Protected_Subtype:
+ gnat_equiv = Corresponding_Record_Type (gnat_entity);
+ break;
+
+ default:
+ break;
+ }
+
+ gcc_assert (Present (gnat_equiv) || type_annotate_only);
+
+ return gnat_equiv;
+}
+
+/* Return a GCC tree for a type corresponding to the component type of the
+ array type or subtype GNAT_ARRAY. DEFINITION is true if this component
+ is for an array being defined. DEBUG_INFO_P is true if we need to write
+ debug information for other types that we may create in the process. */
+
+static tree
+gnat_to_gnu_component_type (Entity_Id gnat_array, bool definition,
+ bool debug_info_p)
+{
+ const Entity_Id gnat_type = Component_Type (gnat_array);
+ tree gnu_type = gnat_to_gnu_type (gnat_type);
+ tree gnu_comp_size;
+
+ /* Try to get a smaller form of the component if needed. */
+ if ((Is_Packed (gnat_array)
+ || Has_Component_Size_Clause (gnat_array))
+ && !Is_Bit_Packed_Array (gnat_array)
+ && !Has_Aliased_Components (gnat_array)
+ && !Strict_Alignment (gnat_type)
+ && RECORD_OR_UNION_TYPE_P (gnu_type)
+ && !TYPE_FAT_POINTER_P (gnu_type)
+ && tree_fits_uhwi_p (TYPE_SIZE (gnu_type)))
+ gnu_type = make_packable_type (gnu_type, false);
+
+ if (Has_Atomic_Components (gnat_array))
+ check_ok_for_atomic (gnu_type, gnat_array, true);
+
+ /* Get and validate any specified Component_Size. */
+ gnu_comp_size
+ = validate_size (Component_Size (gnat_array), gnu_type, gnat_array,
+ Is_Bit_Packed_Array (gnat_array) ? TYPE_DECL : VAR_DECL,
+ true, Has_Component_Size_Clause (gnat_array));
+
+ /* If the array has aliased components and the component size can be zero,
+ force at least unit size to ensure that the components have distinct
+ addresses. */
+ if (!gnu_comp_size
+ && Has_Aliased_Components (gnat_array)
+ && (integer_zerop (TYPE_SIZE (gnu_type))
+ || (TREE_CODE (gnu_type) == ARRAY_TYPE
+ && !TREE_CONSTANT (TYPE_SIZE (gnu_type)))))
+ gnu_comp_size
+ = size_binop (MAX_EXPR, TYPE_SIZE (gnu_type), bitsize_unit_node);
+
+ /* If the component type is a RECORD_TYPE that has a self-referential size,
+ then use the maximum size for the component size. */
+ if (!gnu_comp_size
+ && TREE_CODE (gnu_type) == RECORD_TYPE
+ && CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_type)))
+ gnu_comp_size = max_size (TYPE_SIZE (gnu_type), true);
+
+ /* Honor the component size. This is not needed for bit-packed arrays. */
+ if (gnu_comp_size && !Is_Bit_Packed_Array (gnat_array))
+ {
+ tree orig_type = gnu_type;
+ unsigned int max_align;
+
+ /* If an alignment is specified, use it as a cap on the component type
+ so that it can be honored for the whole type. But ignore it for the
+ original type of packed array types. */
+ if (No (Packed_Array_Type (gnat_array)) && Known_Alignment (gnat_array))
+ max_align = validate_alignment (Alignment (gnat_array), gnat_array, 0);
+ else
+ max_align = 0;
+
+ gnu_type = make_type_from_size (gnu_type, gnu_comp_size, false);
+ if (max_align > 0 && TYPE_ALIGN (gnu_type) > max_align)
+ gnu_type = orig_type;
+ else
+ orig_type = gnu_type;
+
+ gnu_type = maybe_pad_type (gnu_type, gnu_comp_size, 0, gnat_array,
+ true, false, definition, true);
+
+ /* If a padding record was made, declare it now since it will never be
+ declared otherwise. This is necessary to ensure that its subtrees
+ are properly marked. */
+ if (gnu_type != orig_type && !DECL_P (TYPE_NAME (gnu_type)))
+ create_type_decl (TYPE_NAME (gnu_type), gnu_type, true, debug_info_p,
+ gnat_array);
+ }
+
+ if (Has_Volatile_Components (gnat_array))
+ gnu_type
+ = build_qualified_type (gnu_type,
+ TYPE_QUALS (gnu_type) | TYPE_QUAL_VOLATILE);
+
+ return gnu_type;
+}
+
+/* Return a GCC tree for a parameter corresponding to GNAT_PARAM and
+ using MECH as its passing mechanism, to be placed in the parameter
+ list built for GNAT_SUBPROG. Assume a foreign convention for the
+ latter if FOREIGN is true. Also set CICO to true if the parameter
+ must use the copy-in copy-out implementation mechanism.
+
+ The returned tree is a PARM_DECL, except for those cases where no
+ parameter needs to be actually passed to the subprogram; the type
+ of this "shadow" parameter is then returned instead. */
+
+static tree
+gnat_to_gnu_param (Entity_Id gnat_param, Mechanism_Type mech,
+ Entity_Id gnat_subprog, bool foreign, bool *cico)
+{
+ tree gnu_param_name = get_entity_name (gnat_param);
+ tree gnu_param_type = gnat_to_gnu_type (Etype (gnat_param));
+ tree gnu_param_type_alt = NULL_TREE;
+ bool in_param = (Ekind (gnat_param) == E_In_Parameter);
+ /* The parameter can be indirectly modified if its address is taken. */
+ bool ro_param = in_param && !Address_Taken (gnat_param);
+ bool by_return = false, by_component_ptr = false;
+ bool by_ref = false;
+ tree gnu_param;
+
+ /* Copy-return is used only for the first parameter of a valued procedure.
+ It's a copy mechanism for which a parameter is never allocated. */
+ if (mech == By_Copy_Return)
+ {
+ gcc_assert (Ekind (gnat_param) == E_Out_Parameter);
+ mech = By_Copy;
+ by_return = true;
+ }
+
+ /* If this is either a foreign function or if the underlying type won't
+ be passed by reference, strip off possible padding type. */
+ if (TYPE_IS_PADDING_P (gnu_param_type))
+ {
+ tree unpadded_type = TREE_TYPE (TYPE_FIELDS (gnu_param_type));
+
+ if (mech == By_Reference
+ || foreign
+ || (!must_pass_by_ref (unpadded_type)
+ && (mech == By_Copy || !default_pass_by_ref (unpadded_type))))
+ gnu_param_type = unpadded_type;
+ }
+
+ /* If this is a read-only parameter, make a variant of the type that is
+ read-only. ??? However, if this is an unconstrained array, that type
+ can be very complex, so skip it for now. Likewise for any other
+ self-referential type. */
+ if (ro_param
+ && TREE_CODE (gnu_param_type) != UNCONSTRAINED_ARRAY_TYPE
+ && !CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_param_type)))
+ gnu_param_type = build_qualified_type (gnu_param_type,
+ (TYPE_QUALS (gnu_param_type)
+ | TYPE_QUAL_CONST));
+
+ /* For foreign conventions, pass arrays as pointers to the element type.
+ First check for unconstrained array and get the underlying array. */
+ if (foreign && TREE_CODE (gnu_param_type) == UNCONSTRAINED_ARRAY_TYPE)
+ gnu_param_type
+ = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_param_type))));
+
+ /* For GCC builtins, pass Address integer types as (void *). */
+ if (Convention (gnat_subprog) == Convention_Intrinsic
+ && Present (Interface_Name (gnat_subprog))
+ && Is_Descendent_Of_Address (Etype (gnat_param)))
+ gnu_param_type = ptr_void_type_node;
+
+ /* VMS descriptors are themselves passed by reference. */
+ if (mech == By_Short_Descriptor
+ || (mech == By_Descriptor && TARGET_ABI_OPEN_VMS && !flag_vms_malloc64))
+ gnu_param_type
+ = build_pointer_type (build_vms_descriptor32 (gnu_param_type,
+ Mechanism (gnat_param),
+ gnat_subprog));
+ else if (mech == By_Descriptor)
+ {
+ /* Build both a 32-bit and 64-bit descriptor, one of which will be
+ chosen in fill_vms_descriptor. */
+ gnu_param_type_alt
+ = build_pointer_type (build_vms_descriptor32 (gnu_param_type,
+ Mechanism (gnat_param),
+ gnat_subprog));
+ gnu_param_type
+ = build_pointer_type (build_vms_descriptor (gnu_param_type,
+ Mechanism (gnat_param),
+ gnat_subprog));
+ }
+
+ /* Arrays are passed as pointers to element type for foreign conventions. */
+ else if (foreign
+ && mech != By_Copy
+ && TREE_CODE (gnu_param_type) == ARRAY_TYPE)
+ {
+ /* Strip off any multi-dimensional entries, then strip
+ off the last array to get the component type. */
+ while (TREE_CODE (TREE_TYPE (gnu_param_type)) == ARRAY_TYPE
+ && TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_param_type)))
+ gnu_param_type = TREE_TYPE (gnu_param_type);
+
+ by_component_ptr = true;
+ gnu_param_type = TREE_TYPE (gnu_param_type);
+
+ if (ro_param)
+ gnu_param_type = build_qualified_type (gnu_param_type,
+ (TYPE_QUALS (gnu_param_type)
+ | TYPE_QUAL_CONST));
+
+ gnu_param_type = build_pointer_type (gnu_param_type);
+ }
+
+ /* Fat pointers are passed as thin pointers for foreign conventions. */
+ else if (foreign && TYPE_IS_FAT_POINTER_P (gnu_param_type))
+ gnu_param_type
+ = make_type_from_size (gnu_param_type, size_int (POINTER_SIZE), 0);
+
+ /* If we must pass or were requested to pass by reference, do so.
+ If we were requested to pass by copy, do so.
+ Otherwise, for foreign conventions, pass In Out or Out parameters
+ or aggregates by reference. For COBOL and Fortran, pass all
+ integer and FP types that way too. For Convention Ada, use
+ the standard Ada default. */
+ else if (must_pass_by_ref (gnu_param_type)
+ || mech == By_Reference
+ || (mech != By_Copy
+ && ((foreign
+ && (!in_param || AGGREGATE_TYPE_P (gnu_param_type)))
+ || (foreign
+ && (Convention (gnat_subprog) == Convention_Fortran
+ || Convention (gnat_subprog) == Convention_COBOL)
+ && (INTEGRAL_TYPE_P (gnu_param_type)
+ || FLOAT_TYPE_P (gnu_param_type)))
+ || (!foreign
+ && default_pass_by_ref (gnu_param_type)))))
+ {
+ /* We take advantage of 6.2(12) by considering that references built for
+ parameters whose type isn't by-ref and for which the mechanism hasn't
+ been forced to by-ref are restrict-qualified in the C sense. */
+ bool restrict_p
+ = !TYPE_IS_BY_REFERENCE_P (gnu_param_type) && mech != By_Reference;
+ gnu_param_type = build_reference_type (gnu_param_type);
+ if (restrict_p)
+ gnu_param_type
+ = build_qualified_type (gnu_param_type, TYPE_QUAL_RESTRICT);
+ by_ref = true;
+ }
+
+ /* Pass In Out or Out parameters using the copy-in copy-out mechanism. */
+ else if (!in_param)
+ *cico = true;
+
+ if (mech == By_Copy && (by_ref || by_component_ptr))
+ post_error ("?cannot pass & by copy", gnat_param);
+
+ /* If this is an Out parameter that isn't passed by reference and isn't
+ a pointer or aggregate, we don't make a PARM_DECL for it. Instead,
+ it will be a VAR_DECL created when we process the procedure, so just
+ return its type. For the special parameter of a valued procedure,
+ never pass it in.
+
+ An exception is made to cover the RM-6.4.1 rule requiring "by copy"
+ Out parameters with discriminants or implicit initial values to be
+ handled like In Out parameters. These types are normally built as
+ aggregates, hence passed by reference, except for some packed arrays
+ which end up encoded in special integer types. Note that scalars can
+ be given implicit initial values using the Default_Value aspect.
+
+ The exception we need to make is then for packed arrays of records
+ with discriminants or implicit initial values. We have no light/easy
+ way to check for the latter case, so we merely check for packed arrays
+ of records. This may lead to useless copy-in operations, but in very
+ rare cases only, as these would be exceptions in a set of already
+ exceptional situations. */
+ if (Ekind (gnat_param) == E_Out_Parameter
+ && !by_ref
+ && (by_return
+ || (mech != By_Descriptor
+ && mech != By_Short_Descriptor
+ && !POINTER_TYPE_P (gnu_param_type)
+ && !AGGREGATE_TYPE_P (gnu_param_type)
+ && !Has_Default_Aspect (Etype (gnat_param))))
+ && !(Is_Array_Type (Etype (gnat_param))
+ && Is_Packed (Etype (gnat_param))
+ && Is_Composite_Type (Component_Type (Etype (gnat_param)))))
+ return gnu_param_type;
+
+ gnu_param = create_param_decl (gnu_param_name, gnu_param_type,
+ ro_param || by_ref || by_component_ptr);
+ DECL_BY_REF_P (gnu_param) = by_ref;
+ DECL_BY_COMPONENT_PTR_P (gnu_param) = by_component_ptr;
+ DECL_BY_DESCRIPTOR_P (gnu_param)
+ = (mech == By_Descriptor || mech == By_Short_Descriptor);
+ DECL_POINTS_TO_READONLY_P (gnu_param)
+ = (ro_param && (by_ref || by_component_ptr));
+ DECL_CAN_NEVER_BE_NULL_P (gnu_param) = Can_Never_Be_Null (gnat_param);
+
+ /* Save the alternate descriptor type, if any. */
+ if (gnu_param_type_alt)
+ SET_DECL_PARM_ALT_TYPE (gnu_param, gnu_param_type_alt);
+
+ /* If no Mechanism was specified, indicate what we're using, then
+ back-annotate it. */
+ if (mech == Default)
+ mech = (by_ref || by_component_ptr) ? By_Reference : By_Copy;
+
+ Set_Mechanism (gnat_param, mech);
+ return gnu_param;
+}
+
+/* Return true if DISCR1 and DISCR2 represent the same discriminant. */
+
+static bool
+same_discriminant_p (Entity_Id discr1, Entity_Id discr2)
+{
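+ /* First climb up the derivation chains to the original discriminants. */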
+ while (Present (Corresponding_Discriminant (discr1)))
+ discr1 = Corresponding_Discriminant (discr1);
+
+ while (Present (Corresponding_Discriminant (discr2)))
+ discr2 = Corresponding_Discriminant (discr2);
+
+ return
+ Original_Record_Component (discr1) == Original_Record_Component (discr2);
+}
+
+/* Return true if the array type GNU_TYPE, which represents a dimension of
+ GNAT_TYPE, has a non-aliased component in the back-end sense. */
+
+static bool
+array_type_has_nonaliased_component (tree gnu_type, Entity_Id gnat_type)
+{
+ /* If the array type is not the innermost dimension of the GNAT type,
+ then it has a non-aliased component. */
+ if (TREE_CODE (TREE_TYPE (gnu_type)) == ARRAY_TYPE
+ && TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_type)))
+ return true;
+
+ /* If the array type has an aliased component in the front-end sense,
+ then it also has an aliased component in the back-end sense. */
+ if (Has_Aliased_Components (gnat_type))
+ return false;
+
+ /* If this is a derived type, then it has a non-aliased component if
+ and only if its parent type also has one. */
+ if (Is_Derived_Type (gnat_type))
+ {
+ tree gnu_parent_type = gnat_to_gnu_type (Etype (gnat_type));
+ int index;
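+ /* Fetch the innermost dimension of the parent type, looking under
+ the fat pointer if the parent is unconstrained; GNU_TYPE is the
+ innermost dimension here because of the first test above. */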
+ if (TREE_CODE (gnu_parent_type) == UNCONSTRAINED_ARRAY_TYPE)
+ gnu_parent_type
+ = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_parent_type))));
+ for (index = Number_Dimensions (gnat_type) - 1; index > 0; index--)
+ gnu_parent_type = TREE_TYPE (gnu_parent_type);
+ return TYPE_NONALIASED_COMPONENT (gnu_parent_type);
+ }
+
+ /* Otherwise, rely exclusively on properties of the element type. */
+ return type_for_nonaliased_component_p (TREE_TYPE (gnu_type));
+}
+
+/* Return true if GNAT_ADDRESS is a value known at compile-time. */
+
+static bool
+compile_time_known_address_p (Node_Id gnat_address)
+{
+ /* Catch System'To_Address. */
+ if (Nkind (gnat_address) == N_Unchecked_Type_Conversion)
+ gnat_address = Expression (gnat_address);
+
+ return Compile_Time_Known_Value (gnat_address);
+}
+
+/* Return true if GNAT_RANGE, a N_Range node, cannot be superflat, i.e. if the
+ inequality HB >= LB-1 is true. LB and HB are the low and high bounds. */
+
+static bool
+cannot_be_superflat_p (Node_Id gnat_range)
+{
+ Node_Id gnat_lb = Low_Bound (gnat_range), gnat_hb = High_Bound (gnat_range);
+ Node_Id scalar_range;
+ tree gnu_lb, gnu_hb, gnu_lb_minus_one;
+
+ /* If the low bound is not constant, try to find an upper bound. */
+ while (Nkind (gnat_lb) != N_Integer_Literal
+ && (Ekind (Etype (gnat_lb)) == E_Signed_Integer_Subtype
+ || Ekind (Etype (gnat_lb)) == E_Modular_Integer_Subtype)
+ && (scalar_range = Scalar_Range (Etype (gnat_lb)))
+ && (Nkind (scalar_range) == N_Signed_Integer_Type_Definition
+ || Nkind (scalar_range) == N_Range))
+ gnat_lb = High_Bound (scalar_range);
+
+ /* If the high bound is not constant, try to find a lower bound. */
+ while (Nkind (gnat_hb) != N_Integer_Literal
+ && (Ekind (Etype (gnat_hb)) == E_Signed_Integer_Subtype
+ || Ekind (Etype (gnat_hb)) == E_Modular_Integer_Subtype)
+ && (scalar_range = Scalar_Range (Etype (gnat_hb)))
+ && (Nkind (scalar_range) == N_Signed_Integer_Type_Definition
+ || Nkind (scalar_range) == N_Range))
+ gnat_hb = Low_Bound (scalar_range);
+
+ /* If we have failed to find constant bounds, punt. */
+ if (Nkind (gnat_lb) != N_Integer_Literal
+ || Nkind (gnat_hb) != N_Integer_Literal)
+ return false;
+
+ /* We need at least a signed 64-bit type to catch most cases. */
+ gnu_lb = UI_To_gnu (Intval (gnat_lb), sbitsizetype);
+ gnu_hb = UI_To_gnu (Intval (gnat_hb), sbitsizetype);
+ if (TREE_OVERFLOW (gnu_lb) || TREE_OVERFLOW (gnu_hb))
+ return false;
+
+ /* If the low bound is the smallest integer, nothing can be smaller. */
+ gnu_lb_minus_one = size_binop (MINUS_EXPR, gnu_lb, sbitsize_one_node);
+ if (TREE_OVERFLOW (gnu_lb_minus_one))
+ return true;
+
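+ /* The range cannot be superflat if HB >= LB - 1 holds. */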
+ return !tree_int_cst_lt (gnu_hb, gnu_lb_minus_one);
+}
+
+/* Return true if GNU_EXPR is (essentially) the address of a CONSTRUCTOR. */
+
+static bool
+constructor_address_p (tree gnu_expr)
+{
+ while (TREE_CODE (gnu_expr) == NOP_EXPR
+ || TREE_CODE (gnu_expr) == CONVERT_EXPR
+ || TREE_CODE (gnu_expr) == NON_LVALUE_EXPR)
+ gnu_expr = TREE_OPERAND (gnu_expr, 0);
+
+ return (TREE_CODE (gnu_expr) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (gnu_expr, 0)) == CONSTRUCTOR);
+}
+
+/* Given GNAT_ENTITY, elaborate all expressions that are required to
+ be elaborated at the point of its definition, but do nothing else. */
+
+void
+elaborate_entity (Entity_Id gnat_entity)
+{
+ switch (Ekind (gnat_entity))
+ {
+ case E_Signed_Integer_Subtype:
+ case E_Modular_Integer_Subtype:
+ case E_Enumeration_Subtype:
+ case E_Ordinary_Fixed_Point_Subtype:
+ case E_Decimal_Fixed_Point_Subtype:
+ case E_Floating_Point_Subtype:
+ {
+ Node_Id gnat_lb = Type_Low_Bound (gnat_entity);
+ Node_Id gnat_hb = Type_High_Bound (gnat_entity);
+
+ /* ??? Tests to avoid Constraint_Error in static expressions
+ are needed until after the front-end stops generating bogus
+ conversions on bounds of real types. */
+ if (!Raises_Constraint_Error (gnat_lb))
+ elaborate_expression (gnat_lb, gnat_entity, get_identifier ("L"),
+ true, false, Needs_Debug_Info (gnat_entity));
+ if (!Raises_Constraint_Error (gnat_hb))
+ elaborate_expression (gnat_hb, gnat_entity, get_identifier ("U"),
+ true, false, Needs_Debug_Info (gnat_entity));
+ break;
+ }
+
+ case E_Record_Subtype:
+ case E_Private_Subtype:
+ case E_Limited_Private_Subtype:
+ case E_Record_Subtype_With_Private:
+ if (Has_Discriminants (gnat_entity) && Is_Constrained (gnat_entity))
+ {
+ Node_Id gnat_discriminant_expr;
+ Entity_Id gnat_field;
+
+ for (gnat_field
+ = First_Discriminant (Implementation_Base_Type (gnat_entity)),
+ gnat_discriminant_expr
+ = First_Elmt (Discriminant_Constraint (gnat_entity));
+ Present (gnat_field);
+ gnat_field = Next_Discriminant (gnat_field),
+ gnat_discriminant_expr = Next_Elmt (gnat_discriminant_expr))
+ /* Ignore access discriminants. */
+ if (!Is_Access_Type (Etype (Node (gnat_discriminant_expr))))
+ elaborate_expression (Node (gnat_discriminant_expr),
+ gnat_entity, get_entity_name (gnat_field),
+ true, false, false);
+ }
+ break;
+
+ }
+}
+
+/* Return true if the size in units represented by GNU_SIZE can be handled by
+ an allocation. If STATIC_P is true, consider only what can be done with a
+ static allocation. */
+
+static bool
+allocatable_size_p (tree gnu_size, bool static_p)
+{
+ /* We can allocate a fixed size if it is valid for the middle-end. */
+ if (TREE_CODE (gnu_size) == INTEGER_CST)
+ return valid_constant_size_p (gnu_size);
+
+ /* We can allocate a variable size if this isn't a static allocation. */
+ else
+ return !static_p;
+}
+
+/* Prepend to ATTR_LIST an entry for an attribute with provided TYPE,
+ NAME, ARGS and ERROR_POINT. */
+
+static void
+prepend_one_attribute (struct attrib **attr_list,
+ enum attr_type attr_type,
+ tree attr_name,
+ tree attr_args,
+ Node_Id attr_error_point)
+{
+ struct attrib * attr = (struct attrib *) xmalloc (sizeof (struct attrib));
+
+ attr->type = attr_type;
+ attr->name = attr_name;
+ attr->args = attr_args;
+ attr->error_point = attr_error_point;
+
+ attr->next = *attr_list;
+ *attr_list = attr;
+}
+
+/* Prepend to ATTR_LIST an entry for an attribute provided by GNAT_PRAGMA. */
+
+static void
+prepend_one_attribute_pragma (struct attrib **attr_list, Node_Id gnat_pragma)
+{
+ const Node_Id gnat_arg = Pragma_Argument_Associations (gnat_pragma);
+ tree gnu_arg0 = NULL_TREE, gnu_arg1 = NULL_TREE;
+ enum attr_type etype;
+
+ /* Map the pragma at hand. Skip if this isn't one we know how to handle. */
+ switch (Get_Pragma_Id (Chars (Pragma_Identifier (gnat_pragma))))
+ {
+ case Pragma_Machine_Attribute:
+ etype = ATTR_MACHINE_ATTRIBUTE;
+ break;
+
+ case Pragma_Linker_Alias:
+ etype = ATTR_LINK_ALIAS;
+ break;
+
+ case Pragma_Linker_Section:
+ etype = ATTR_LINK_SECTION;
+ break;
+
+ case Pragma_Linker_Constructor:
+ etype = ATTR_LINK_CONSTRUCTOR;
+ break;
+
+ case Pragma_Linker_Destructor:
+ etype = ATTR_LINK_DESTRUCTOR;
+ break;
+
+ case Pragma_Weak_External:
+ etype = ATTR_WEAK_EXTERNAL;
+ break;
+
+ case Pragma_Thread_Local_Storage:
+ etype = ATTR_THREAD_LOCAL_STORAGE;
+ break;
+
+ default:
+ return;
+ }
+
+ /* See what arguments we have and turn them into GCC trees for attribute
+ handlers. These expect identifiers for strings. We handle at most two
+ arguments and static expressions only. */
+ if (Present (gnat_arg) && Present (First (gnat_arg)))
+ {
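+ /* The first association designates the entity the pragma applies
+ to, so the arguments of interest start at the second one. */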
+ Node_Id gnat_arg0 = Next (First (gnat_arg));
+ Node_Id gnat_arg1 = Empty;
+
+ if (Present (gnat_arg0) && Is_Static_Expression (Expression (gnat_arg0)))
+ {
+ gnu_arg0 = gnat_to_gnu (Expression (gnat_arg0));
+
+ if (TREE_CODE (gnu_arg0) == STRING_CST)
+ {
+ gnu_arg0 = get_identifier (TREE_STRING_POINTER (gnu_arg0));
+ if (IDENTIFIER_LENGTH (gnu_arg0) == 0)
+ return;
+ }
+
+ gnat_arg1 = Next (gnat_arg0);
+ }
+
+ if (Present (gnat_arg1) && Is_Static_Expression (Expression (gnat_arg1)))
+ {
+ gnu_arg1 = gnat_to_gnu (Expression (gnat_arg1));
+
+ if (TREE_CODE (gnu_arg1) == STRING_CST)
+ gnu_arg1 = get_identifier (TREE_STRING_POINTER (gnu_arg1));
+ }
+ }
+
+ /* Prepend to the list. Make a one-element list of the second argument
+ if present, as GCC attribute handlers expect it. */
+ prepend_one_attribute (attr_list, etype, gnu_arg0,
+ gnu_arg1
+ ? build_tree_list (NULL_TREE, gnu_arg1) : NULL_TREE,
+ Present (Next (First (gnat_arg)))
+ ? Expression (Next (First (gnat_arg))) : gnat_pragma);
+}
+
+/* Prepend to ATTR_LIST the list of attributes for GNAT_ENTITY, if any. */
+
+static void
+prepend_attributes (struct attrib **attr_list, Entity_Id gnat_entity)
+{
+ Node_Id gnat_temp;
+
+ /* Attributes are stored as Representation Item pragmas. */
+ for (gnat_temp = First_Rep_Item (gnat_entity);
+ Present (gnat_temp);
+ gnat_temp = Next_Rep_Item (gnat_temp))
+ if (Nkind (gnat_temp) == N_Pragma)
+ prepend_one_attribute_pragma (attr_list, gnat_temp);
+}
+
+/* Given a GNAT tree GNAT_EXPR, for an expression which is a value within a
+ type definition (either a bound or a discriminant value) for GNAT_ENTITY,
+ return the GCC tree to use for that expression. GNU_NAME is the suffix
+ to use if a variable needs to be created and DEFINITION is true if this
+ is a definition of GNAT_ENTITY. If NEED_VALUE is true, we need a result;
+ otherwise, we are just elaborating the expression for side-effects. If
+ NEED_DEBUG is true, we need a variable for debugging purposes even if it
+ isn't needed for code generation. */
+
+static tree
+elaborate_expression (Node_Id gnat_expr, Entity_Id gnat_entity, tree gnu_name,
+ bool definition, bool need_value, bool need_debug)
+{
+ tree gnu_expr;
+
+ /* If we already elaborated this expression (e.g. it was involved
+ in the definition of a private type), use the old value. */
+ if (present_gnu_tree (gnat_expr))
+ return get_gnu_tree (gnat_expr);
+
+ /* If we don't need a value and this is static or a discriminant,
+ we don't need to do anything. */
+ if (!need_value
+ && (Is_OK_Static_Expression (gnat_expr)
+ || (Nkind (gnat_expr) == N_Identifier
+ && Ekind (Entity (gnat_expr)) == E_Discriminant)))
+ return NULL_TREE;
+
+ /* If it's a static expression, we don't need a variable for debugging. */
+ if (need_debug && Is_OK_Static_Expression (gnat_expr))
+ need_debug = false;
+
+ /* Otherwise, convert this tree to its GCC equivalent and elaborate it. */
+ gnu_expr = elaborate_expression_1 (gnat_to_gnu (gnat_expr), gnat_entity,
+ gnu_name, definition, need_debug);
+
+ /* Save the expression in case we try to elaborate this entity again. Since
+ it's not a DECL, don't check it. Don't save if it's a discriminant. */
+ if (!CONTAINS_PLACEHOLDER_P (gnu_expr))
+ save_gnu_tree (gnat_expr, gnu_expr, true);
+
+ return need_value ? gnu_expr : error_mark_node;
+}
+
+/* Similar, but take a GNU expression and always return a result. */
+
+static tree
+elaborate_expression_1 (tree gnu_expr, Entity_Id gnat_entity, tree gnu_name,
+ bool definition, bool need_debug)
+{
+ const bool expr_public_p = Is_Public (gnat_entity);
+ const bool expr_global_p = expr_public_p || global_bindings_p ();
+ bool expr_variable_p, use_variable;
+
+ /* In most cases, we won't see a naked FIELD_DECL because a discriminant
+ reference will have been replaced with a COMPONENT_REF when the type
+ is being elaborated. However, there are some cases involving child
+ types where we will. So convert it to a COMPONENT_REF. We hope it
+ will be at the highest level of the expression in these cases. */
+ if (TREE_CODE (gnu_expr) == FIELD_DECL)
+ gnu_expr = build3 (COMPONENT_REF, TREE_TYPE (gnu_expr),
+ build0 (PLACEHOLDER_EXPR, DECL_CONTEXT (gnu_expr)),
+ gnu_expr, NULL_TREE);
+
+ /* If GNU_EXPR contains a placeholder, just return it. We rely on the fact
+ that an expression cannot contain both a discriminant and a variable. */
+ if (CONTAINS_PLACEHOLDER_P (gnu_expr))
+ return gnu_expr;
+
+ /* If GNU_EXPR is neither a constant nor based on a read-only variable, make
+ a variable that is initialized to contain the expression when the package
+ containing the definition is elaborated. If this entity is defined at top
+ level, replace the expression by the variable; otherwise use a SAVE_EXPR
+ if this is necessary. */
+ if (CONSTANT_CLASS_P (gnu_expr))
+ expr_variable_p = false;
+ else
+ {
+ /* Skip any conversions and simple constant arithmetics to see if the
+ expression is based on a read-only variable.
+ ??? This really should remain read-only, but we have to think about
+ the typing of the tree here. */
+ tree inner = remove_conversions (gnu_expr, true);
+
+ inner = skip_simple_constant_arithmetic (inner);
+
+ if (handled_component_p (inner))
+ {
+ HOST_WIDE_INT bitsize, bitpos;
+ tree offset;
+ enum machine_mode mode;
+ int unsignedp, volatilep;
+
+ inner = get_inner_reference (inner, &bitsize, &bitpos, &offset,
+ &mode, &unsignedp, &volatilep, false);
+ /* If the offset is variable, err on the side of caution. */
+ if (offset)
+ inner = NULL_TREE;
+ }
+
+ expr_variable_p
+ = !(inner
+ && TREE_CODE (inner) == VAR_DECL
+ && (TREE_READONLY (inner) || DECL_READONLY_ONCE_ELAB (inner)));
+ }
+
+ /* We only need to use the variable if we are in a global context since GCC
+ can do the right thing in the local case. However, when not optimizing,
+ use it for the bounds of a loop iteration scheme to avoid code duplication. */
+ use_variable = expr_variable_p
+ && (expr_global_p
+ || (!optimize
+ && definition
+ && Is_Itype (gnat_entity)
+ && Nkind (Associated_Node_For_Itype (gnat_entity))
+ == N_Loop_Parameter_Specification));
+
+ /* Now create it, possibly only for debugging purposes. */
+ if (use_variable || need_debug)
+ {
+ tree gnu_decl
+ = create_var_decl_1
+ (create_concat_name (gnat_entity, IDENTIFIER_POINTER (gnu_name)),
+ NULL_TREE, TREE_TYPE (gnu_expr), gnu_expr, true, expr_public_p,
+ !definition, expr_global_p, !need_debug, NULL, gnat_entity);
+
+ if (use_variable)
+ return gnu_decl;
+ }
+
+ return expr_variable_p ? gnat_save_expr (gnu_expr) : gnu_expr;
+}
+
+/* Similar, but take an alignment factor and make it explicit in the tree. */
+
+static tree
+elaborate_expression_2 (tree gnu_expr, Entity_Id gnat_entity, tree gnu_name,
+ bool definition, bool need_debug, unsigned int align)
+{
+ tree unit_align = size_int (align / BITS_PER_UNIT);
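+ /* Divide out the alignment in bytes, elaborate the quotient, then
+ multiply the alignment back in so that it can be read off the tree. */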
+ return
+ size_binop (MULT_EXPR,
+ elaborate_expression_1 (size_binop (EXACT_DIV_EXPR,
+ gnu_expr,
+ unit_align),
+ gnat_entity, gnu_name, definition,
+ need_debug),
+ unit_align);
+}
+
+/* Given a GNU tree and a GNAT list of choices, generate an expression to test
+ the value passed against the list of choices. */
+
+tree
+choices_to_gnu (tree operand, Node_Id choices)
+{
+ Node_Id choice;
+ Node_Id gnat_temp;
+ tree result = boolean_false_node;
+ tree this_test, low = 0, high = 0, single = 0;
+
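+ /* OR in a test for each choice in the list. */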
+ for (choice = First (choices); Present (choice); choice = Next (choice))
+ {
+ switch (Nkind (choice))
+ {
+ case N_Range:
+ low = gnat_to_gnu (Low_Bound (choice));
+ high = gnat_to_gnu (High_Bound (choice));
+
+ this_test
+ = build_binary_op (TRUTH_ANDIF_EXPR, boolean_type_node,
+ build_binary_op (GE_EXPR, boolean_type_node,
+ operand, low),
+ build_binary_op (LE_EXPR, boolean_type_node,
+ operand, high));
+
+ break;
+
+ case N_Subtype_Indication:
+ gnat_temp = Range_Expression (Constraint (choice));
+ low = gnat_to_gnu (Low_Bound (gnat_temp));
+ high = gnat_to_gnu (High_Bound (gnat_temp));
+
+ this_test
+ = build_binary_op (TRUTH_ANDIF_EXPR, boolean_type_node,
+ build_binary_op (GE_EXPR, boolean_type_node,
+ operand, low),
+ build_binary_op (LE_EXPR, boolean_type_node,
+ operand, high));
+ break;
+
+ case N_Identifier:
+ case N_Expanded_Name:
+ /* This represents either a subtype range, an enumeration
+ literal, or a constant; Ekind says which. If an enumeration
+ literal or constant, fall through to the next case. */
+ if (Ekind (Entity (choice)) != E_Enumeration_Literal
+ && Ekind (Entity (choice)) != E_Constant)
+ {
+ tree type = gnat_to_gnu_type (Entity (choice));
+
+ low = TYPE_MIN_VALUE (type);
+ high = TYPE_MAX_VALUE (type);
+
+ this_test
+ = build_binary_op (TRUTH_ANDIF_EXPR, boolean_type_node,
+ build_binary_op (GE_EXPR, boolean_type_node,
+ operand, low),
+ build_binary_op (LE_EXPR, boolean_type_node,
+ operand, high));
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case N_Character_Literal:
+ case N_Integer_Literal:
+ single = gnat_to_gnu (choice);
+ this_test = build_binary_op (EQ_EXPR, boolean_type_node, operand,
+ single);
+ break;
+
+ case N_Others_Choice:
+ this_test = boolean_true_node;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ result = build_binary_op (TRUTH_ORIF_EXPR, boolean_type_node, result,
+ this_test);
+ }
+
+ return result;
+}
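+
+/* For example, the Ada choice list "1 .. 4 | 7" tested against OPERAND
+   yields false || (OPERAND >= 1 && OPERAND <= 4) || OPERAND == 7: the
+   result starts as boolean_false_node and one TRUTH_ORIF_EXPR is chained
+   per choice; an others choice contributes boolean_true_node.  */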
+
+/* Adjust PACKED setting as passed to gnat_to_gnu_field for a field of
+ type FIELD_TYPE to be placed in RECORD_TYPE. Return the result. */
+
+static int
+adjust_packed (tree field_type, tree record_type, int packed)
+{
+ /* If the field contains an item of variable size, we cannot pack it
+ because we cannot create temporaries of non-fixed size in case
+ we need to take the address of the field. See addressable_p and
+ the notes on the addressability issues for further details. */
+ if (type_has_variable_size (field_type))
+ return 0;
+
+ /* If the alignment of the record is specified and the field type
+ is over-aligned, request Storage_Unit alignment for the field. */
+ if (packed == -2)
+ {
+ if (TYPE_ALIGN (field_type) > TYPE_ALIGN (record_type))
+ return -1;
+ else
+ return 0;
+ }
+
+ return packed;
+}
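+
+/* For example, if the enclosing record carries an alignment clause (PACKED
+   is -2) and FIELD_TYPE is more strictly aligned than the record itself,
+   the function returns -1 and the field gets Storage_Unit alignment; in
+   the other specified-alignment cases it returns 0 and the field is laid
+   out normally.  */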
+
+/* Return a GCC tree for a field corresponding to GNAT_FIELD to be
+ placed in GNU_RECORD_TYPE.
+
+ PACKED is 1 if the enclosing record is packed, -1 if the enclosing
+ record has Component_Alignment of Storage_Unit, -2 if the enclosing
+ record has a specified alignment.
+
+ DEFINITION is true if this field is for a record being defined.
+
+ DEBUG_INFO_P is true if we need to write debug information for types
+ that we may create in the process. */
+
+static tree
+gnat_to_gnu_field (Entity_Id gnat_field, tree gnu_record_type, int packed,
+ bool definition, bool debug_info_p)
+{
+ const Entity_Id gnat_field_type = Etype (gnat_field);
+ tree gnu_field_type = gnat_to_gnu_type (gnat_field_type);
+ tree gnu_field_id = get_entity_name (gnat_field);
+ tree gnu_field, gnu_size, gnu_pos;
+ bool is_volatile
+ = (Treat_As_Volatile (gnat_field) || Treat_As_Volatile (gnat_field_type));
+ bool needs_strict_alignment
+ = (is_volatile
+ || Is_Aliased (gnat_field)
+ || Strict_Alignment (gnat_field_type));
+
+ /* If this field requires strict alignment, we cannot pack it because
+ it would very likely be under-aligned in the record. */
+ if (needs_strict_alignment)
+ packed = 0;
+ else
+ packed = adjust_packed (gnu_field_type, gnu_record_type, packed);
+
+ /* If a size is specified, use it. Otherwise, if the record type is packed,
+ use the official RM size. See "Handling of Type'Size Values" in Einfo
+ for further details. */
+ if (Known_Esize (gnat_field))
+ gnu_size = validate_size (Esize (gnat_field), gnu_field_type,
+ gnat_field, FIELD_DECL, false, true);
+ else if (packed == 1)
+ gnu_size = validate_size (RM_Size (gnat_field_type), gnu_field_type,
+ gnat_field, FIELD_DECL, false, true);
+ else
+ gnu_size = NULL_TREE;
+
+ /* If we have a specified size that is smaller than that of the field's type,
+ or a position is specified, and the field's type is a record that doesn't
+ require strict alignment, see if we can get either an integral mode form
+ of the type or a smaller form. If we can, show a size was specified for
+ the field if there wasn't one already, so we know to make this a bitfield
+ and avoid making things wider.
+
+ Changing to an integral mode form is useful when the record is packed as
+ we can then place the field at a non-byte-aligned position and so achieve
+ tighter packing. This is also required if the field shares a byte with
+ another field and the front-end lets the back-end handle the access to
+ the field, because GCC cannot handle non-byte-aligned BLKmode fields.
+
+ Changing to a smaller form is required if the specified size is smaller
+ than that of the field's type and the type contains sub-fields that are
+ padded, in order to avoid generating accesses to these sub-fields that
+ are wider than the field.
+
+ We avoid the transformation if it is not required or potentially useful,
+ as it might entail an increase of the field's alignment and have ripple
+ effects on the outer record type. A typical case is a field known to be
+ byte-aligned and not to share a byte with another field. */
+ if (!needs_strict_alignment
+ && RECORD_OR_UNION_TYPE_P (gnu_field_type)
+ && !TYPE_FAT_POINTER_P (gnu_field_type)
+ && tree_fits_uhwi_p (TYPE_SIZE (gnu_field_type))
+ && (packed == 1
+ || (gnu_size
+ && (tree_int_cst_lt (gnu_size, TYPE_SIZE (gnu_field_type))
+ || (Present (Component_Clause (gnat_field))
+ && !(UI_To_Int (Component_Bit_Offset (gnat_field))
+ % BITS_PER_UNIT == 0
+ && value_factor_p (gnu_size, BITS_PER_UNIT)))))))
+ {
+ tree gnu_packable_type = make_packable_type (gnu_field_type, true);
+ if (gnu_packable_type != gnu_field_type)
+ {
+ gnu_field_type = gnu_packable_type;
+ if (!gnu_size)
+ gnu_size = rm_size (gnu_field_type);
+ }
+ }
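+
+ /* As a concrete case of the above: assuming a 3-byte BLKmode component
+ type inside a packed record, make_packable_type may return a 24-bit
+ integral-mode variant of it, which can then be placed at any bit
+ position and accessed as a bitfield. */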
+
+ if (Is_Atomic (gnat_field))
+ check_ok_for_atomic (gnu_field_type, gnat_field, false);
+
+ if (Present (Component_Clause (gnat_field)))
+ {
+ Entity_Id gnat_parent
+ = Parent_Subtype (Underlying_Type (Scope (gnat_field)));
+
+ gnu_pos = UI_To_gnu (Component_Bit_Offset (gnat_field), bitsizetype);
+ gnu_size = validate_size (Esize (gnat_field), gnu_field_type,
+ gnat_field, FIELD_DECL, false, true);
+
+ /* Ensure the position does not overlap with the parent subtype, if there
+ is one. This test is omitted if the parent of the tagged type has a
+ full rep clause since, in this case, component clauses are allowed to
+ overlay the space allocated for the parent type and the front-end has
+ checked that there are no overlapping components. */
+ if (Present (gnat_parent) && !Is_Fully_Repped_Tagged_Type (gnat_parent))
+ {
+ tree gnu_parent = gnat_to_gnu_type (gnat_parent);
+
+ if (TREE_CODE (TYPE_SIZE (gnu_parent)) == INTEGER_CST
+ && tree_int_cst_lt (gnu_pos, TYPE_SIZE (gnu_parent)))
+ {
+ post_error_ne_tree
+ ("offset of& must be beyond parent{, minimum allowed is ^}",
+ First_Bit (Component_Clause (gnat_field)), gnat_field,
+ TYPE_SIZE_UNIT (gnu_parent));
+ }
+ }
+
+ /* If this field needs strict alignment, check that the record is
+ sufficiently aligned and that position and size are consistent with
+ the alignment. But don't do it if we are just annotating types and
+ the field's type is tagged, since tagged types aren't fully laid out
+ in this mode. Also note that atomic implies volatile, so the ordering
+ of the inner tests below is significant. */
+ if (needs_strict_alignment
+ && !(type_annotate_only && Is_Tagged_Type (gnat_field_type)))
+ {
+ TYPE_ALIGN (gnu_record_type)
+ = MAX (TYPE_ALIGN (gnu_record_type), TYPE_ALIGN (gnu_field_type));
+
+ if (gnu_size
+ && !operand_equal_p (gnu_size, TYPE_SIZE (gnu_field_type), 0))
+ {
+ if (Is_Atomic (gnat_field) || Is_Atomic (gnat_field_type))
+ post_error_ne_tree
+ ("atomic field& must be natural size of type{ (^)}",
+ Last_Bit (Component_Clause (gnat_field)), gnat_field,
+ TYPE_SIZE (gnu_field_type));
+
+ else if (is_volatile)
+ post_error_ne_tree
+ ("volatile field& must be natural size of type{ (^)}",
+ Last_Bit (Component_Clause (gnat_field)), gnat_field,
+ TYPE_SIZE (gnu_field_type));
+
+ else if (Is_Aliased (gnat_field))
+ post_error_ne_tree
+ ("size of aliased field& must be ^ bits",
+ Last_Bit (Component_Clause (gnat_field)), gnat_field,
+ TYPE_SIZE (gnu_field_type));
+
+ else if (Strict_Alignment (gnat_field_type))
+ post_error_ne_tree
+ ("size of & with aliased or tagged components not ^ bits",
+ Last_Bit (Component_Clause (gnat_field)), gnat_field,
+ TYPE_SIZE (gnu_field_type));
+
+ else
+ gcc_unreachable ();
+
+ gnu_size = NULL_TREE;
+ }
+
+ if (!integer_zerop (size_binop
+ (TRUNC_MOD_EXPR, gnu_pos,
+ bitsize_int (TYPE_ALIGN (gnu_field_type)))))
+ {
+ if (Is_Atomic (gnat_field) || Is_Atomic (gnat_field_type))
+ post_error_ne_num
+ ("position of atomic field& must be multiple of ^ bits",
+ First_Bit (Component_Clause (gnat_field)), gnat_field,
+ TYPE_ALIGN (gnu_field_type));
+
+ else if (is_volatile)
+ post_error_ne_num
+ ("position of volatile field& must be multiple of ^ bits",
+ First_Bit (Component_Clause (gnat_field)), gnat_field,
+ TYPE_ALIGN (gnu_field_type));
+
+ else if (Is_Aliased (gnat_field))
+ post_error_ne_num
+ ("position of aliased field& must be multiple of ^ bits",
+ First_Bit (Component_Clause (gnat_field)), gnat_field,
+ TYPE_ALIGN (gnu_field_type));
+
+ else if (Strict_Alignment (gnat_field_type))
+ post_error_ne
+ ("position of & is not compatible with alignment required "
+ "by its components",
+ First_Bit (Component_Clause (gnat_field)), gnat_field);
+
+ else
+ gcc_unreachable ();
+
+ gnu_pos = NULL_TREE;
+ }
+ }
+ }
+
+ /* If the record has rep clauses and this is the tag field, make a rep
+ clause for it as well. */
+ else if (Has_Specified_Layout (Scope (gnat_field))
+ && Chars (gnat_field) == Name_uTag)
+ {
+ gnu_pos = bitsize_zero_node;
+ gnu_size = TYPE_SIZE (gnu_field_type);
+ }
+
+ else
+ {
+ gnu_pos = NULL_TREE;
+
+ /* If we are packing the record and the field is BLKmode, round the
+ size up to a byte boundary. */
+ if (packed && TYPE_MODE (gnu_field_type) == BLKmode && gnu_size)
+ gnu_size = round_up (gnu_size, BITS_PER_UNIT);
+ }
+
+ /* We need to make the size the maximum for the type if it is
+ self-referential and an unconstrained type. In that case, we can't
+ pack the field since we can't make a copy to align it. */
+ if (TREE_CODE (gnu_field_type) == RECORD_TYPE
+ && !gnu_size
+ && CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_field_type))
+ && !Is_Constrained (Underlying_Type (gnat_field_type)))
+ {
+ gnu_size = max_size (TYPE_SIZE (gnu_field_type), true);
+ packed = 0;
+ }
+
+ /* If a size is specified, adjust the field's type to it. */
+ if (gnu_size)
+ {
+ tree orig_field_type;
+
+ /* If the field's type is justified modular, we would need to remove
+ the wrapper to (better) meet the layout requirements. However we
+ can do so only if the field is not aliased to preserve the unique
+ layout and if the prescribed size is not greater than that of the
+ packed array to preserve the justification. */
+ if (!needs_strict_alignment
+ && TREE_CODE (gnu_field_type) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (gnu_field_type)
+ && tree_int_cst_compare (gnu_size, TYPE_ADA_SIZE (gnu_field_type))
+ <= 0)
+ gnu_field_type = TREE_TYPE (TYPE_FIELDS (gnu_field_type));
+
+ /* Similarly if the field's type is a misaligned integral type, but
+ there is no restriction on the size as there is no justification. */
+ if (!needs_strict_alignment
+ && TYPE_IS_PADDING_P (gnu_field_type)
+ && INTEGRAL_TYPE_P (TREE_TYPE (TYPE_FIELDS (gnu_field_type))))
+ gnu_field_type = TREE_TYPE (TYPE_FIELDS (gnu_field_type));
+
+ gnu_field_type
+ = make_type_from_size (gnu_field_type, gnu_size,
+ Has_Biased_Representation (gnat_field));
+
+ orig_field_type = gnu_field_type;
+ gnu_field_type = maybe_pad_type (gnu_field_type, gnu_size, 0, gnat_field,
+ false, false, definition, true);
+
+ /* If a padding record was made, declare it now since it will never be
+ declared otherwise. This is necessary to ensure that its subtrees
+ are properly marked. */
+ if (gnu_field_type != orig_field_type
+ && !DECL_P (TYPE_NAME (gnu_field_type)))
+ create_type_decl (TYPE_NAME (gnu_field_type), gnu_field_type, true,
+ debug_info_p, gnat_field);
+ }
+
+ /* Otherwise (or if there was an error), don't specify a position. */
+ else
+ gnu_pos = NULL_TREE;
+
+ gcc_assert (TREE_CODE (gnu_field_type) != RECORD_TYPE
+ || !TYPE_CONTAINS_TEMPLATE_P (gnu_field_type));
+
+ /* Now create the decl for the field. */
+ gnu_field
+ = create_field_decl (gnu_field_id, gnu_field_type, gnu_record_type,
+ gnu_size, gnu_pos, packed, Is_Aliased (gnat_field));
+ Sloc_to_locus (Sloc (gnat_field), &DECL_SOURCE_LOCATION (gnu_field));
+ DECL_ALIASED_P (gnu_field) = Is_Aliased (gnat_field);
+ TREE_THIS_VOLATILE (gnu_field) = TREE_SIDE_EFFECTS (gnu_field) = is_volatile;
+
+ if (Ekind (gnat_field) == E_Discriminant)
+ DECL_DISCRIMINANT_NUMBER (gnu_field)
+ = UI_To_gnu (Discriminant_Number (gnat_field), sizetype);
+
+ return gnu_field;
+}
+
+/* Return true if at least one member of COMPONENT_LIST needs strict
+ alignment. */
+
+static bool
+components_need_strict_alignment (Node_Id component_list)
+{
+ Node_Id component_decl;
+
+ for (component_decl = First_Non_Pragma (Component_Items (component_list));
+ Present (component_decl);
+ component_decl = Next_Non_Pragma (component_decl))
+ {
+ Entity_Id gnat_field = Defining_Entity (component_decl);
+
+ if (Is_Aliased (gnat_field))
+ return true;
+
+ if (Strict_Alignment (Etype (gnat_field)))
+ return true;
+ }
+
+ return false;
+}
+
+/* Return true if TYPE is a type with variable size or a padding type with a
+ field of variable size or a record that has a field with such a type. */
+
+static bool
+type_has_variable_size (tree type)
+{
+ tree field;
+
+ if (!TREE_CONSTANT (TYPE_SIZE (type)))
+ return true;
+
+ if (TYPE_IS_PADDING_P (type)
+ && !TREE_CONSTANT (DECL_SIZE (TYPE_FIELDS (type))))
+ return true;
+
+ if (!RECORD_OR_UNION_TYPE_P (type))
+ return false;
+
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
+ if (type_has_variable_size (TREE_TYPE (field)))
+ return true;
+
+ return false;
+}
+
+/* Return true if FIELD is an artificial field. */
+
+static bool
+field_is_artificial (tree field)
+{
+ /* These fields are generated by the front-end proper. */
+ if (IDENTIFIER_POINTER (DECL_NAME (field)) [0] == '_')
+ return true;
+
+ /* These fields are generated by gigi. */
+ if (DECL_INTERNAL_P (field))
+ return true;
+
+ return false;
+}
+
+/* Return true if FIELD is a non-artificial aliased field. */
+
+static bool
+field_is_aliased (tree field)
+{
+ if (field_is_artificial (field))
+ return false;
+
+ return DECL_ALIASED_P (field);
+}
+
+/* Return true if FIELD is a non-artificial field with self-referential
+ size. */
+
+static bool
+field_has_self_size (tree field)
+{
+ if (field_is_artificial (field))
+ return false;
+
+ if (DECL_SIZE (field) && TREE_CODE (DECL_SIZE (field)) == INTEGER_CST)
+ return false;
+
+ return CONTAINS_PLACEHOLDER_P (TYPE_SIZE (TREE_TYPE (field)));
+}
+
+/* Return true if FIELD is a non-artificial field with variable size. */
+
+static bool
+field_has_variable_size (tree field)
+{
+ if (field_is_artificial (field))
+ return false;
+
+ if (DECL_SIZE (field) && TREE_CODE (DECL_SIZE (field)) == INTEGER_CST)
+ return false;
+
+ return TREE_CODE (TYPE_SIZE (TREE_TYPE (field))) != INTEGER_CST;
+}
+
+/* qsort comparer for the bit positions of two record components. */
+
+static int
+compare_field_bitpos (const PTR rt1, const PTR rt2)
+{
+ const_tree const field1 = * (const_tree const *) rt1;
+ const_tree const field2 = * (const_tree const *) rt2;
+ const int ret
+ = tree_int_cst_compare (bit_position (field1), bit_position (field2));
+
+ return ret ? ret : (int) (DECL_UID (field1) - DECL_UID (field2));
+}
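+
+/* Note that the fallback on DECL_UID for equal bit positions makes the
+   comparison a total order, so qsort, which is not a stable sort, still
+   produces a deterministic ordering for fields placed at the same
+   position.  */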
+
+/* Structure holding information for a given variant. */
+typedef struct vinfo
+{
+ /* The record type of the variant. */
+ tree type;
+
+ /* The name of the variant. */
+ tree name;
+
+ /* The qualifier of the variant. */
+ tree qual;
+
+ /* Whether the variant has a rep clause. */
+ bool has_rep;
+
+ /* Whether the variant is packed. */
+ bool packed;
+
+} vinfo_t;
+
+/* Translate and chain the GNAT_COMPONENT_LIST to the GNU_FIELD_LIST, set the
+ result as the field list of GNU_RECORD_TYPE and finish it up. Return true
+ if GNU_RECORD_TYPE has a rep clause which affects the layout (see below).
+ When called from gnat_to_gnu_entity during the processing of a record type
+ definition, the GCC node for the parent, if any, will be the single field
+ of GNU_RECORD_TYPE and the GCC nodes for the discriminants will be on the
+ GNU_FIELD_LIST. The other calls to this function are recursive calls for
+ the component list of a variant and, in this case, GNU_FIELD_LIST is empty.
+
+ PACKED is 1 if this is for a packed record, -1 if this is for a record
+ with Component_Alignment of Storage_Unit, -2 if this is for a record
+ with a specified alignment.
+
+ DEFINITION is true if we are defining this record type.
+
+ CANCEL_ALIGNMENT is true if the alignment should be zeroed before laying
+ out the record. This means the alignment only serves to force fields to
+ be bitfields, but not to require the record to be that aligned. This is
+ used for variants.
+
+ ALL_REP is true if a rep clause is present for all the fields.
+
+ UNCHECKED_UNION is true if we are building this type for a record with a
+ Pragma Unchecked_Union.
+
+ ARTIFICIAL is true if this is a type that was generated by the compiler.
+
+ DEBUG_INFO is true if we need to write debug information about the type.
+
+ MAYBE_UNUSED is true if this type may be unused in the end; this doesn't
+ mean that its contents may be unused as well; it applies only to the
+ container itself.
+
+ REORDER is true if we are permitted to reorder components of this type.
+
+ FIRST_FREE_POS, if nonzero, is the first (lowest) free field position in
+ the outer record type down to this variant level. It is nonzero only if
+ all the fields down to this level have a rep clause and ALL_REP is false.
+
+ P_GNU_REP_LIST, if nonzero, is a pointer to a list to which each field
+ with a rep clause is to be added; in this case, that is all that should
+ be done with such fields and the return value will be false. */
+
+static bool
+components_to_record (tree gnu_record_type, Node_Id gnat_component_list,
+ tree gnu_field_list, int packed, bool definition,
+ bool cancel_alignment, bool all_rep,
+ bool unchecked_union, bool artificial,
+ bool debug_info, bool maybe_unused, bool reorder,
+ tree first_free_pos, tree *p_gnu_rep_list)
+{
+ bool all_rep_and_size = all_rep && TYPE_SIZE (gnu_record_type);
+ bool variants_have_rep = all_rep;
+ bool layout_with_rep = false;
+ bool has_self_field = false;
+ bool has_aliased_after_self_field = false;
+ Node_Id component_decl, variant_part;
+ tree gnu_field, gnu_next, gnu_last;
+ tree gnu_variant_part = NULL_TREE;
+ tree gnu_rep_list = NULL_TREE;
+ tree gnu_var_list = NULL_TREE;
+ tree gnu_self_list = NULL_TREE;
+ tree gnu_zero_list = NULL_TREE;
+
+ /* For each component referenced in a component declaration create a GCC
+ field and add it to the list, skipping pragmas in the GNAT list. */
+ gnu_last = tree_last (gnu_field_list);
+ if (Present (Component_Items (gnat_component_list)))
+ for (component_decl
+ = First_Non_Pragma (Component_Items (gnat_component_list));
+ Present (component_decl);
+ component_decl = Next_Non_Pragma (component_decl))
+ {
+ Entity_Id gnat_field = Defining_Entity (component_decl);
+ Name_Id gnat_name = Chars (gnat_field);
+
+ /* If present, the _Parent field must have been created as the single
+ field of the record type. Put it before any other fields. */
+ if (gnat_name == Name_uParent)
+ {
+ gnu_field = TYPE_FIELDS (gnu_record_type);
+ gnu_field_list = chainon (gnu_field_list, gnu_field);
+ }
+ else
+ {
+ gnu_field = gnat_to_gnu_field (gnat_field, gnu_record_type, packed,
+ definition, debug_info);
+
+ /* If this is the _Tag field, put it before any other fields. */
+ if (gnat_name == Name_uTag)
+ gnu_field_list = chainon (gnu_field_list, gnu_field);
+
+ /* If this is the _Controller field, put it before the other
+ fields except for the _Tag or _Parent field. */
+ else if (gnat_name == Name_uController && gnu_last)
+ {
+ DECL_CHAIN (gnu_field) = DECL_CHAIN (gnu_last);
+ DECL_CHAIN (gnu_last) = gnu_field;
+ }
+
+ /* If this is a regular field, put it after the other fields. */
+ else
+ {
+ DECL_CHAIN (gnu_field) = gnu_field_list;
+ gnu_field_list = gnu_field;
+ if (!gnu_last)
+ gnu_last = gnu_field;
+
+ /* And record information for the final layout. */
+ if (field_has_self_size (gnu_field))
+ has_self_field = true;
+ else if (has_self_field && field_is_aliased (gnu_field))
+ has_aliased_after_self_field = true;
+ }
+ }
+
+ save_gnu_tree (gnat_field, gnu_field, false);
+ }
+
+ /* At the end of the component list there may be a variant part. */
+ variant_part = Variant_Part (gnat_component_list);
+
+ /* We create a QUAL_UNION_TYPE for the variant part since the variants are
+ mutually exclusive and should go in the same memory. To do this we need
+ to treat each variant as a record whose elements are created from the
+ component list for the variant. So here we create the records from the
+ lists for the variants and put them all into the QUAL_UNION_TYPE.
+ If this is an Unchecked_Union, we make a UNION_TYPE instead or
+ use GNU_RECORD_TYPE if there are no fields so far. */
+ if (Present (variant_part))
+ {
+ Node_Id gnat_discr = Name (variant_part), variant;
+ tree gnu_discr = gnat_to_gnu (gnat_discr);
+ tree gnu_name = TYPE_NAME (gnu_record_type);
+ tree gnu_var_name
+ = concat_name (get_identifier (Get_Name_String (Chars (gnat_discr))),
+ "XVN");
+ tree gnu_union_type, gnu_union_name;
+ tree this_first_free_pos, gnu_variant_list = NULL_TREE;
+ bool union_field_needs_strict_alignment = false;
+ auto_vec <vinfo_t, 16> variant_types;
+ vinfo_t *gnu_variant;
+ unsigned int variants_align = 0;
+ unsigned int i;
+
+ if (TREE_CODE (gnu_name) == TYPE_DECL)
+ gnu_name = DECL_NAME (gnu_name);
+
+ gnu_union_name
+ = concat_name (gnu_name, IDENTIFIER_POINTER (gnu_var_name));
+
+ /* Reuse the enclosing union if this is an Unchecked_Union whose fields
+ are all in the variant part, to match the layout of C unions. There
+ is an associated check below. */
+ if (TREE_CODE (gnu_record_type) == UNION_TYPE)
+ gnu_union_type = gnu_record_type;
+ else
+ {
+ gnu_union_type
+ = make_node (unchecked_union ? UNION_TYPE : QUAL_UNION_TYPE);
+
+ TYPE_NAME (gnu_union_type) = gnu_union_name;
+ TYPE_ALIGN (gnu_union_type) = 0;
+ TYPE_PACKED (gnu_union_type) = TYPE_PACKED (gnu_record_type);
+ }
+
+ /* If all the fields down to this level have a rep clause, find out
+ whether all the fields at this level also have one. If so, then
+ compute the new first free position to be passed downward. */
+ this_first_free_pos = first_free_pos;
+ if (this_first_free_pos)
+ {
+ for (gnu_field = gnu_field_list;
+ gnu_field;
+ gnu_field = DECL_CHAIN (gnu_field))
+ if (DECL_FIELD_OFFSET (gnu_field))
+ {
+ tree pos = bit_position (gnu_field);
+ if (!tree_int_cst_lt (pos, this_first_free_pos))
+ this_first_free_pos
+ = size_binop (PLUS_EXPR, pos, DECL_SIZE (gnu_field));
+ }
+ else
+ {
+ this_first_free_pos = NULL_TREE;
+ break;
+ }
+ }
+
+ /* We build the variants in two passes. The bulk of the work is done in
+ the first pass, that is to say translating the GNAT nodes, building
+ the container types and computing the associated properties. However
+ we cannot finish up the container types during this pass because we
+ don't know where the variant part will be placed until the end. */
+ for (variant = First_Non_Pragma (Variants (variant_part));
+ Present (variant);
+ variant = Next_Non_Pragma (variant))
+ {
+ tree gnu_variant_type = make_node (RECORD_TYPE);
+ tree gnu_inner_name, gnu_qual;
+ bool has_rep;
+ int field_packed;
+ vinfo_t vinfo;
+
+ Get_Variant_Encoding (variant);
+ gnu_inner_name = get_identifier_with_length (Name_Buffer, Name_Len);
+ TYPE_NAME (gnu_variant_type)
+ = concat_name (gnu_union_name,
+ IDENTIFIER_POINTER (gnu_inner_name));
+
+ /* Set the alignment of the inner type in case we need to make
+ inner objects into bitfields, but then clear it out so the
+ record actually gets only the alignment required. */
+ TYPE_ALIGN (gnu_variant_type) = TYPE_ALIGN (gnu_record_type);
+ TYPE_PACKED (gnu_variant_type) = TYPE_PACKED (gnu_record_type);
+
+ /* Similarly, if the outer record has a size specified and all
+ the fields have a rep clause, we can propagate the size. */
+ if (all_rep_and_size)
+ {
+ TYPE_SIZE (gnu_variant_type) = TYPE_SIZE (gnu_record_type);
+ TYPE_SIZE_UNIT (gnu_variant_type)
+ = TYPE_SIZE_UNIT (gnu_record_type);
+ }
+
+ /* Add the fields into the record type for the variant. Note that
+ we are not certain to actually use it at this point; see below. */
+ has_rep
+ = components_to_record (gnu_variant_type, Component_List (variant),
+ NULL_TREE, packed, definition,
+ !all_rep_and_size, all_rep,
+ unchecked_union,
+ true, debug_info, true, reorder,
+ this_first_free_pos,
+ all_rep || this_first_free_pos
+ ? NULL : &gnu_rep_list);
+
+ /* Translate the qualifier and annotate the GNAT node. */
+ gnu_qual = choices_to_gnu (gnu_discr, Discrete_Choices (variant));
+ Set_Present_Expr (variant, annotate_value (gnu_qual));
+
+ /* Deal with packedness like in gnat_to_gnu_field. */
+ if (components_need_strict_alignment (Component_List (variant)))
+ {
+ field_packed = 0;
+ union_field_needs_strict_alignment = true;
+ }
+ else
+ field_packed
+ = adjust_packed (gnu_variant_type, gnu_record_type, packed);
+
+ /* Push this variant onto the stack for the second pass. */
+ vinfo.type = gnu_variant_type;
+ vinfo.name = gnu_inner_name;
+ vinfo.qual = gnu_qual;
+ vinfo.has_rep = has_rep;
+ vinfo.packed = field_packed;
+ variant_types.safe_push (vinfo);
+
+ /* Compute the global properties that will determine the placement of
+ the variant part. */
+ variants_have_rep |= has_rep;
+ if (!field_packed && TYPE_ALIGN (gnu_variant_type) > variants_align)
+ variants_align = TYPE_ALIGN (gnu_variant_type);
+ }
+
+ /* Round up the first free position to the alignment of the variant part
+ for the variants without rep clause. This will guarantee a consistent
+ layout independently of the placement of the variant part. */
+ if (variants_have_rep && variants_align > 0 && this_first_free_pos)
+ this_first_free_pos = round_up (this_first_free_pos, variants_align);
+
+ /* In the second pass, the container types are adjusted if necessary and
+ finished up, then the corresponding fields of the variant part are
+ built with their qualifier, unless this is an unchecked union. */
+ FOR_EACH_VEC_ELT (variant_types, i, gnu_variant)
+ {
+ tree gnu_variant_type = gnu_variant->type;
+ tree gnu_field_list = TYPE_FIELDS (gnu_variant_type);
+
+ /* If this is an Unchecked_Union whose fields are all in the variant
+ part and we have a single field with no representation clause or
+ placed at offset zero, use the field directly to match the layout
+ of C unions. */
+ if (TREE_CODE (gnu_record_type) == UNION_TYPE
+ && gnu_field_list
+ && !DECL_CHAIN (gnu_field_list)
+ && (!DECL_FIELD_OFFSET (gnu_field_list)
+ || integer_zerop (bit_position (gnu_field_list))))
+ {
+ gnu_field = gnu_field_list;
+ DECL_CONTEXT (gnu_field) = gnu_record_type;
+ }
+ else
+ {
+ /* Finalize the variant type now. We used to throw away empty
+ record types but we no longer do that because we need them to
+ generate complete debug info for the variant; otherwise, the
+ union type definition will be lacking the fields associated
+ with these empty variants. */
+ if (gnu_field_list && variants_have_rep && !gnu_variant->has_rep)
+ {
+ /* The variant part will be at offset 0 so we need to ensure
+ that the fields are laid out starting from the first free
+ position at this level. */
+ tree gnu_rep_type = make_node (RECORD_TYPE);
+ tree gnu_rep_part;
+ finish_record_type (gnu_rep_type, NULL_TREE, 0, debug_info);
+ gnu_rep_part
+ = create_rep_part (gnu_rep_type, gnu_variant_type,
+ this_first_free_pos);
+ DECL_CHAIN (gnu_rep_part) = gnu_field_list;
+ gnu_field_list = gnu_rep_part;
+ finish_record_type (gnu_variant_type, gnu_field_list, 0,
+ false);
+ }
+
+ if (debug_info)
+ rest_of_record_type_compilation (gnu_variant_type);
+ create_type_decl (TYPE_NAME (gnu_variant_type), gnu_variant_type,
+ true, debug_info, gnat_component_list);
+
+ gnu_field
+ = create_field_decl (gnu_variant->name, gnu_variant_type,
+ gnu_union_type,
+ all_rep_and_size
+ ? TYPE_SIZE (gnu_variant_type) : 0,
+ variants_have_rep ? bitsize_zero_node : 0,
+ gnu_variant->packed, 0);
+
+ DECL_INTERNAL_P (gnu_field) = 1;
+
+ if (!unchecked_union)
+ DECL_QUALIFIER (gnu_field) = gnu_variant->qual;
+ }
+
+ DECL_CHAIN (gnu_field) = gnu_variant_list;
+ gnu_variant_list = gnu_field;
+ }
+
+ /* Only make the QUAL_UNION_TYPE if there are non-empty variants. */
+ if (gnu_variant_list)
+ {
+ int union_field_packed;
+
+ if (all_rep_and_size)
+ {
+ TYPE_SIZE (gnu_union_type) = TYPE_SIZE (gnu_record_type);
+ TYPE_SIZE_UNIT (gnu_union_type)
+ = TYPE_SIZE_UNIT (gnu_record_type);
+ }
+
+ finish_record_type (gnu_union_type, nreverse (gnu_variant_list),
+ all_rep_and_size ? 1 : 0, debug_info);
+
+ /* If GNU_UNION_TYPE is our record type, it means we must have an
+ Unchecked_Union with no fields. Verify that and, if so, just
+ return. */
+ if (gnu_union_type == gnu_record_type)
+ {
+ gcc_assert (unchecked_union
+ && !gnu_field_list
+ && !gnu_rep_list);
+ return variants_have_rep;
+ }
+
+ create_type_decl (TYPE_NAME (gnu_union_type), gnu_union_type, true,
+ debug_info, gnat_component_list);
+
+ /* Deal with packedness like in gnat_to_gnu_field. */
+ if (union_field_needs_strict_alignment)
+ union_field_packed = 0;
+ else
+ union_field_packed
+ = adjust_packed (gnu_union_type, gnu_record_type, packed);
+
+ gnu_variant_part
+ = create_field_decl (gnu_var_name, gnu_union_type, gnu_record_type,
+ all_rep_and_size
+ ? TYPE_SIZE (gnu_union_type) : 0,
+ variants_have_rep ? bitsize_zero_node : 0,
+ union_field_packed, 0);
+
+ DECL_INTERNAL_P (gnu_variant_part) = 1;
+ }
+ }
+
+ /* Scan GNU_FIELD_LIST and see if any fields have a rep clause and, if we
+ are permitted to reorder components, a self-referential or variable size.
+ If they do, pull them out and put them onto the appropriate list. We have
+ to do this in a separate pass since we want to handle the discriminants
+ but can't play with them until we've used them in debugging data above.
+
+ Similarly, pull out the fields with zero size and no rep clause, as they
+ would otherwise modify the layout and thus very likely run afoul of the
+ Ada semantics, which are different from those of C here.
+
+ ??? If we reorder them, debugging information will be wrong but there is
+ nothing that can be done about this at the moment. */
+ gnu_last = NULL_TREE;
+
+#define MOVE_FROM_FIELD_LIST_TO(LIST) \
+ do { \
+ if (gnu_last) \
+ DECL_CHAIN (gnu_last) = gnu_next; \
+ else \
+ gnu_field_list = gnu_next; \
+ \
+ DECL_CHAIN (gnu_field) = (LIST); \
+ (LIST) = gnu_field; \
+ } while (0)
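+
+/* The macro unlinks GNU_FIELD, whose predecessor is GNU_LAST, from
+   GNU_FIELD_LIST and pushes it onto LIST.  The caller deliberately leaves
+   GNU_LAST untouched afterwards because the element preceding GNU_NEXT is
+   still GNU_LAST.  */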
+
+ for (gnu_field = gnu_field_list; gnu_field; gnu_field = gnu_next)
+ {
+ gnu_next = DECL_CHAIN (gnu_field);
+
+ if (DECL_FIELD_OFFSET (gnu_field))
+ {
+ MOVE_FROM_FIELD_LIST_TO (gnu_rep_list);
+ continue;
+ }
+
+ if ((reorder || has_aliased_after_self_field)
+ && field_has_self_size (gnu_field))
+ {
+ MOVE_FROM_FIELD_LIST_TO (gnu_self_list);
+ continue;
+ }
+
+ if (reorder && field_has_variable_size (gnu_field))
+ {
+ MOVE_FROM_FIELD_LIST_TO (gnu_var_list);
+ continue;
+ }
+
+ if (DECL_SIZE (gnu_field) && integer_zerop (DECL_SIZE (gnu_field)))
+ {
+ DECL_FIELD_OFFSET (gnu_field) = size_zero_node;
+ SET_DECL_OFFSET_ALIGN (gnu_field, BIGGEST_ALIGNMENT);
+ DECL_FIELD_BIT_OFFSET (gnu_field) = bitsize_zero_node;
+ if (field_is_aliased (gnu_field))
+ TYPE_ALIGN (gnu_record_type)
+ = MAX (TYPE_ALIGN (gnu_record_type),
+ TYPE_ALIGN (TREE_TYPE (gnu_field)));
+ MOVE_FROM_FIELD_LIST_TO (gnu_zero_list);
+ continue;
+ }
+
+ gnu_last = gnu_field;
+ }
+
+#undef MOVE_FROM_FIELD_LIST_TO
+
+ gnu_field_list = nreverse (gnu_field_list);
+
+ /* If permitted, we reorder the fields as follows:
+
+ 1) all fixed length fields,
+ 2) all fields whose length doesn't depend on discriminants,
+ 3) all fields whose length depends on discriminants,
+ 4) the variant part,
+
+ within the record and within each variant recursively. */
+ if (reorder)
+ gnu_field_list
+ = chainon (gnu_field_list, chainon (gnu_var_list, gnu_self_list));
+
+ /* Otherwise, if there is an aliased field placed after a field whose length
+ depends on discriminants, we put all the fields of the latter sort last.
+ We need to do this in case an object of this record type is mutable. */
+ else if (has_aliased_after_self_field)
+ gnu_field_list = chainon (gnu_field_list, gnu_self_list);
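+
+ /* For example, in a mutable record where a component String (1 .. D),
+ with D a discriminant, is declared before an aliased component, the
+ String component sits on GNU_SELF_LIST and thus ends up after the
+ aliased one, so the address of the latter does not change when D
+ does. */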
+
+ /* If P_GNU_REP_LIST is nonzero, we are asked to move the fields
+ in our REP list to the previous level because this level needs them in
+ order to do a correct layout, i.e. avoid having overlapping fields. */
+ if (p_gnu_rep_list && gnu_rep_list)
+ *p_gnu_rep_list = chainon (*p_gnu_rep_list, gnu_rep_list);
+
+ /* Otherwise, sort the fields by bit position and put them into their own
+ record, before the others, if we also have fields without rep clause. */
+ else if (gnu_rep_list)
+ {
+ tree gnu_rep_type, gnu_rep_part;
+ int i, len = list_length (gnu_rep_list);
+ tree *gnu_arr = XALLOCAVEC (tree, len);
+
+ /* If all the fields have a rep clause, we can do a flat layout. */
+ layout_with_rep = !gnu_field_list
+ && (!gnu_variant_part || variants_have_rep);
+ gnu_rep_type
+ = layout_with_rep ? gnu_record_type : make_node (RECORD_TYPE);
+
+ for (gnu_field = gnu_rep_list, i = 0;
+ gnu_field;
+ gnu_field = DECL_CHAIN (gnu_field), i++)
+ gnu_arr[i] = gnu_field;
+
+ qsort (gnu_arr, len, sizeof (tree), compare_field_bitpos);
+
+ /* Put the fields in the list in order of increasing position, which
+ means we start from the end. */
+ gnu_rep_list = NULL_TREE;
+ for (i = len - 1; i >= 0; i--)
+ {
+ DECL_CHAIN (gnu_arr[i]) = gnu_rep_list;
+ gnu_rep_list = gnu_arr[i];
+ DECL_CONTEXT (gnu_arr[i]) = gnu_rep_type;
+ }
+
+ if (layout_with_rep)
+ gnu_field_list = gnu_rep_list;
+ else
+ {
+ finish_record_type (gnu_rep_type, gnu_rep_list, 1, debug_info);
+
+ /* If FIRST_FREE_POS is nonzero, we need to ensure that the fields
+ without rep clause are laid out starting from this position.
+ Therefore, we force it as a minimal size on the REP part. */
+ gnu_rep_part
+ = create_rep_part (gnu_rep_type, gnu_record_type, first_free_pos);
+
+ /* Chain the REP part at the beginning of the field list. */
+ DECL_CHAIN (gnu_rep_part) = gnu_field_list;
+ gnu_field_list = gnu_rep_part;
+ }
+ }
+
+ /* Chain the variant part at the end of the field list. */
+ if (gnu_variant_part)
+ gnu_field_list = chainon (gnu_field_list, gnu_variant_part);
+
+ if (cancel_alignment)
+ TYPE_ALIGN (gnu_record_type) = 0;
+
+ TYPE_ARTIFICIAL (gnu_record_type) = artificial;
+
+ finish_record_type (gnu_record_type, gnu_field_list, layout_with_rep ? 1 : 0,
+ debug_info && !maybe_unused);
+
+ /* Chain the fields with zero size at the beginning of the field list. */
+ if (gnu_zero_list)
+ TYPE_FIELDS (gnu_record_type)
+ = chainon (gnu_zero_list, TYPE_FIELDS (gnu_record_type));
+
+ return (gnu_rep_list && !p_gnu_rep_list) || variants_have_rep;
+}
+
+/* Given GNU_SIZE, a GCC tree representing a size, return a Uint to be
+ placed into an Esize, Component_Bit_Offset, or Component_Size value
+ in the GNAT tree. */
+
+static Uint
+annotate_value (tree gnu_size)
+{
+ TCode tcode;
+ Node_Ref_Or_Val ops[3], ret, pre_op1 = No_Uint;
+ struct tree_int_map in;
+ int i;
+
+ /* See if we've already saved the value for this node. */
+ if (EXPR_P (gnu_size))
+ {
+ struct tree_int_map *e;
+
+ if (!annotate_value_cache)
+ annotate_value_cache = htab_create_ggc (512, tree_int_map_hash,
+ tree_int_map_eq, 0);
+ in.base.from = gnu_size;
+ e = (struct tree_int_map *)
+ htab_find (annotate_value_cache, &in);
+
+ if (e)
+ return (Node_Ref_Or_Val) e->to;
+ }
+ else
+ in.base.from = NULL_TREE;
+
+ /* If we do not return inside this switch, TCODE will be set to the
+ code to use for a Create_Node operand, and the loop below will make
+ one recursive call per operand, as given by TREE_CODE_LENGTH. */
+
+ switch (TREE_CODE (gnu_size))
+ {
+ case INTEGER_CST:
+ return TREE_OVERFLOW (gnu_size) ? No_Uint : UI_From_gnu (gnu_size);
+
+ case COMPONENT_REF:
+ /* The only case we handle here is a simple discriminant reference. */
+ if (DECL_DISCRIMINANT_NUMBER (TREE_OPERAND (gnu_size, 1)))
+ {
+ tree n = DECL_DISCRIMINANT_NUMBER (TREE_OPERAND (gnu_size, 1));
+
+ /* Climb up the chain of successive extensions, if any. */
+ while (TREE_CODE (TREE_OPERAND (gnu_size, 0)) == COMPONENT_REF
+ && DECL_NAME (TREE_OPERAND (TREE_OPERAND (gnu_size, 0), 1))
+ == parent_name_id)
+ gnu_size = TREE_OPERAND (gnu_size, 0);
+
+ if (TREE_CODE (TREE_OPERAND (gnu_size, 0)) == PLACEHOLDER_EXPR)
+ return
+ Create_Node (Discrim_Val, annotate_value (n), No_Uint, No_Uint);
+ }
+
+ return No_Uint;
+
+ CASE_CONVERT: case NON_LVALUE_EXPR:
+ return annotate_value (TREE_OPERAND (gnu_size, 0));
+
+ /* Now just list the operations we handle. */
+ case COND_EXPR: tcode = Cond_Expr; break;
+ case PLUS_EXPR: tcode = Plus_Expr; break;
+ case MINUS_EXPR: tcode = Minus_Expr; break;
+ case MULT_EXPR: tcode = Mult_Expr; break;
+ case TRUNC_DIV_EXPR: tcode = Trunc_Div_Expr; break;
+ case CEIL_DIV_EXPR: tcode = Ceil_Div_Expr; break;
+ case FLOOR_DIV_EXPR: tcode = Floor_Div_Expr; break;
+ case TRUNC_MOD_EXPR: tcode = Trunc_Mod_Expr; break;
+ case CEIL_MOD_EXPR: tcode = Ceil_Mod_Expr; break;
+ case FLOOR_MOD_EXPR: tcode = Floor_Mod_Expr; break;
+ case EXACT_DIV_EXPR: tcode = Exact_Div_Expr; break;
+ case NEGATE_EXPR: tcode = Negate_Expr; break;
+ case MIN_EXPR: tcode = Min_Expr; break;
+ case MAX_EXPR: tcode = Max_Expr; break;
+ case ABS_EXPR: tcode = Abs_Expr; break;
+ case TRUTH_ANDIF_EXPR: tcode = Truth_Andif_Expr; break;
+ case TRUTH_ORIF_EXPR: tcode = Truth_Orif_Expr; break;
+ case TRUTH_AND_EXPR: tcode = Truth_And_Expr; break;
+ case TRUTH_OR_EXPR: tcode = Truth_Or_Expr; break;
+ case TRUTH_XOR_EXPR: tcode = Truth_Xor_Expr; break;
+ case TRUTH_NOT_EXPR: tcode = Truth_Not_Expr; break;
+ case LT_EXPR: tcode = Lt_Expr; break;
+ case LE_EXPR: tcode = Le_Expr; break;
+ case GT_EXPR: tcode = Gt_Expr; break;
+ case GE_EXPR: tcode = Ge_Expr; break;
+ case EQ_EXPR: tcode = Eq_Expr; break;
+ case NE_EXPR: tcode = Ne_Expr; break;
+
+ case BIT_AND_EXPR:
+ tcode = Bit_And_Expr;
+ /* For negative values, build NEGATE_EXPR of the opposite. Such values
+ appear in expressions containing aligning patterns. Note that, since
+ sizetype is unsigned, we have to jump through some hoops. */
+ if (TREE_CODE (TREE_OPERAND (gnu_size, 1)) == INTEGER_CST)
+ {
+ tree op1 = TREE_OPERAND (gnu_size, 1);
+ double_int signed_op1
+ = tree_to_double_int (op1).sext (TYPE_PRECISION (sizetype));
+ if (signed_op1.is_negative ())
+ {
+ op1 = double_int_to_tree (sizetype, -signed_op1);
+ pre_op1 = annotate_value (build1 (NEGATE_EXPR, sizetype, op1));
+ }
+ }
+ break;
+
+ case CALL_EXPR:
+ {
+ tree t = maybe_inline_call_in_expr (gnu_size);
+ if (t)
+ return annotate_value (t);
+ }
+
+ /* Fall through... */
+
+ default:
+ return No_Uint;
+ }
+
+ /* Now get each of the operands that's relevant for this code. If any
+ cannot be expressed as a repinfo node, say we can't. */
+ for (i = 0; i < 3; i++)
+ ops[i] = No_Uint;
+
+ for (i = 0; i < TREE_CODE_LENGTH (TREE_CODE (gnu_size)); i++)
+ {
+ if (i == 1 && pre_op1 != No_Uint)
+ ops[i] = pre_op1;
+ else
+ ops[i] = annotate_value (TREE_OPERAND (gnu_size, i));
+ if (ops[i] == No_Uint)
+ return No_Uint;
+ }
+
+ ret = Create_Node (tcode, ops[0], ops[1], ops[2]);
+
+ /* Save the result in the cache. */
+ if (in.base.from)
+ {
+ struct tree_int_map **h;
+ /* We can't assume the hash table data hasn't moved since the
+ initial look up, so we have to search again. Allocating and
+ inserting an entry at that point would be an alternative, but
+ then we'd better discard the entry if we decided not to cache
+ it. */
+ h = (struct tree_int_map **)
+ htab_find_slot (annotate_value_cache, &in, INSERT);
+ gcc_assert (!*h);
+ *h = ggc_alloc_tree_int_map ();
+ (*h)->base.from = gnu_size;
+ (*h)->to = ret;
+ }
+
+ return ret;
+}
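+
+/* For example, a size of the form D * 8 where D is a stored discriminant is
+   annotated as Create_Node (Mult_Expr, Create_Node (Discrim_Val, N, No_Uint,
+   No_Uint), 8, No_Uint) with N the discriminant number, whereas a constant
+   size is returned directly through UI_From_gnu.  */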
+
+/* Given GNAT_ENTITY, an object (constant, variable, parameter, exception)
+ and GNU_TYPE, its corresponding GCC type, set Esize and Alignment to the
+ size and alignment used by Gigi. Prefer SIZE over TYPE_SIZE if non-null.
+ BY_REF is true if the object is used by reference. */
+
+void
+annotate_object (Entity_Id gnat_entity, tree gnu_type, tree size, bool by_ref)
+{
+ if (by_ref)
+ {
+ if (TYPE_IS_FAT_POINTER_P (gnu_type))
+ gnu_type = TYPE_UNCONSTRAINED_ARRAY (gnu_type);
+ else
+ gnu_type = TREE_TYPE (gnu_type);
+ }
+
+ if (Unknown_Esize (gnat_entity))
+ {
+ if (TREE_CODE (gnu_type) == RECORD_TYPE
+ && TYPE_CONTAINS_TEMPLATE_P (gnu_type))
+ size = TYPE_SIZE (TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_type))));
+ else if (!size)
+ size = TYPE_SIZE (gnu_type);
+
+ if (size)
+ Set_Esize (gnat_entity, annotate_value (size));
+ }
+
+ if (Unknown_Alignment (gnat_entity))
+ Set_Alignment (gnat_entity,
+ UI_From_Int (TYPE_ALIGN (gnu_type) / BITS_PER_UNIT));
+}
+
+/* Return first element of field list whose TREE_PURPOSE is the same as ELEM.
+ Return NULL_TREE if there is no such element in the list. */
+
+static tree
+purpose_member_field (const_tree elem, tree list)
+{
+ while (list)
+ {
+ tree field = TREE_PURPOSE (list);
+ if (SAME_FIELD_P (field, elem))
+ return list;
+ list = TREE_CHAIN (list);
+ }
+ return NULL_TREE;
+}
+
+/* Given GNAT_ENTITY, a record type, and GNU_TYPE, its corresponding GCC type,
+ set Component_Bit_Offset and Esize of the components to the position and
+ size used by Gigi. */
+
+static void
+annotate_rep (Entity_Id gnat_entity, tree gnu_type)
+{
+ Entity_Id gnat_field;
+ tree gnu_list;
+
+ /* We operate by first making a list of all fields and their position (we
+ can get the size easily) and then updating all the sizes in the tree. */
+ gnu_list
+ = build_position_list (gnu_type, false, size_zero_node, bitsize_zero_node,
+ BIGGEST_ALIGNMENT, NULL_TREE);
+
+ for (gnat_field = First_Entity (gnat_entity);
+ Present (gnat_field);
+ gnat_field = Next_Entity (gnat_field))
+ if (Ekind (gnat_field) == E_Component
+ || (Ekind (gnat_field) == E_Discriminant
+ && !Is_Unchecked_Union (Scope (gnat_field))))
+ {
+ tree t = purpose_member_field (gnat_to_gnu_field_decl (gnat_field),
+ gnu_list);
+ if (t)
+ {
+ tree parent_offset;
+
+ /* If we are just annotating types and the type is tagged, the tag
+ and the parent components are not generated by the front-end so
+ we need to add the appropriate offset to each component without
+ representation clause. */
+ if (type_annotate_only
+ && Is_Tagged_Type (gnat_entity)
+ && No (Component_Clause (gnat_field)))
+ {
+ /* For a component appearing in the current extension, the
+ offset is the size of the parent. */
+ if (Is_Derived_Type (gnat_entity)
+ && Original_Record_Component (gnat_field) == gnat_field)
+ parent_offset
+ = UI_To_gnu (Esize (Etype (Base_Type (gnat_entity))),
+ bitsizetype);
+ else
+ parent_offset = bitsize_int (POINTER_SIZE);
+
+ if (TYPE_FIELDS (gnu_type))
+ parent_offset
+ = round_up (parent_offset,
+ DECL_ALIGN (TYPE_FIELDS (gnu_type)));
+ }
+ else
+ parent_offset = bitsize_zero_node;
+
+ Set_Component_Bit_Offset
+ (gnat_field,
+ annotate_value
+ (size_binop (PLUS_EXPR,
+ bit_from_pos (TREE_VEC_ELT (TREE_VALUE (t), 0),
+ TREE_VEC_ELT (TREE_VALUE (t), 2)),
+ parent_offset)));
+
+ Set_Esize (gnat_field,
+ annotate_value (DECL_SIZE (TREE_PURPOSE (t))));
+ }
+ else if (Is_Tagged_Type (gnat_entity) && Is_Derived_Type (gnat_entity))
+ {
+ /* If there is no entry, this is an inherited component whose
+ position is the same as in the parent type. */
+ Set_Component_Bit_Offset
+ (gnat_field,
+ Component_Bit_Offset (Original_Record_Component (gnat_field)));
+
+ Set_Esize (gnat_field,
+ Esize (Original_Record_Component (gnat_field)));
+ }
+ }
+}
+
+/* Scan all fields in GNU_TYPE and return a TREE_LIST where TREE_PURPOSE is
+ the FIELD_DECL and TREE_VALUE a TREE_VEC containing the byte position, the
+ value to be placed into DECL_OFFSET_ALIGN and the bit position. The list
+ of fields is flattened, except for variant parts if DO_NOT_FLATTEN_VARIANT
+ is set to true. GNU_POS is to be added to the position, GNU_BITPOS to the
+ bit position, OFFSET_ALIGN is the present offset alignment. GNU_LIST is a
+ pre-existing list to be chained to the newly created entries. */
+
+static tree
+build_position_list (tree gnu_type, bool do_not_flatten_variant, tree gnu_pos,
+ tree gnu_bitpos, unsigned int offset_align, tree gnu_list)
+{
+ tree gnu_field;
+
+ for (gnu_field = TYPE_FIELDS (gnu_type);
+ gnu_field;
+ gnu_field = DECL_CHAIN (gnu_field))
+ {
+ tree gnu_our_bitpos = size_binop (PLUS_EXPR, gnu_bitpos,
+ DECL_FIELD_BIT_OFFSET (gnu_field));
+ tree gnu_our_offset = size_binop (PLUS_EXPR, gnu_pos,
+ DECL_FIELD_OFFSET (gnu_field));
+ unsigned int our_offset_align
+ = MIN (offset_align, DECL_OFFSET_ALIGN (gnu_field));
+ tree v = make_tree_vec (3);
+
+ TREE_VEC_ELT (v, 0) = gnu_our_offset;
+ TREE_VEC_ELT (v, 1) = size_int (our_offset_align);
+ TREE_VEC_ELT (v, 2) = gnu_our_bitpos;
+ gnu_list = tree_cons (gnu_field, v, gnu_list);
+
+ /* Recurse on internal fields, flattening the nested fields except for
+ those in the variant part, if requested. */
+ if (DECL_INTERNAL_P (gnu_field))
+ {
+ tree gnu_field_type = TREE_TYPE (gnu_field);
+ if (do_not_flatten_variant
+ && TREE_CODE (gnu_field_type) == QUAL_UNION_TYPE)
+ gnu_list
+ = build_position_list (gnu_field_type, do_not_flatten_variant,
+ size_zero_node, bitsize_zero_node,
+ BIGGEST_ALIGNMENT, gnu_list);
+ else
+ gnu_list
+ = build_position_list (gnu_field_type, do_not_flatten_variant,
+ gnu_our_offset, gnu_our_bitpos,
+ our_offset_align, gnu_list);
+ }
+ }
+
+ return gnu_list;
+}
+
+/* Return a list describing the substitutions needed to reflect the
+ discriminant substitutions from GNAT_TYPE to GNAT_SUBTYPE. They can
+ be in any order. The values in an element of the list are in the form
+ of operands to SUBSTITUTE_IN_EXPR. DEFINITION is true if this is for
+ a definition of GNAT_SUBTYPE. */
+
+static vec<subst_pair>
+build_subst_list (Entity_Id gnat_subtype, Entity_Id gnat_type, bool definition)
+{
+ vec<subst_pair> gnu_list = vNULL;
+ Entity_Id gnat_discrim;
+ Node_Id gnat_constr;
+
+ for (gnat_discrim = First_Stored_Discriminant (gnat_type),
+ gnat_constr = First_Elmt (Stored_Constraint (gnat_subtype));
+ Present (gnat_discrim);
+ gnat_discrim = Next_Stored_Discriminant (gnat_discrim),
+ gnat_constr = Next_Elmt (gnat_constr))
+ /* Ignore access discriminants. */
+ if (!Is_Access_Type (Etype (Node (gnat_constr))))
+ {
+ tree gnu_field = gnat_to_gnu_field_decl (gnat_discrim);
+ tree replacement = convert (TREE_TYPE (gnu_field),
+ elaborate_expression
+ (Node (gnat_constr), gnat_subtype,
+ get_entity_name (gnat_discrim),
+ definition, true, false));
+ subst_pair s = {gnu_field, replacement};
+ gnu_list.safe_push (s);
+ }
+
+ return gnu_list;
+}
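+
+/* For example, for a subtype Rec (D => 5) of a discriminated record type,
+   the returned vector holds a single pair associating the FIELD_DECL of D
+   with the constant 5 converted to the field's type, ready to be applied
+   with SUBSTITUTE_IN_EXPR to size expressions and variant qualifiers.  */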
+
+/* Scan all fields in QUAL_UNION_TYPE and return a list describing the
+ variants of QUAL_UNION_TYPE that are still relevant after applying
+ the substitutions described in SUBST_LIST. GNU_LIST is a pre-existing
+ list to be prepended to the newly created entries. */
+
+static vec<variant_desc>
+build_variant_list (tree qual_union_type, vec<subst_pair> subst_list,
+ vec<variant_desc> gnu_list)
+{
+ tree gnu_field;
+
+ for (gnu_field = TYPE_FIELDS (qual_union_type);
+ gnu_field;
+ gnu_field = DECL_CHAIN (gnu_field))
+ {
+ tree qual = DECL_QUALIFIER (gnu_field);
+ unsigned int i;
+ subst_pair *s;
+
+ FOR_EACH_VEC_ELT (subst_list, i, s)
+ qual = SUBSTITUTE_IN_EXPR (qual, s->discriminant, s->replacement);
+
+ /* If the new qualifier is not unconditionally false, its variant may
+ still be accessed. */
+ if (!integer_zerop (qual))
+ {
+ tree variant_type = TREE_TYPE (gnu_field), variant_subpart;
+ variant_desc v = {variant_type, gnu_field, qual, NULL_TREE};
+
+ gnu_list.safe_push (v);
+
+ /* Recurse on the variant subpart of the variant, if any. */
+ variant_subpart = get_variant_part (variant_type);
+ if (variant_subpart)
+ gnu_list = build_variant_list (TREE_TYPE (variant_subpart),
+ subst_list, gnu_list);
+
+ /* If the new qualifier is unconditionally true, the subsequent
+ variants cannot be accessed. */
+ if (integer_onep (qual))
+ break;
+ }
+ }
+
+ return gnu_list;
+}
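+
+/* For example, if SUBST_LIST constrains the governing discriminant to 1 and
+   a variant has qualifier D = 1, the substituted qualifier folds to true:
+   that variant is pushed and the walk stops there, while variants whose
+   qualifier folds to false are dropped altogether.  */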
+
+/* UINT_SIZE is a Uint giving the specified size for an object of GNU_TYPE
+ corresponding to GNAT_OBJECT. If the size is valid, return an INTEGER_CST
+ corresponding to its value. Otherwise, return NULL_TREE. KIND is set to
+ VAR_DECL if we are specifying the size of an object, TYPE_DECL for the
+ size of a type, and FIELD_DECL for the size of a field. COMPONENT_P is
+ true if we are being called to process the Component_Size of GNAT_OBJECT;
+ this is used only for error messages. ZERO_OK is true if a size of zero
+ is permitted; if ZERO_OK is false, it means that a size of zero should be
+ treated as an unspecified size. */
+
+static tree
+validate_size (Uint uint_size, tree gnu_type, Entity_Id gnat_object,
+ enum tree_code kind, bool component_p, bool zero_ok)
+{
+ Node_Id gnat_error_node;
+ tree type_size, size;
+
+ /* Return NULL_TREE if no size was specified. */
+ if (uint_size == No_Uint)
+ return NULL_TREE;
+
+ /* Ignore a negative size since that corresponds to our back-annotation. */
+ if (UI_Lt (uint_size, Uint_0))
+ return NULL_TREE;
+
+ /* Find the node to use for error messages. */
+ if ((Ekind (gnat_object) == E_Component
+ || Ekind (gnat_object) == E_Discriminant)
+ && Present (Component_Clause (gnat_object)))
+ gnat_error_node = Last_Bit (Component_Clause (gnat_object));
+ else if (Present (Size_Clause (gnat_object)))
+ gnat_error_node = Expression (Size_Clause (gnat_object));
+ else
+ gnat_error_node = gnat_object;
+
+ /* Get the size as an INTEGER_CST. Issue an error if a size was specified
+ but cannot be represented in bitsizetype. */
+ size = UI_To_gnu (uint_size, bitsizetype);
+ if (TREE_OVERFLOW (size))
+ {
+ if (component_p)
+ post_error_ne ("component size for& is too large", gnat_error_node,
+ gnat_object);
+ else
+ post_error_ne ("size for& is too large", gnat_error_node,
+ gnat_object);
+ return NULL_TREE;
+ }
+
+ /* Ignore a zero size if it is not permitted. */
+ if (!zero_ok && integer_zerop (size))
+ return NULL_TREE;
+
+ /* The size of objects is always a multiple of a byte. */
+ if (kind == VAR_DECL
+ && !integer_zerop (size_binop (TRUNC_MOD_EXPR, size, bitsize_unit_node)))
+ {
+ if (component_p)
+ post_error_ne ("component size for& is not a multiple of Storage_Unit",
+ gnat_error_node, gnat_object);
+ else
+ post_error_ne ("size for& is not a multiple of Storage_Unit",
+ gnat_error_node, gnat_object);
+ return NULL_TREE;
+ }
+
+ /* If this is an integral type or a packed array type, the front-end has
+ already verified the size, so we need not do it here (which would mean
+ checking against the bounds). However, if this is an aliased object,
+ it may not be smaller than the type of the object. */
+ if ((INTEGRAL_TYPE_P (gnu_type) || TYPE_IS_PACKED_ARRAY_TYPE_P (gnu_type))
+ && !(kind == VAR_DECL && Is_Aliased (gnat_object)))
+ return size;
+
+ /* If the object is a record that contains a template, add the size of the
+ template to the specified size. */
+ if (TREE_CODE (gnu_type) == RECORD_TYPE
+ && TYPE_CONTAINS_TEMPLATE_P (gnu_type))
+ size = size_binop (PLUS_EXPR, DECL_SIZE (TYPE_FIELDS (gnu_type)), size);
+
+ if (kind == VAR_DECL
+ /* If a type needs strict alignment, a component of this type in
+ a packed record cannot be packed and thus uses the type size. */
+ || (kind == TYPE_DECL && Strict_Alignment (gnat_object)))
+ type_size = TYPE_SIZE (gnu_type);
+ else
+ type_size = rm_size (gnu_type);
+
+ /* Modify the size of a discriminated type to be the maximum size. */
+ if (type_size && CONTAINS_PLACEHOLDER_P (type_size))
+ type_size = max_size (type_size, true);
+
+ /* If this is an access type or a fat pointer, the minimum size is that given
+ by the smallest integral mode that's valid for pointers. */
+ if (TREE_CODE (gnu_type) == POINTER_TYPE || TYPE_IS_FAT_POINTER_P (gnu_type))
+ {
+ enum machine_mode p_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ while (!targetm.valid_pointer_mode (p_mode))
+ p_mode = GET_MODE_WIDER_MODE (p_mode);
+ type_size = bitsize_int (GET_MODE_BITSIZE (p_mode));
+ }
+
+ /* Issue an error either if the default size of the object isn't a constant
+ or if the new size is smaller than it. */
+ if (TREE_CODE (type_size) != INTEGER_CST
+ || TREE_OVERFLOW (type_size)
+ || tree_int_cst_lt (size, type_size))
+ {
+ if (component_p)
+ post_error_ne_tree
+ ("component size for& too small{, minimum allowed is ^}",
+ gnat_error_node, gnat_object, type_size);
+ else
+ post_error_ne_tree
+ ("size for& too small{, minimum allowed is ^}",
+ gnat_error_node, gnat_object, type_size);
+ return NULL_TREE;
+ }
+
+ return size;
+}
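+
+/* For example, a clause "for T'Size use 8;" on a record type whose minimum
+   size is 16 bits is rejected above with "size for& too small", whereas a
+   clause "for Obj'Size use 12;" on an object is rejected earlier in the
+   function because 12 is not a multiple of Storage_Unit.  */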
+
+/* Similarly, but both validate and process a value of RM size. This routine
+ is only called for types. */
+
+static void
+set_rm_size (Uint uint_size, tree gnu_type, Entity_Id gnat_entity)
+{
+ Node_Id gnat_attr_node;
+ tree old_size, size;
+
+ /* Do nothing if no size was specified. */
+ if (uint_size == No_Uint)
+ return;
+
+ /* Ignore a negative size since that corresponds to our back-annotation. */
+ if (UI_Lt (uint_size, Uint_0))
+ return;
+
+ /* Only issue an error if a Value_Size clause was explicitly given.
+ Otherwise, we'd be duplicating an error on the Size clause. */
+ gnat_attr_node
+ = Get_Attribute_Definition_Clause (gnat_entity, Attr_Value_Size);
+
+ /* Get the size as an INTEGER_CST. Issue an error if a size was specified
+ but cannot be represented in bitsizetype. */
+ size = UI_To_gnu (uint_size, bitsizetype);
+ if (TREE_OVERFLOW (size))
+ {
+ if (Present (gnat_attr_node))
+ post_error_ne ("Value_Size for& is too large", gnat_attr_node,
+ gnat_entity);
+ return;
+ }
+
+ /* Ignore a zero size unless a Value_Size clause exists, or a size clause
+ exists, or this is an integer type, in which case the front-end will
+ always have set it. */
+ if (No (gnat_attr_node)
+ && integer_zerop (size)
+ && !Has_Size_Clause (gnat_entity)
+ && !Is_Discrete_Or_Fixed_Point_Type (gnat_entity))
+ return;
+
+ old_size = rm_size (gnu_type);
+
+ /* If the old size is self-referential, get the maximum size. */
+ if (CONTAINS_PLACEHOLDER_P (old_size))
+ old_size = max_size (old_size, true);
+
+ /* Issue an error either if the old size of the object isn't a constant or
+ if the new size is smaller than it. The front-end has already verified
+ this for scalar and packed array types. */
+ if (TREE_CODE (old_size) != INTEGER_CST
+ || TREE_OVERFLOW (old_size)
+ || (AGGREGATE_TYPE_P (gnu_type)
+ && !(TREE_CODE (gnu_type) == ARRAY_TYPE
+ && TYPE_PACKED_ARRAY_TYPE_P (gnu_type))
+ && !(TYPE_IS_PADDING_P (gnu_type)
+ && TREE_CODE (TREE_TYPE (TYPE_FIELDS (gnu_type))) == ARRAY_TYPE
+ && TYPE_PACKED_ARRAY_TYPE_P
+ (TREE_TYPE (TYPE_FIELDS (gnu_type))))
+ && tree_int_cst_lt (size, old_size)))
+ {
+ if (Present (gnat_attr_node))
+ post_error_ne_tree
+ ("Value_Size for& too small{, minimum allowed is ^}",
+ gnat_attr_node, gnat_entity, old_size);
+ return;
+ }
+
+ /* Otherwise, set the RM size proper for integral types... */
+ if ((TREE_CODE (gnu_type) == INTEGER_TYPE
+ && Is_Discrete_Or_Fixed_Point_Type (gnat_entity))
+ || (TREE_CODE (gnu_type) == ENUMERAL_TYPE
+ || TREE_CODE (gnu_type) == BOOLEAN_TYPE))
+ SET_TYPE_RM_SIZE (gnu_type, size);
+
+ /* ...or the Ada size for record and union types. */
+ else if (RECORD_OR_UNION_TYPE_P (gnu_type)
+ && !TYPE_FAT_POINTER_P (gnu_type))
+ SET_TYPE_ADA_SIZE (gnu_type, size);
+}
+
+/* ALIGNMENT is a Uint giving the alignment specified for GNAT_ENTITY,
+ a type or object whose present alignment is ALIGN. If this alignment is
+ valid, return it. Otherwise, give an error and return ALIGN. */
+
+static unsigned int
+validate_alignment (Uint alignment, Entity_Id gnat_entity, unsigned int align)
+{
+ unsigned int max_allowed_alignment = get_target_maximum_allowed_alignment ();
+ unsigned int new_align;
+ Node_Id gnat_error_node;
+
+ /* Don't worry about checking alignment if alignment was not specified
+ by the source program and we already posted an error for this entity. */
+ if (Error_Posted (gnat_entity) && !Has_Alignment_Clause (gnat_entity))
+ return align;
+
+ /* Post the error on the alignment clause if any. Note, for the implicit
+ base type of an array type, the alignment clause is on the first
+ subtype. */
+ if (Present (Alignment_Clause (gnat_entity)))
+ gnat_error_node = Expression (Alignment_Clause (gnat_entity));
+
+ else if (Is_Itype (gnat_entity)
+ && Is_Array_Type (gnat_entity)
+ && Etype (gnat_entity) == gnat_entity
+ && Present (Alignment_Clause (First_Subtype (gnat_entity))))
+ gnat_error_node
+ = Expression (Alignment_Clause (First_Subtype (gnat_entity)));
+
+ else
+ gnat_error_node = gnat_entity;
+
+ /* Within GCC, an alignment is an integer, so we must make sure a value is
+ specified that fits in that range. Also, there is an upper bound to
+ alignments we can support/allow. */
+ if (!UI_Is_In_Int_Range (alignment)
+ || ((new_align = UI_To_Int (alignment)) > max_allowed_alignment))
+ post_error_ne_num ("largest supported alignment for& is ^",
+ gnat_error_node, gnat_entity, max_allowed_alignment);
+ else if (!(Present (Alignment_Clause (gnat_entity))
+ && From_At_Mod (Alignment_Clause (gnat_entity)))
+ && new_align * BITS_PER_UNIT < align)
+ {
+ unsigned int double_align;
+ bool is_capped_double, align_clause;
+
+ /* If the default alignment of "double" or larger scalar types is
+ specifically capped and the new alignment is above the cap, do
+ not post an error and change the alignment only if there is an
+ alignment clause; this makes it possible to have the associated
+ GCC type overaligned by default for performance reasons. */
+ if ((double_align = double_float_alignment) > 0)
+ {
+ Entity_Id gnat_type
+ = Is_Type (gnat_entity) ? gnat_entity : Etype (gnat_entity);
+ is_capped_double
+ = is_double_float_or_array (gnat_type, &align_clause);
+ }
+ else if ((double_align = double_scalar_alignment) > 0)
+ {
+ Entity_Id gnat_type
+ = Is_Type (gnat_entity) ? gnat_entity : Etype (gnat_entity);
+ is_capped_double
+ = is_double_scalar_or_array (gnat_type, &align_clause);
+ }
+ else
+ is_capped_double = align_clause = false;
+
+ if (is_capped_double && new_align >= double_align)
+ {
+ if (align_clause)
+ align = new_align * BITS_PER_UNIT;
+ }
+ else
+ {
+ if (is_capped_double)
+ align = double_align * BITS_PER_UNIT;
+
+ post_error_ne_num ("alignment for& must be at least ^",
+ gnat_error_node, gnat_entity,
+ align / BITS_PER_UNIT);
+ }
+ }
+ else
+ {
+ new_align = (new_align > 0 ? new_align * BITS_PER_UNIT : 1);
+ if (new_align > align)
+ align = new_align;
+ }
+
+ return align;
+}
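+
+/* A minimal standalone sketch of the unit handling above: source-level
+   alignments are expressed in storage units (bytes) while GCC alignments
+   such as TYPE_ALIGN and DECL_ALIGN are in bits, hence the recurring
+   NEW_ALIGN * BITS_PER_UNIT scaling.  Everything below is illustrative
+   only and not used by gigi; BITS_PER_UNIT is assumed to be 8.  */
+
+static unsigned int ATTRIBUTE_UNUSED
+sketch_merge_alignment (unsigned int requested_bytes, unsigned int align_bits)
+{
+  const unsigned int bits_per_unit = 8;
+  unsigned int requested_bits
+    = requested_bytes > 0 ? requested_bytes * bits_per_unit : 1;
+
+  /* Mirror the final branch of validate_alignment: only ever increase
+     the alignment, never decrease it.  */
+  return requested_bits > align_bits ? requested_bits : align_bits;
+}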
+
+/* Verify that OBJECT, a type or decl, is something we can implement
+ atomically. If not, give an error for GNAT_ENTITY. COMP_P is true
+ if we require atomic components. */
+
+static void
+check_ok_for_atomic (tree object, Entity_Id gnat_entity, bool comp_p)
+{
+ Node_Id gnat_error_point = gnat_entity;
+ Node_Id gnat_node;
+ enum machine_mode mode;
+ unsigned int align;
+ tree size;
+
+  /* There are three cases of what OBJECT can be.  It can be a type, in which
+ case we take the size, alignment and mode from the type. It can be a
+ declaration that was indirect, in which case the relevant values are
+ that of the type being pointed to, or it can be a normal declaration,
+ in which case the values are of the decl. The code below assumes that
+ OBJECT is either a type or a decl. */
+ if (TYPE_P (object))
+ {
+ /* If this is an anonymous base type, nothing to check. Error will be
+ reported on the source type. */
+ if (!Comes_From_Source (gnat_entity))
+ return;
+
+ mode = TYPE_MODE (object);
+ align = TYPE_ALIGN (object);
+ size = TYPE_SIZE (object);
+ }
+ else if (DECL_BY_REF_P (object))
+ {
+ mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (object)));
+ align = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (object)));
+ size = TYPE_SIZE (TREE_TYPE (TREE_TYPE (object)));
+ }
+ else
+ {
+ mode = DECL_MODE (object);
+ align = DECL_ALIGN (object);
+ size = DECL_SIZE (object);
+ }
+
+  /* Consider all floating-point types atomic and any types that are
+     represented by integers no wider than a machine word.  */
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || ((GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD))
+ return;
+
+  /* For the moment, also allow anything that has an alignment equal to
+     its size, provided this size is no larger than a word.  */
+ if (size && TREE_CODE (size) == INTEGER_CST
+ && compare_tree_int (size, align) == 0
+ && align <= BITS_PER_WORD)
+ return;
+
+ for (gnat_node = First_Rep_Item (gnat_entity); Present (gnat_node);
+ gnat_node = Next_Rep_Item (gnat_node))
+ {
+ if (!comp_p && Nkind (gnat_node) == N_Pragma
+ && (Get_Pragma_Id (Chars (Pragma_Identifier (gnat_node)))
+ == Pragma_Atomic))
+ gnat_error_point = First (Pragma_Argument_Associations (gnat_node));
+ else if (comp_p && Nkind (gnat_node) == N_Pragma
+ && (Get_Pragma_Id (Chars (Pragma_Identifier (gnat_node)))
+ == Pragma_Atomic_Components))
+ gnat_error_point = First (Pragma_Argument_Associations (gnat_node));
+ }
+
+ if (comp_p)
+ post_error_ne ("atomic access to component of & cannot be guaranteed",
+ gnat_error_point, gnat_entity);
+ else
+ post_error_ne ("atomic access to & cannot be guaranteed",
+ gnat_error_point, gnat_entity);
+}
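+
+/* Standalone sketch of the two acceptance tests above, with the mode
+   reduced to flags and sizes/alignments as plain bit counts.  Purely
+   illustrative; the names and the word size parameter are hypothetical
+   and this function is not used by gigi.  */
+
+static int ATTRIBUTE_UNUSED
+sketch_atomic_ok_p (int float_p, int int_p, unsigned int mode_bits,
+		    unsigned int size_bits, unsigned int align_bits,
+		    unsigned int word_bits)
+{
+  /* Floating-point, or integral no wider than a machine word.  */
+  if (float_p || (int_p && mode_bits <= word_bits))
+    return 1;
+
+  /* Alignment equal to the size, itself no larger than a word.  */
+  if (size_bits == align_bits && align_bits <= word_bits)
+    return 1;
+
+  return 0;
+}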
+
+/* Helper for the intrin compatibility checks family. Evaluate whether
+ two types are definitely incompatible. */
+
+static bool
+intrin_types_incompatible_p (tree t1, tree t2)
+{
+ enum tree_code code;
+
+ if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
+ return false;
+
+ if (TYPE_MODE (t1) != TYPE_MODE (t2))
+ return true;
+
+ if (TREE_CODE (t1) != TREE_CODE (t2))
+ return true;
+
+ code = TREE_CODE (t1);
+
+ switch (code)
+ {
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ return TYPE_PRECISION (t1) != TYPE_PRECISION (t2);
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* Assume designated types are ok. We'd need to account for char * and
+ void * variants to do better, which could rapidly get messy and isn't
+ clearly worth the effort. */
+ return false;
+
+ default:
+ break;
+ }
+
+ return false;
+}
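+
+/* For instance, under this predicate a 32-bit and a 64-bit integer type
+   differ in mode and precision and are flagged as incompatible, while
+   char * and void * both fall under POINTER_TYPE and are deliberately
+   accepted.  */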
+
+/* Helper for intrin_profiles_compatible_p, to perform compatibility checks
+ on the Ada/builtin argument lists for the INB binding. */
+
+static bool
+intrin_arglists_compatible_p (intrin_binding_t * inb)
+{
+ function_args_iterator ada_iter, btin_iter;
+
+ function_args_iter_init (&ada_iter, inb->ada_fntype);
+ function_args_iter_init (&btin_iter, inb->btin_fntype);
+
+ /* Sequence position of the last argument we checked. */
+ int argpos = 0;
+
+ while (1)
+ {
+ tree ada_type = function_args_iter_cond (&ada_iter);
+ tree btin_type = function_args_iter_cond (&btin_iter);
+
+ /* If we've exhausted both lists simultaneously, we're done. */
+ if (ada_type == NULL_TREE && btin_type == NULL_TREE)
+ break;
+
+ /* If one list is shorter than the other, they fail to match. */
+ if (ada_type == NULL_TREE || btin_type == NULL_TREE)
+ return false;
+
+ /* If we're done with the Ada args and not with the internal builtin
+ args, or the other way around, complain. */
+ if (ada_type == void_type_node
+ && btin_type != void_type_node)
+ {
+ post_error ("?Ada arguments list too short!", inb->gnat_entity);
+ return false;
+ }
+
+ if (btin_type == void_type_node
+ && ada_type != void_type_node)
+ {
+ post_error_ne_num ("?Ada arguments list too long ('> ^)!",
+ inb->gnat_entity, inb->gnat_entity, argpos);
+ return false;
+ }
+
+ /* Otherwise, check that types match for the current argument. */
+      argpos++;
+ if (intrin_types_incompatible_p (ada_type, btin_type))
+ {
+ post_error_ne_num ("?intrinsic binding type mismatch on argument ^!",
+ inb->gnat_entity, inb->gnat_entity, argpos);
+ return false;
+ }
+
+ function_args_iter_next (&ada_iter);
+ function_args_iter_next (&btin_iter);
+ }
+
+ return true;
+}
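+
+/* Standalone sketch of the walk above: both lists are advanced in
+   lockstep and a terminator seen on one side only means the profiles
+   have different lengths.  Plain NULL-terminated arrays stand in for
+   function_args_iterator here; everything is illustrative only.  */
+
+static int ATTRIBUTE_UNUSED
+sketch_lists_same_length_p (const char **list1, const char **list2)
+{
+  while (*list1 || *list2)
+    {
+      /* One list ran out before the other: length mismatch.  */
+      if (!*list1 || !*list2)
+	return 0;
+      list1++;
+      list2++;
+    }
+
+  return 1;
+}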
+
+/* Helper for intrin_profiles_compatible_p, to perform compatibility checks
+ on the Ada/builtin return values for the INB binding. */
+
+static bool
+intrin_return_compatible_p (intrin_binding_t * inb)
+{
+ tree ada_return_type = TREE_TYPE (inb->ada_fntype);
+ tree btin_return_type = TREE_TYPE (inb->btin_fntype);
+
+  /* Accept a function imported as a procedure; this is common and
+     convenient.  */
+ if (VOID_TYPE_P (ada_return_type)
+ && !VOID_TYPE_P (btin_return_type))
+ return true;
+
+ /* If return type is Address (integer type), map it to void *. */
+ if (Is_Descendent_Of_Address (Etype (inb->gnat_entity)))
+ ada_return_type = ptr_void_type_node;
+
+ /* Check return types compatibility otherwise. Note that this
+ handles void/void as well. */
+ if (intrin_types_incompatible_p (btin_return_type, ada_return_type))
+ {
+ post_error ("?intrinsic binding type mismatch on return value!",
+ inb->gnat_entity);
+ return false;
+ }
+
+ return true;
+}
+
+/* Check and return whether the Ada and gcc builtin profiles bound by INB are
+ compatible. Issue relevant warnings when they are not.
+
+ This is intended as a light check to diagnose the most obvious cases, not
+ as a full fledged type compatibility predicate. It is the programmer's
+ responsibility to ensure correctness of the Ada declarations in Imports,
+ especially when binding straight to a compiler internal. */
+
+static bool
+intrin_profiles_compatible_p (intrin_binding_t * inb)
+{
+ /* Check compatibility on return values and argument lists, each responsible
+ for posting warnings as appropriate. Ensure use of the proper sloc for
+ this purpose. */
+
+ bool arglists_compatible_p, return_compatible_p;
+ location_t saved_location = input_location;
+
+ Sloc_to_locus (Sloc (inb->gnat_entity), &input_location);
+
+ return_compatible_p = intrin_return_compatible_p (inb);
+ arglists_compatible_p = intrin_arglists_compatible_p (inb);
+
+ input_location = saved_location;
+
+ return return_compatible_p && arglists_compatible_p;
+}
+
+/* Return a FIELD_DECL node modeled on OLD_FIELD. FIELD_TYPE is its type
+ and RECORD_TYPE is the type of the parent. If SIZE is nonzero, it is the
+ specified size for this field. POS_LIST is a position list describing
+ the layout of OLD_FIELD and SUBST_LIST a substitution list to be applied
+ to this layout. */
+
+static tree
+create_field_decl_from (tree old_field, tree field_type, tree record_type,
+ tree size, tree pos_list,
+ vec<subst_pair> subst_list)
+{
+ tree t = TREE_VALUE (purpose_member (old_field, pos_list));
+ tree pos = TREE_VEC_ELT (t, 0), bitpos = TREE_VEC_ELT (t, 2);
+ unsigned int offset_align = tree_to_uhwi (TREE_VEC_ELT (t, 1));
+ tree new_pos, new_field;
+ unsigned int i;
+ subst_pair *s;
+
+ if (CONTAINS_PLACEHOLDER_P (pos))
+ FOR_EACH_VEC_ELT (subst_list, i, s)
+ pos = SUBSTITUTE_IN_EXPR (pos, s->discriminant, s->replacement);
+
+ /* If the position is now a constant, we can set it as the position of the
+ field when we make it. Otherwise, we need to deal with it specially. */
+ if (TREE_CONSTANT (pos))
+ new_pos = bit_from_pos (pos, bitpos);
+ else
+ new_pos = NULL_TREE;
+
+ new_field
+ = create_field_decl (DECL_NAME (old_field), field_type, record_type,
+ size, new_pos, DECL_PACKED (old_field),
+ !DECL_NONADDRESSABLE_P (old_field));
+
+ if (!new_pos)
+ {
+ normalize_offset (&pos, &bitpos, offset_align);
+ DECL_FIELD_OFFSET (new_field) = pos;
+ DECL_FIELD_BIT_OFFSET (new_field) = bitpos;
+ SET_DECL_OFFSET_ALIGN (new_field, offset_align);
+ DECL_SIZE (new_field) = size;
+ DECL_SIZE_UNIT (new_field)
+ = convert (sizetype,
+ size_binop (CEIL_DIV_EXPR, size, bitsize_unit_node));
+ layout_decl (new_field, DECL_OFFSET_ALIGN (new_field));
+ }
+
+ DECL_INTERNAL_P (new_field) = DECL_INTERNAL_P (old_field);
+ SET_DECL_ORIGINAL_FIELD_TO_FIELD (new_field, old_field);
+ DECL_DISCRIMINANT_NUMBER (new_field) = DECL_DISCRIMINANT_NUMBER (old_field);
+ TREE_THIS_VOLATILE (new_field) = TREE_THIS_VOLATILE (old_field);
+
+ return new_field;
+}
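+
+/* Sketch of the position arithmetic involved above: a field position is
+   kept as a byte offset plus a bit offset, and bit_from_pos combines the
+   two into a single bit position.  Plain integers stand in for trees and
+   BITS_PER_UNIT is assumed to be 8; illustrative only.  */
+
+static unsigned long ATTRIBUTE_UNUSED
+sketch_bit_from_pos (unsigned long pos_bytes, unsigned long bitpos)
+{
+  const unsigned long bits_per_unit = 8;
+  return pos_bytes * bits_per_unit + bitpos;
+}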
+
+/* Create the REP part of RECORD_TYPE with REP_TYPE. If MIN_SIZE is nonzero,
+ it is the minimal size the REP_PART must have. */
+
+static tree
+create_rep_part (tree rep_type, tree record_type, tree min_size)
+{
+ tree field;
+
+ if (min_size && !tree_int_cst_lt (TYPE_SIZE (rep_type), min_size))
+ min_size = NULL_TREE;
+
+ field = create_field_decl (get_identifier ("REP"), rep_type, record_type,
+ min_size, NULL_TREE, 0, 1);
+ DECL_INTERNAL_P (field) = 1;
+
+ return field;
+}
+
+/* Return the REP part of RECORD_TYPE, if any. Otherwise return NULL. */
+
+static tree
+get_rep_part (tree record_type)
+{
+ tree field = TYPE_FIELDS (record_type);
+
+  /* The REP part is the first field: it is internal, is itself a record,
+     and its name starts with an 'R'.  */
+ if (field
+ && DECL_INTERNAL_P (field)
+ && TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE
+ && IDENTIFIER_POINTER (DECL_NAME (field)) [0] == 'R')
+ return field;
+
+ return NULL_TREE;
+}
+
+/* Return the variant part of RECORD_TYPE, if any. Otherwise return NULL. */
+
+tree
+get_variant_part (tree record_type)
+{
+ tree field;
+
+ /* The variant part is the only internal field that is a qualified union. */
+ for (field = TYPE_FIELDS (record_type); field; field = DECL_CHAIN (field))
+ if (DECL_INTERNAL_P (field)
+ && TREE_CODE (TREE_TYPE (field)) == QUAL_UNION_TYPE)
+ return field;
+
+ return NULL_TREE;
+}
+
+/* Return a new variant part modeled on OLD_VARIANT_PART. VARIANT_LIST is
+ the list of variants to be used and RECORD_TYPE is the type of the parent.
+ POS_LIST is a position list describing the layout of fields present in
+ OLD_VARIANT_PART and SUBST_LIST a substitution list to be applied to this
+ layout. */
+
+static tree
+create_variant_part_from (tree old_variant_part,
+ vec<variant_desc> variant_list,
+ tree record_type, tree pos_list,
+ vec<subst_pair> subst_list)
+{
+ tree offset = DECL_FIELD_OFFSET (old_variant_part);
+ tree old_union_type = TREE_TYPE (old_variant_part);
+ tree new_union_type, new_variant_part;
+ tree union_field_list = NULL_TREE;
+ variant_desc *v;
+ unsigned int i;
+
+ /* First create the type of the variant part from that of the old one. */
+ new_union_type = make_node (QUAL_UNION_TYPE);
+ TYPE_NAME (new_union_type)
+ = concat_name (TYPE_NAME (record_type),
+ IDENTIFIER_POINTER (DECL_NAME (old_variant_part)));
+
+ /* If the position of the variant part is constant, subtract it from the
+ size of the type of the parent to get the new size. This manual CSE
+ reduces the code size when not optimizing. */
+ if (TREE_CODE (offset) == INTEGER_CST)
+ {
+ tree bitpos = DECL_FIELD_BIT_OFFSET (old_variant_part);
+ tree first_bit = bit_from_pos (offset, bitpos);
+ TYPE_SIZE (new_union_type)
+ = size_binop (MINUS_EXPR, TYPE_SIZE (record_type), first_bit);
+ TYPE_SIZE_UNIT (new_union_type)
+ = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (record_type),
+ byte_from_pos (offset, bitpos));
+ SET_TYPE_ADA_SIZE (new_union_type,
+ size_binop (MINUS_EXPR, TYPE_ADA_SIZE (record_type),
+ first_bit));
+ TYPE_ALIGN (new_union_type) = TYPE_ALIGN (old_union_type);
+ relate_alias_sets (new_union_type, old_union_type, ALIAS_SET_COPY);
+ }
+ else
+ copy_and_substitute_in_size (new_union_type, old_union_type, subst_list);
+
+ /* Now finish up the new variants and populate the union type. */
+ FOR_EACH_VEC_ELT_REVERSE (variant_list, i, v)
+ {
+ tree old_field = v->field, new_field;
+ tree old_variant, old_variant_subpart, new_variant, field_list;
+
+ /* Skip variants that don't belong to this nesting level. */
+ if (DECL_CONTEXT (old_field) != old_union_type)
+ continue;
+
+ /* Retrieve the list of fields already added to the new variant. */
+ new_variant = v->new_type;
+ field_list = TYPE_FIELDS (new_variant);
+
+ /* If the old variant had a variant subpart, we need to create a new
+ variant subpart and add it to the field list. */
+ old_variant = v->type;
+ old_variant_subpart = get_variant_part (old_variant);
+ if (old_variant_subpart)
+ {
+ tree new_variant_subpart
+ = create_variant_part_from (old_variant_subpart, variant_list,
+ new_variant, pos_list, subst_list);
+ DECL_CHAIN (new_variant_subpart) = field_list;
+ field_list = new_variant_subpart;
+ }
+
+ /* Finish up the new variant and create the field. No need for debug
+ info thanks to the XVS type. */
+ finish_record_type (new_variant, nreverse (field_list), 2, false);
+ compute_record_mode (new_variant);
+ create_type_decl (TYPE_NAME (new_variant), new_variant, true, false,
+ Empty);
+
+ new_field
+ = create_field_decl_from (old_field, new_variant, new_union_type,
+ TYPE_SIZE (new_variant),
+ pos_list, subst_list);
+ DECL_QUALIFIER (new_field) = v->qual;
+ DECL_INTERNAL_P (new_field) = 1;
+ DECL_CHAIN (new_field) = union_field_list;
+ union_field_list = new_field;
+ }
+
+ /* Finish up the union type and create the variant part. No need for debug
+ info thanks to the XVS type. Note that we don't reverse the field list
+ because VARIANT_LIST has been traversed in reverse order. */
+ finish_record_type (new_union_type, union_field_list, 2, false);
+ compute_record_mode (new_union_type);
+ create_type_decl (TYPE_NAME (new_union_type), new_union_type, true, false,
+ Empty);
+
+ new_variant_part
+ = create_field_decl_from (old_variant_part, new_union_type, record_type,
+ TYPE_SIZE (new_union_type),
+ pos_list, subst_list);
+ DECL_INTERNAL_P (new_variant_part) = 1;
+
+ /* With multiple discriminants it is possible for an inner variant to be
+ statically selected while outer ones are not; in this case, the list
+ of fields of the inner variant is not flattened and we end up with a
+ qualified union with a single member. Drop the useless container. */
+ if (!DECL_CHAIN (union_field_list))
+ {
+ DECL_CONTEXT (union_field_list) = record_type;
+ DECL_FIELD_OFFSET (union_field_list)
+ = DECL_FIELD_OFFSET (new_variant_part);
+ DECL_FIELD_BIT_OFFSET (union_field_list)
+ = DECL_FIELD_BIT_OFFSET (new_variant_part);
+ SET_DECL_OFFSET_ALIGN (union_field_list,
+ DECL_OFFSET_ALIGN (new_variant_part));
+ new_variant_part = union_field_list;
+ }
+
+ return new_variant_part;
+}
+
+/* Copy the size (and alignment and alias set) from OLD_TYPE to NEW_TYPE,
+ which are both RECORD_TYPE, after applying the substitutions described
+ in SUBST_LIST. */
+
+static void
+copy_and_substitute_in_size (tree new_type, tree old_type,
+ vec<subst_pair> subst_list)
+{
+ unsigned int i;
+ subst_pair *s;
+
+ TYPE_SIZE (new_type) = TYPE_SIZE (old_type);
+ TYPE_SIZE_UNIT (new_type) = TYPE_SIZE_UNIT (old_type);
+ SET_TYPE_ADA_SIZE (new_type, TYPE_ADA_SIZE (old_type));
+ TYPE_ALIGN (new_type) = TYPE_ALIGN (old_type);
+ relate_alias_sets (new_type, old_type, ALIAS_SET_COPY);
+
+ if (CONTAINS_PLACEHOLDER_P (TYPE_SIZE (new_type)))
+ FOR_EACH_VEC_ELT (subst_list, i, s)
+ TYPE_SIZE (new_type)
+ = SUBSTITUTE_IN_EXPR (TYPE_SIZE (new_type),
+ s->discriminant, s->replacement);
+
+ if (CONTAINS_PLACEHOLDER_P (TYPE_SIZE_UNIT (new_type)))
+ FOR_EACH_VEC_ELT (subst_list, i, s)
+ TYPE_SIZE_UNIT (new_type)
+ = SUBSTITUTE_IN_EXPR (TYPE_SIZE_UNIT (new_type),
+ s->discriminant, s->replacement);
+
+ if (CONTAINS_PLACEHOLDER_P (TYPE_ADA_SIZE (new_type)))
+ FOR_EACH_VEC_ELT (subst_list, i, s)
+ SET_TYPE_ADA_SIZE
+ (new_type, SUBSTITUTE_IN_EXPR (TYPE_ADA_SIZE (new_type),
+ s->discriminant, s->replacement));
+
+ /* Finalize the size. */
+ TYPE_SIZE (new_type) = variable_size (TYPE_SIZE (new_type));
+ TYPE_SIZE_UNIT (new_type) = variable_size (TYPE_SIZE_UNIT (new_type));
+}
+
+/* Given a type T, a FIELD_DECL F, and a replacement value R, return a
+ type with all size expressions that contain F in a PLACEHOLDER_EXPR
+ updated by replacing F with R.
+
+ The function doesn't update the layout of the type, i.e. it assumes
+ that the substitution is purely formal. That's why the replacement
+ value R must itself contain a PLACEHOLDER_EXPR. */
+
+tree
+substitute_in_type (tree t, tree f, tree r)
+{
+ tree nt;
+
+ gcc_assert (CONTAINS_PLACEHOLDER_P (r));
+
+ switch (TREE_CODE (t))
+ {
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ case REAL_TYPE:
+
+ /* First the domain types of arrays. */
+ if (CONTAINS_PLACEHOLDER_P (TYPE_GCC_MIN_VALUE (t))
+ || CONTAINS_PLACEHOLDER_P (TYPE_GCC_MAX_VALUE (t)))
+ {
+ tree low = SUBSTITUTE_IN_EXPR (TYPE_GCC_MIN_VALUE (t), f, r);
+ tree high = SUBSTITUTE_IN_EXPR (TYPE_GCC_MAX_VALUE (t), f, r);
+
+ if (low == TYPE_GCC_MIN_VALUE (t) && high == TYPE_GCC_MAX_VALUE (t))
+ return t;
+
+ nt = copy_type (t);
+ TYPE_GCC_MIN_VALUE (nt) = low;
+ TYPE_GCC_MAX_VALUE (nt) = high;
+
+ if (TREE_CODE (t) == INTEGER_TYPE && TYPE_INDEX_TYPE (t))
+ SET_TYPE_INDEX_TYPE
+ (nt, substitute_in_type (TYPE_INDEX_TYPE (t), f, r));
+
+ return nt;
+ }
+
+ /* Then the subtypes. */
+ if (CONTAINS_PLACEHOLDER_P (TYPE_RM_MIN_VALUE (t))
+ || CONTAINS_PLACEHOLDER_P (TYPE_RM_MAX_VALUE (t)))
+ {
+ tree low = SUBSTITUTE_IN_EXPR (TYPE_RM_MIN_VALUE (t), f, r);
+ tree high = SUBSTITUTE_IN_EXPR (TYPE_RM_MAX_VALUE (t), f, r);
+
+ if (low == TYPE_RM_MIN_VALUE (t) && high == TYPE_RM_MAX_VALUE (t))
+ return t;
+
+ nt = copy_type (t);
+ SET_TYPE_RM_MIN_VALUE (nt, low);
+ SET_TYPE_RM_MAX_VALUE (nt, high);
+
+ return nt;
+ }
+
+ return t;
+
+ case COMPLEX_TYPE:
+ nt = substitute_in_type (TREE_TYPE (t), f, r);
+ if (nt == TREE_TYPE (t))
+ return t;
+
+ return build_complex_type (nt);
+
+ case FUNCTION_TYPE:
+ /* These should never show up here. */
+ gcc_unreachable ();
+
+ case ARRAY_TYPE:
+ {
+ tree component = substitute_in_type (TREE_TYPE (t), f, r);
+ tree domain = substitute_in_type (TYPE_DOMAIN (t), f, r);
+
+ if (component == TREE_TYPE (t) && domain == TYPE_DOMAIN (t))
+ return t;
+
+ nt = build_nonshared_array_type (component, domain);
+ TYPE_ALIGN (nt) = TYPE_ALIGN (t);
+ TYPE_USER_ALIGN (nt) = TYPE_USER_ALIGN (t);
+ SET_TYPE_MODE (nt, TYPE_MODE (t));
+ TYPE_SIZE (nt) = SUBSTITUTE_IN_EXPR (TYPE_SIZE (t), f, r);
+ TYPE_SIZE_UNIT (nt) = SUBSTITUTE_IN_EXPR (TYPE_SIZE_UNIT (t), f, r);
+ TYPE_NONALIASED_COMPONENT (nt) = TYPE_NONALIASED_COMPONENT (t);
+ TYPE_MULTI_ARRAY_P (nt) = TYPE_MULTI_ARRAY_P (t);
+ TYPE_CONVENTION_FORTRAN_P (nt) = TYPE_CONVENTION_FORTRAN_P (t);
+ return nt;
+ }
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ bool changed_field = false;
+ tree field;
+
+ /* Start out with no fields, make new fields, and chain them
+ in. If we haven't actually changed the type of any field,
+ discard everything we've done and return the old type. */
+ nt = copy_type (t);
+ TYPE_FIELDS (nt) = NULL_TREE;
+
+ for (field = TYPE_FIELDS (t); field; field = DECL_CHAIN (field))
+ {
+ tree new_field = copy_node (field), new_n;
+
+ new_n = substitute_in_type (TREE_TYPE (field), f, r);
+ if (new_n != TREE_TYPE (field))
+ {
+ TREE_TYPE (new_field) = new_n;
+ changed_field = true;
+ }
+
+ new_n = SUBSTITUTE_IN_EXPR (DECL_FIELD_OFFSET (field), f, r);
+ if (new_n != DECL_FIELD_OFFSET (field))
+ {
+ DECL_FIELD_OFFSET (new_field) = new_n;
+ changed_field = true;
+ }
+
+ /* Do the substitution inside the qualifier, if any. */
+ if (TREE_CODE (t) == QUAL_UNION_TYPE)
+ {
+ new_n = SUBSTITUTE_IN_EXPR (DECL_QUALIFIER (field), f, r);
+ if (new_n != DECL_QUALIFIER (field))
+ {
+ DECL_QUALIFIER (new_field) = new_n;
+ changed_field = true;
+ }
+ }
+
+ DECL_CONTEXT (new_field) = nt;
+ SET_DECL_ORIGINAL_FIELD_TO_FIELD (new_field, field);
+
+ DECL_CHAIN (new_field) = TYPE_FIELDS (nt);
+ TYPE_FIELDS (nt) = new_field;
+ }
+
+ if (!changed_field)
+ return t;
+
+ TYPE_FIELDS (nt) = nreverse (TYPE_FIELDS (nt));
+ TYPE_SIZE (nt) = SUBSTITUTE_IN_EXPR (TYPE_SIZE (t), f, r);
+ TYPE_SIZE_UNIT (nt) = SUBSTITUTE_IN_EXPR (TYPE_SIZE_UNIT (t), f, r);
+ SET_TYPE_ADA_SIZE (nt, SUBSTITUTE_IN_EXPR (TYPE_ADA_SIZE (t), f, r));
+ return nt;
+ }
+
+ default:
+ return t;
+ }
+}
+
+/* Return the RM size of GNU_TYPE. This is the actual number of bits
+ needed to represent the object. */
+
+tree
+rm_size (tree gnu_type)
+{
+ /* For integral types, we store the RM size explicitly. */
+ if (INTEGRAL_TYPE_P (gnu_type) && TYPE_RM_SIZE (gnu_type))
+ return TYPE_RM_SIZE (gnu_type);
+
+ /* Return the RM size of the actual data plus the size of the template. */
+ if (TREE_CODE (gnu_type) == RECORD_TYPE
+ && TYPE_CONTAINS_TEMPLATE_P (gnu_type))
+ return
+ size_binop (PLUS_EXPR,
+ rm_size (TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_type)))),
+ DECL_SIZE (TYPE_FIELDS (gnu_type)));
+
+ /* For record or union types, we store the size explicitly. */
+ if (RECORD_OR_UNION_TYPE_P (gnu_type)
+ && !TYPE_FAT_POINTER_P (gnu_type)
+ && TYPE_ADA_SIZE (gnu_type))
+ return TYPE_ADA_SIZE (gnu_type);
+
+ /* For other types, this is just the size. */
+ return TYPE_SIZE (gnu_type);
+}
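+
+/* For example, for an integer subtype with range 0 .. 100, the RM size
+   is the 7 bits needed to represent the 101 values, even though the size
+   of the underlying object (TYPE_SIZE) is typically rounded up to at
+   least 8 bits.  */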
+
+/* Return the name to be used for GNAT_ENTITY. If a type, create a
+ fully-qualified name, possibly with type information encoding.
+ Otherwise, return the name. */
+
+tree
+get_entity_name (Entity_Id gnat_entity)
+{
+ Get_Encoded_Name (gnat_entity);
+ return get_identifier_with_length (Name_Buffer, Name_Len);
+}
+
+/* Return an identifier representing the external name to be used for
+ GNAT_ENTITY. If SUFFIX is specified, the name is followed by "___"
+ and the specified suffix. */
+
+tree
+create_concat_name (Entity_Id gnat_entity, const char *suffix)
+{
+ Entity_Kind kind = Ekind (gnat_entity);
+
+ if (suffix)
+ {
+ String_Template temp = {1, (int) strlen (suffix)};
+ Fat_Pointer fp = {suffix, &temp};
+ Get_External_Name_With_Suffix (gnat_entity, fp);
+ }
+ else
+ Get_External_Name (gnat_entity, 0);
+
+ /* A variable using the Stdcall convention lives in a DLL. We adjust
+     its name to use the jump table: _imp__NAME contains the address
+     of the NAME variable.  */
+ if ((kind == E_Variable || kind == E_Constant)
+ && Has_Stdcall_Convention (gnat_entity))
+ {
+ const int len = 6 + Name_Len;
+ char *new_name = (char *) alloca (len + 1);
+ strcpy (new_name, "_imp__");
+ strcat (new_name, Name_Buffer);
+ return get_identifier_with_length (new_name, len);
+ }
+
+ return get_identifier_with_length (Name_Buffer, Name_Len);
+}
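+
+/* So, for instance, a Stdcall variable whose external name is "counter"
+   (a hypothetical example) yields the identifier "_imp__counter".  */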
+
+/* Given GNU_NAME, an IDENTIFIER_NODE containing a name and SUFFIX, a
+ string, return a new IDENTIFIER_NODE that is the concatenation of
+ the name followed by "___" and the specified suffix. */
+
+tree
+concat_name (tree gnu_name, const char *suffix)
+{
+ const int len = IDENTIFIER_LENGTH (gnu_name) + 3 + strlen (suffix);
+ char *new_name = (char *) alloca (len + 1);
+ strcpy (new_name, IDENTIFIER_POINTER (gnu_name));
+ strcat (new_name, "___");
+ strcat (new_name, suffix);
+ return get_identifier_with_length (new_name, len);
+}
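+
+/* Usage sketch: concat_name applied to an identifier "some_name" with
+   suffix "XVS" yields "some_name___XVS"; both inputs here are sample
+   values only.  */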
+
+#include "gt-ada-decl.h"
diff --git a/gcc-4.9/gcc/ada/gcc-interface/gadaint.h b/gcc-4.9/gcc/ada/gcc-interface/gadaint.h
new file mode 100644
index 000000000..ce27a14d0
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/gadaint.h
@@ -0,0 +1,43 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * G A D A I N T *
+ * *
+ * C Header File *
+ * *
+ * Copyright (C) 2010-2011, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License distributed with GNAT; see file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+/* This file contains the declarations of adaint.c material used in gigi.
+   It should be used in lieu of adaint.h in gigi because the latter drags
+   in a lot of stuff on Windows, which pollutes the macro namespace.  */
+
+#ifndef GCC_ADAINT_H
+#define GCC_ADAINT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern char *__gnat_to_canonical_file_spec (char *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GCC_ADAINT_H */
diff --git a/gcc-4.9/gcc/ada/gcc-interface/gigi.h b/gcc-4.9/gcc/ada/gcc-interface/gigi.h
new file mode 100644
index 000000000..b7092735c
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/gigi.h
@@ -0,0 +1,1087 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * G I G I *
+ * *
+ * C Header File *
+ * *
+ * Copyright (C) 1992-2014, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License distributed with GNAT; see file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+/* Declare all functions and types used by gigi. */
+
+/* Given GNAT_ENTITY, a GNAT defining identifier node, which denotes some Ada
+ entity, this routine returns the equivalent GCC tree for that entity
+ (an ..._DECL node) and associates the ..._DECL node with the input GNAT
+ defining identifier.
+
+ If GNAT_ENTITY is a variable or a constant declaration, GNU_EXPR gives its
+ initial value (in GCC tree form). This is optional for variables.
+ For renamed entities, GNU_EXPR gives the object being renamed.
+
+ DEFINITION is nonzero if this call is intended for a definition. This is
+   used for separate compilation where it is necessary to know whether an
+ external declaration or a definition should be created if the GCC equivalent
+ was not created previously. The value of 1 is normally used for a nonzero
+ DEFINITION, but a value of 2 is used in special circumstances, defined in
+ the code. */
+extern tree gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr,
+ int definition);
+
+/* Similar, but if the returned value is a COMPONENT_REF, return the
+ FIELD_DECL. */
+extern tree gnat_to_gnu_field_decl (Entity_Id gnat_entity);
+
+/* Similar, but GNAT_ENTITY is assumed to refer to a GNAT type. Return
+ the GCC type corresponding to that entity. */
+extern tree gnat_to_gnu_type (Entity_Id gnat_entity);
+
+/* Start a new statement group chained to the previous group. */
+extern void start_stmt_group (void);
+
+/* Add GNU_STMT to the current statement group. If it is an expression with
+ no effects, it is ignored. */
+extern void add_stmt (tree gnu_stmt);
+
+/* Similar, but the statement is always added, regardless of side-effects. */
+extern void add_stmt_force (tree gnu_stmt);
+
+/* Like add_stmt, but set the location of GNU_STMT to that of GNAT_NODE. */
+extern void add_stmt_with_node (tree gnu_stmt, Node_Id gnat_node);
+
+/* Similar, but the statement is always added, regardless of side-effects. */
+extern void add_stmt_with_node_force (tree gnu_stmt, Node_Id gnat_node);
+
+/* Return the code corresponding to the current code group.  It is normally
+ a STATEMENT_LIST, but may also be a BIND_EXPR or TRY_FINALLY_EXPR if
+ BLOCK or cleanups were set. */
+extern tree end_stmt_group (void);
+
+/* Set the BLOCK node corresponding to the current code group to GNU_BLOCK. */
+extern void set_block_for_group (tree);
+
+/* Add a declaration statement for GNU_DECL to the current BLOCK_STMT node.
+ Get SLOC from GNAT_ENTITY. */
+extern void add_decl_expr (tree gnu_decl, Entity_Id gnat_entity);
+
+/* Mark nodes rooted at T with TREE_VISITED and types as having their
+   sizes gimplified.  We use this to indicate that all variable sizes and
+   positions in global types may not be shared by any subprogram.  */
+extern void mark_visited (tree t);
+
+/* This macro calls the above function but short-circuits the common
+ case of a constant to save time and also checks for NULL. */
+
+#define MARK_VISITED(EXP) \
+do { \
+  if ((EXP) && !CONSTANT_CLASS_P (EXP))	\
+ mark_visited (EXP); \
+} while (0)
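+
+/* For example, MARK_VISITED (TYPE_SIZE (some_type)) is a no-op when the
+   size is a constant (or null) and walks the size expression otherwise;
+   SOME_TYPE is a placeholder here.  */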
+
+/* Finalize the processing of From_Limited_With incomplete types. */
+extern void finalize_from_limited_with (void);
+
+/* Return the equivalent type to be used for GNAT_ENTITY, if it's a
+   kind of type (such as E_Task_Type) that has a different type which Gigi
+ uses for its representation. If the type does not have a special type
+ for its representation, return GNAT_ENTITY. If a type is supposed to
+ exist, but does not, abort unless annotating types, in which case
+ return Empty. If GNAT_ENTITY is Empty, return Empty. */
+extern Entity_Id Gigi_Equivalent_Type (Entity_Id gnat_entity);
+
+/* Given GNAT_ENTITY, elaborate all expressions that are required to
+ be elaborated at the point of its definition, but do nothing else. */
+extern void elaborate_entity (Entity_Id gnat_entity);
+
+/* Get the unpadded version of a GNAT type. */
+extern tree get_unpadded_type (Entity_Id gnat_entity);
+
+/* Return the DECL associated with the public subprogram GNAT_ENTITY but whose
+ type has been changed to that of the parameterless procedure, except if an
+ alias is already present, in which case it is returned instead. */
+extern tree get_minimal_subprog_decl (Entity_Id gnat_entity);
+
+/* Return whether the E_Subprogram_Type/E_Function/E_Procedure GNAT_ENTITY is
+ a C++ imported method or equivalent. */
+extern bool is_cplusplus_method (Entity_Id gnat_entity);
+
+/* Create a record type that contains a SIZE bytes long field of TYPE with a
+ starting bit position so that it is aligned to ALIGN bits, and leaving at
+ least ROOM bytes free before the field. BASE_ALIGN is the alignment the
+ record is guaranteed to get. GNAT_NODE is used for the position of the
+ associated TYPE_DECL. */
+extern tree make_aligning_type (tree type, unsigned int align, tree size,
+ unsigned int base_align, int room, Node_Id);
+
+/* TYPE is a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE that is being used
+ as the field type of a packed record if IN_RECORD is true, or as the
+ component type of a packed array if IN_RECORD is false. See if we can
+ rewrite it either as a type that has a non-BLKmode, which we can pack
+ tighter in the packed record case, or as a smaller type. If so, return
+ the new type. If not, return the original type. */
+extern tree make_packable_type (tree type, bool in_record);
+
+/* Given a type TYPE, return a new type whose size is appropriate for SIZE.
+ If TYPE is the best type, return it. Otherwise, make a new type. We
+ only support new integral and pointer types. FOR_BIASED is true if
+ we are making a biased type. */
+extern tree make_type_from_size (tree type, tree size_tree, bool for_biased);
+
+/* Ensure that TYPE has SIZE and ALIGN. Make and return a new padded type
+ if needed. We have already verified that SIZE and TYPE are large enough.
+ GNAT_ENTITY is used to name the resulting record and to issue a warning.
+ IS_COMPONENT_TYPE is true if this is being done for the component type of
+ an array. IS_USER_TYPE is true if the original type needs to be completed.
+ DEFINITION is true if this type is being defined. SET_RM_SIZE is true if
+ the RM size of the resulting type is to be set to SIZE too. */
+extern tree maybe_pad_type (tree type, tree size, unsigned int align,
+ Entity_Id gnat_entity, bool is_component_type,
+ bool is_user_type, bool definition,
+ bool set_rm_size);
+
+enum alias_set_op
+{
+ ALIAS_SET_COPY,
+ ALIAS_SET_SUBSET,
+ ALIAS_SET_SUPERSET
+};
+
+/* Relate the alias sets of GNU_NEW_TYPE and GNU_OLD_TYPE according to OP.
+ If this is a multi-dimensional array type, do this recursively.
+
+ OP may be
+ - ALIAS_SET_COPY: the new set is made a copy of the old one.
+ - ALIAS_SET_SUPERSET: the new set is made a superset of the old one.
+ - ALIAS_SET_SUBSET: the new set is made a subset of the old one. */
+extern void relate_alias_sets (tree gnu_new_type, tree gnu_old_type,
+ enum alias_set_op op);
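+
+/* A typical use, as in the variant handling code of decl.c, is
+   relate_alias_sets (new_type, old_type, ALIAS_SET_COPY) when the new
+   type is laid out as an exact copy of the old one.  */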
+
+/* Given a GNU tree and a GNAT list of choices, generate an expression to test
+ the value passed against the list of choices. */
+extern tree choices_to_gnu (tree operand, Node_Id choices);
+
+/* Given GNAT_ENTITY, an object (constant, variable, parameter, exception)
+ and GNU_TYPE, its corresponding GCC type, set Esize and Alignment to the
+ size and alignment used by Gigi. Prefer SIZE over TYPE_SIZE if non-null.
+ BY_REF is true if the object is used by reference. */
+extern void annotate_object (Entity_Id gnat_entity, tree gnu_type, tree size,
+ bool by_ref);
+
+/* Return the variant part of RECORD_TYPE, if any. Otherwise return NULL. */
+extern tree get_variant_part (tree record_type);
+
+/* Given a type T, a FIELD_DECL F, and a replacement value R, return a new
+ type with all size expressions that contain F updated by replacing F
+ with R. If F is NULL_TREE, always make a new RECORD_TYPE, even if
+ nothing has changed. */
+extern tree substitute_in_type (tree t, tree f, tree r);
+
+/* Return the RM size of GNU_TYPE. This is the actual number of bits
+ needed to represent the object. */
+extern tree rm_size (tree gnu_type);
+
+/* Return the name to be used for GNAT_ENTITY. If a type, create a
+ fully-qualified name, possibly with type information encoding.
+ Otherwise, return the name. */
+extern tree get_entity_name (Entity_Id gnat_entity);
+
+/* Return an identifier representing the external name to be used for
+ GNAT_ENTITY. If SUFFIX is specified, the name is followed by "___"
+ and the specified suffix. */
+extern tree create_concat_name (Entity_Id gnat_entity, const char *suffix);
+
+/* Given GNU_NAME, an IDENTIFIER_NODE containing a name and SUFFIX, a
+ string, return a new IDENTIFIER_NODE that is the concatenation of
+ the name followed by "___" and the specified suffix. */
+extern tree concat_name (tree gnu_name, const char *suffix);
+
+/* Highest number in the front-end node table. */
+extern int max_gnat_nodes;
+
+/* Current node being treated, in case abort called. */
+extern Node_Id error_gnat_node;
+
+/* True when gigi is being called on an analyzed but unexpanded
+ tree, and the only purpose of the call is to properly annotate
+ types with representation information. */
+extern bool type_annotate_only;
+
+/* Current file name without path. */
+extern const char *ref_filename;
+
+/* This structure must be kept synchronized with Call_Back_End. */
+struct File_Info_Type
+{
+ File_Name_Type File_Name;
+ Instance_Id Instance;
+ Nat Num_Source_Lines;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This is the main program of the back-end. It sets up all the table
+ structures and then generates code. */
+extern void gigi (Node_Id gnat_root,
+ int max_gnat_node,
+ int number_name ATTRIBUTE_UNUSED,
+ struct Node *nodes_ptr,
+ struct Flags *Flags_Ptr,
+ Node_Id *next_node_ptr,
+ Node_Id *prev_node_ptr,
+ struct Elist_Header *elists_ptr,
+ struct Elmt_Item *elmts_ptr,
+ struct String_Entry *strings_ptr,
+ Char_Code *strings_chars_ptr,
+ struct List_Header *list_headers_ptr,
+ Nat number_file,
+ struct File_Info_Type *file_info_ptr,
+ Entity_Id standard_boolean,
+ Entity_Id standard_integer,
+ Entity_Id standard_character,
+ Entity_Id standard_long_long_float,
+ Entity_Id standard_exception_type,
+ Int gigi_operating_mode);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* GNAT_NODE is the root of some GNAT tree. Return the root of the
+ GCC tree corresponding to that GNAT tree. Normally, no code is generated;
+ we just return an equivalent tree which is used elsewhere to generate
+ code. */
+extern tree gnat_to_gnu (Node_Id gnat_node);
+
+/* GNU_STMT is a statement. We generate code for that statement. */
+extern void gnat_expand_stmt (tree gnu_stmt);
+
+/* Generate GIMPLE in place for the expression at *EXPR_P. */
+extern int gnat_gimplify_expr (tree *expr_p, gimple_seq *pre_p,
+ gimple_seq *post_p ATTRIBUTE_UNUSED);
+
+/* Do the processing for the declaration of a GNAT_ENTITY, a type. If
+ a separate Freeze node exists, delay the bulk of the processing. Otherwise
+ make a GCC type for GNAT_ENTITY and set up the correspondence. */
+extern void process_type (Entity_Id gnat_entity);
+
+/* Convert SLOC into LOCUS. Return true if SLOC corresponds to a source code
+ location and false if it doesn't. In the former case, set the Gigi global
+ variable REF_FILENAME to the simple debug file name as given by sinput. */
+extern bool Sloc_to_locus (Source_Ptr Sloc, location_t *locus);
+
+/* Post an error message. MSG is the error message, properly annotated.
+ NODE is the node at which to post the error and the node to use for the
+ '&' substitution. */
+extern void post_error (const char *msg, Node_Id node);
+
+/* Similar to post_error, but NODE is the node at which to post the error and
+ ENT is the node to use for the '&' substitution. */
+extern void post_error_ne (const char *msg, Node_Id node, Entity_Id ent);
+
+/* Similar to post_error_ne, but NUM is the number to use for the '^'. */
+extern void post_error_ne_num (const char *msg, Node_Id node, Entity_Id ent,
+ int num);
+
+/* Similar to post_error_ne, but T is a GCC tree representing the number to
+ write. If T represents a constant, the text inside curly brackets in
+ MSG will be output (presumably including a '^'). Otherwise it will not
+ be output and the text inside square brackets will be output instead. */
+extern void post_error_ne_tree (const char *msg, Node_Id node, Entity_Id ent,
+ tree t);
+
+/* Similar to post_error_ne_tree, but NUM is a second integer to write. */
+extern void post_error_ne_tree_2 (const char *msg, Node_Id node, Entity_Id ent,
+ tree t, int num);
+
+/* Return a label to branch to for the exception type in KIND or NULL_TREE
+ if none. */
+extern tree get_exception_label (char kind);
+
+/* Return the decl for the current elaboration procedure. */
+extern tree get_elaboration_procedure (void);
+
+/* If nonzero, pretend we are allocating at global level. */
+extern int force_global;
+
+/* The default alignment of "double" floating-point types, i.e. floating
+ point types whose size is equal to 64 bits, or 0 if this alignment is
+ not specifically capped. */
+extern int double_float_alignment;
+
+/* The default alignment of "double" or larger scalar types, i.e. scalar
+ types whose size is greater or equal to 64 bits, or 0 if this alignment
+ is not specifically capped. */
+extern int double_scalar_alignment;
+
+/* Data structures used to represent attributes. */
+
+enum attr_type
+{
+ ATTR_MACHINE_ATTRIBUTE,
+ ATTR_LINK_ALIAS,
+ ATTR_LINK_SECTION,
+ ATTR_LINK_CONSTRUCTOR,
+ ATTR_LINK_DESTRUCTOR,
+ ATTR_THREAD_LOCAL_STORAGE,
+ ATTR_WEAK_EXTERNAL
+};
+
+struct attrib
+{
+ struct attrib *next;
+ enum attr_type type;
+ tree name;
+ tree args;
+ Node_Id error_point;
+};
+
+/* Table of machine-independent internal attributes. */
+extern const struct attribute_spec gnat_internal_attribute_table[];
+
+/* Define the entries in the standard data array. */
+enum standard_datatypes
+{
+ /* The longest floating-point type. */
+ ADT_longest_float_type,
+
+ /* The type of an exception. */
+ ADT_except_type,
+
+ /* Type declaration node <==> typedef void *T */
+ ADT_ptr_void_type,
+
+ /* Function type declaration -- void T() */
+ ADT_void_ftype,
+
+ /* Type declaration node <==> typedef void *T() */
+ ADT_ptr_void_ftype,
+
+ /* Type declaration node <==> typedef virtual void *T() */
+ ADT_fdesc_type,
+
+ /* Null pointer for above type. */
+ ADT_null_fdesc,
+
+ /* Value 1 in signed bitsizetype. */
+ ADT_sbitsize_one_node,
+
+ /* Value BITS_PER_UNIT in signed bitsizetype. */
+ ADT_sbitsize_unit_node,
+
+ /* Function declaration nodes for run-time functions for allocating memory.
+ Ada allocators cause calls to these functions to be generated. Malloc32
+     is used only on 64-bit systems needing to allocate 32-bit memory.  */
+ ADT_malloc_decl,
+ ADT_malloc32_decl,
+
+ /* Likewise for freeing memory. */
+ ADT_free_decl,
+
+ /* Function decl node for 64-bit multiplication with overflow checking. */
+ ADT_mulv64_decl,
+
+ /* Identifier for the name of the _Parent field in tagged record types. */
+ ADT_parent_name_id,
+
+ /* Identifier for the name of the Exception_Data type. */
+ ADT_exception_data_name_id,
+
+ /* Types and decls used by our temporary exception mechanism. See
+ init_gigi_decls for details. */
+ ADT_jmpbuf_type,
+ ADT_jmpbuf_ptr_type,
+ ADT_get_jmpbuf_decl,
+ ADT_set_jmpbuf_decl,
+ ADT_get_excptr_decl,
+ ADT_setjmp_decl,
+ ADT_longjmp_decl,
+ ADT_update_setjmp_buf_decl,
+ ADT_raise_nodefer_decl,
+ ADT_reraise_zcx_decl,
+ ADT_set_exception_parameter_decl,
+ ADT_begin_handler_decl,
+ ADT_end_handler_decl,
+ ADT_unhandled_except_decl,
+ ADT_others_decl,
+ ADT_all_others_decl,
+ ADT_unhandled_others_decl,
+ ADT_LAST};
+
+/* Define kind of exception information associated with raise statements. */
+enum exception_info_kind
+{
+ /* Simple exception information: file:line. */
+ exception_simple,
+ /* Range exception information: file:line + index, first, last. */
+ exception_range,
+ /* Column exception information: file:line:column. */
+ exception_column
+};
+
+/* Define the inline status of a subprogram. */
+enum inline_status_t
+{
+ /* Inlining is suppressed for the subprogram. */
+ is_suppressed,
+ /* No inlining is requested for the subprogram. */
+ is_disabled,
+ /* Inlining is requested for the subprogram. */
+ is_enabled
+};
+
+extern GTY(()) tree gnat_std_decls[(int) ADT_LAST];
+extern GTY(()) tree gnat_raise_decls[(int) LAST_REASON_CODE + 1];
+extern GTY(()) tree gnat_raise_decls_ext[(int) LAST_REASON_CODE + 1];
+
+#define longest_float_type_node gnat_std_decls[(int) ADT_longest_float_type]
+#define except_type_node gnat_std_decls[(int) ADT_except_type]
+#define ptr_void_type_node gnat_std_decls[(int) ADT_ptr_void_type]
+#define void_ftype gnat_std_decls[(int) ADT_void_ftype]
+#define ptr_void_ftype gnat_std_decls[(int) ADT_ptr_void_ftype]
+#define fdesc_type_node gnat_std_decls[(int) ADT_fdesc_type]
+#define null_fdesc_node gnat_std_decls[(int) ADT_null_fdesc]
+#define sbitsize_one_node gnat_std_decls[(int) ADT_sbitsize_one_node]
+#define sbitsize_unit_node gnat_std_decls[(int) ADT_sbitsize_unit_node]
+#define malloc_decl gnat_std_decls[(int) ADT_malloc_decl]
+#define malloc32_decl gnat_std_decls[(int) ADT_malloc32_decl]
+#define free_decl gnat_std_decls[(int) ADT_free_decl]
+#define mulv64_decl gnat_std_decls[(int) ADT_mulv64_decl]
+#define parent_name_id gnat_std_decls[(int) ADT_parent_name_id]
+#define exception_data_name_id gnat_std_decls[(int) ADT_exception_data_name_id]
+#define jmpbuf_type gnat_std_decls[(int) ADT_jmpbuf_type]
+#define jmpbuf_ptr_type gnat_std_decls[(int) ADT_jmpbuf_ptr_type]
+#define get_jmpbuf_decl gnat_std_decls[(int) ADT_get_jmpbuf_decl]
+#define set_jmpbuf_decl gnat_std_decls[(int) ADT_set_jmpbuf_decl]
+#define get_excptr_decl gnat_std_decls[(int) ADT_get_excptr_decl]
+#define setjmp_decl gnat_std_decls[(int) ADT_setjmp_decl]
+#define longjmp_decl gnat_std_decls[(int) ADT_longjmp_decl]
+#define update_setjmp_buf_decl gnat_std_decls[(int) ADT_update_setjmp_buf_decl]
+#define raise_nodefer_decl gnat_std_decls[(int) ADT_raise_nodefer_decl]
+#define reraise_zcx_decl gnat_std_decls[(int) ADT_reraise_zcx_decl]
+#define set_exception_parameter_decl \
+ gnat_std_decls[(int) ADT_set_exception_parameter_decl]
+#define begin_handler_decl gnat_std_decls[(int) ADT_begin_handler_decl]
+#define others_decl gnat_std_decls[(int) ADT_others_decl]
+#define all_others_decl gnat_std_decls[(int) ADT_all_others_decl]
+#define unhandled_others_decl gnat_std_decls[(int) ADT_unhandled_others_decl]
+#define end_handler_decl gnat_std_decls[(int) ADT_end_handler_decl]
+#define unhandled_except_decl gnat_std_decls[(int) ADT_unhandled_except_decl]
+
+/* Routines expected by the gcc back-end. They must have exactly the same
+ prototype and names as below. */
+
+/* Return true if we are in the global binding level. */
+extern bool global_bindings_p (void);
+
+/* Enter and exit a new binding level. */
+extern void gnat_pushlevel (void);
+extern void gnat_poplevel (void);
+extern void gnat_zaplevel (void);
+
+/* Set SUPERCONTEXT of the BLOCK for the current binding level to FNDECL
+ and point FNDECL to this BLOCK. */
+extern void set_current_block_context (tree fndecl);
+
+/* Set the jmpbuf_decl for the current binding level to DECL. */
+extern void set_block_jmpbuf_decl (tree decl);
+
+/* Get the setjmp_decl, if any, for the current binding level. */
+extern tree get_block_jmpbuf_decl (void);
+
+/* Record DECL as belonging to the current lexical scope and use GNAT_NODE
+ for location information and flag propagation. */
+extern void gnat_pushdecl (tree decl, Node_Id gnat_node);
+
+/* Initialize the GCC support for exception handling. */
+extern void gnat_init_gcc_eh (void);
+
+/* Initialize the GCC support for floating-point operations. */
+extern void gnat_init_gcc_fp (void);
+
+/* Install the builtin functions we might need. */
+extern void gnat_install_builtins (void);
+
+/* Return an integer type with the number of bits of precision given by
+ PRECISION. UNSIGNEDP is nonzero if the type is unsigned; otherwise
+ it is a signed type. */
+extern tree gnat_type_for_size (unsigned precision, int unsignedp);
+
+/* Return a data type that has machine mode MODE. UNSIGNEDP selects
+ an unsigned type; otherwise a signed type is returned. */
+extern tree gnat_type_for_mode (enum machine_mode mode, int unsignedp);
+
+/* Emit debug info for all global variable declarations. */
+extern void gnat_write_global_declarations (void);
+
+/* Return the unsigned version of a TYPE_NODE, a scalar type. */
+extern tree gnat_unsigned_type (tree type_node);
+
+/* Return the signed version of a TYPE_NODE, a scalar type. */
+extern tree gnat_signed_type (tree type_node);
+
+/* Return 1 if the types T1 and T2 are compatible, i.e. if they can be
+ transparently converted to each other. */
+extern int gnat_types_compatible_p (tree t1, tree t2);
+
+/* Return true if EXPR is a useless type conversion. */
+extern bool gnat_useless_type_conversion (tree expr);
+
+/* Return true if T, a FUNCTION_TYPE, has the specified list of flags. */
+extern bool fntype_same_flags_p (const_tree, tree, bool, bool, bool);
+
+/* Create an expression whose value is that of EXPR,
+ converted to type TYPE. The TREE_TYPE of the value
+ is always TYPE. This function implements all reasonable
+ conversions; callers should filter out those that are
+ not permitted by the language being compiled. */
+extern tree convert (tree type, tree expr);
+
+/* Create an expression whose value is that of EXPR converted to the common
+ index type, which is sizetype. */
+extern tree convert_to_index_type (tree expr);
+
+/* Routines created solely for the tree translator's sake. Their prototypes
+ can be changed as desired. */
+
+/* Initialize data structures of the utils.c module. */
+extern void init_gnat_utils (void);
+
+/* Destroy data structures of the utils.c module. */
+extern void destroy_gnat_utils (void);
+
+/* GNAT_ENTITY is a GNAT tree node for a defining identifier.
+ GNU_DECL is the GCC tree which is to be associated with
+   GNAT_ENTITY.  Such a GNU tree node is always an ..._DECL node.
+   If NO_CHECK is nonzero, the check that it is indeed a ..._DECL node
+   is suppressed.
+ If GNU_DECL is zero, a previous association is to be reset. */
+extern void save_gnu_tree (Entity_Id gnat_entity, tree gnu_decl,
+ bool no_check);
+
+/* GNAT_ENTITY is a GNAT tree node for a defining identifier.
+ Return the ..._DECL node that was associated with it. If there is no tree
+ node associated with GNAT_ENTITY, abort. */
+extern tree get_gnu_tree (Entity_Id gnat_entity);
+
+/* Return nonzero if a GCC tree has been associated with GNAT_ENTITY. */
+extern bool present_gnu_tree (Entity_Id gnat_entity);
+
+/* Make a dummy type corresponding to GNAT_TYPE. */
+extern tree make_dummy_type (Entity_Id gnat_type);
+
+/* Return the dummy type that was made for GNAT_TYPE, if any. */
+extern tree get_dummy_type (Entity_Id gnat_type);
+
+/* Build dummy fat and thin pointer types whose designated type is specified
+ by GNAT_DESIG_TYPE/GNU_DESIG_TYPE and attach them to the latter. */
+extern void build_dummy_unc_pointer_types (Entity_Id gnat_desig_type,
+ tree gnu_desig_type);
+
+/* Record TYPE as a builtin type for Ada. NAME is the name of the type.
+ ARTIFICIAL_P is true if it's a type that was generated by the compiler. */
+extern void record_builtin_type (const char *name, tree type,
+ bool artificial_p);
+
+/* Given a record type RECORD_TYPE and a list of FIELD_DECL nodes FIELD_LIST,
+ finish constructing the record type as a fat pointer type. */
+extern void finish_fat_pointer_type (tree record_type, tree field_list);
+
+/* Given a record type RECORD_TYPE and a list of FIELD_DECL nodes FIELD_LIST,
+ finish constructing the record or union type. If REP_LEVEL is zero, this
+ record has no representation clause and so will be entirely laid out here.
+ If REP_LEVEL is one, this record has a representation clause and has been
+ laid out already; only set the sizes and alignment. If REP_LEVEL is two,
+ this record is derived from a parent record and thus inherits its layout;
+ only make a pass on the fields to finalize them. DEBUG_INFO_P is true if
+ we need to write debug information about this type. */
+extern void finish_record_type (tree record_type, tree field_list,
+ int rep_level, bool debug_info_p);
+
+/* Wrap up compilation of RECORD_TYPE, i.e. output all the debug information
+ associated with it. It need not be invoked directly in most cases since
+ finish_record_type takes care of doing so, but this can be necessary if
+ a parallel type is to be attached to the record type. */
+extern void rest_of_record_type_compilation (tree record_type);
+
+/* Append PARALLEL_TYPE on the chain of parallel types for TYPE. */
+extern void add_parallel_type (tree type, tree parallel_type);
+
+/* Return a FUNCTION_TYPE node. RETURN_TYPE is the type returned by the
+ subprogram. If it is VOID_TYPE, then we are dealing with a procedure,
+ otherwise we are dealing with a function. PARAM_DECL_LIST is a list of
+ PARM_DECL nodes that are the subprogram parameters. CICO_LIST is the
+ copy-in/copy-out list to be stored into the TYPE_CICO_LIST field.
+ RETURN_UNCONSTRAINED_P is true if the function returns an unconstrained
+ object. RETURN_BY_DIRECT_REF_P is true if the function returns by direct
+ reference. RETURN_BY_INVISI_REF_P is true if the function returns by
+ invisible reference. */
+extern tree create_subprog_type (tree return_type, tree param_decl_list,
+ tree cico_list, bool return_unconstrained_p,
+ bool return_by_direct_ref_p,
+ bool return_by_invisi_ref_p);
+
+/* Return a copy of TYPE, but safe to modify in any way. */
+extern tree copy_type (tree type);
+
+/* Return a subtype of sizetype with range MIN to MAX and whose
+ TYPE_INDEX_TYPE is INDEX. GNAT_NODE is used for the position
+ of the associated TYPE_DECL. */
+extern tree create_index_type (tree min, tree max, tree index,
+ Node_Id gnat_node);
+
+/* Return a subtype of TYPE with range MIN to MAX. If TYPE is NULL,
+ sizetype is used. */
+extern tree create_range_type (tree type, tree min, tree max);
+
+/* Return a TYPE_DECL node suitable for the TYPE_STUB_DECL field of a type.
+ TYPE_NAME gives the name of the type and TYPE is a ..._TYPE node giving
+ its data type. */
+extern tree create_type_stub_decl (tree type_name, tree type);
+
+/* Return a TYPE_DECL node. TYPE_NAME gives the name of the type and TYPE
+ is a ..._TYPE node giving its data type. ARTIFICIAL_P is true if this
+ is a declaration that was generated by the compiler. DEBUG_INFO_P is
+ true if we need to write debug information about this type. GNAT_NODE
+ is used for the position of the decl. */
+extern tree create_type_decl (tree type_name, tree type, bool artificial_p,
+ bool debug_info_p, Node_Id gnat_node);
+
+/* Return a VAR_DECL or CONST_DECL node.
+
+ VAR_NAME gives the name of the variable. ASM_NAME is its assembler name
+ (if provided). TYPE is its data type (a GCC ..._TYPE node). VAR_INIT is
+ the GCC tree for an optional initial expression; NULL_TREE if none.
+
+ CONST_FLAG is true if this variable is constant, in which case we might
+ return a CONST_DECL node unless CONST_DECL_ALLOWED_P is false.
+
+ PUBLIC_FLAG is true if this definition is to be made visible outside of
+ the current compilation unit. This flag should be set when processing the
+ variable definitions in a package specification.
+
+ EXTERN_FLAG is nonzero when processing an external variable declaration (as
+ opposed to a definition: no storage is to be allocated for the variable).
+
+ STATIC_FLAG is only relevant when not at top level. In that case
+ it indicates whether to always allocate storage to the variable.
+
+ GNAT_NODE is used for the position of the decl. */
+extern tree
+create_var_decl_1 (tree var_name, tree asm_name, tree type, tree var_init,
+ bool const_flag, bool public_flag, bool extern_flag,
+ bool static_flag, bool const_decl_allowed_p,
+ struct attrib *attr_list, Node_Id gnat_node);
+
+/* Wrapper around create_var_decl_1 for cases where we don't care whether
+ a VAR or a CONST decl node is created. */
+#define create_var_decl(var_name, asm_name, type, var_init, \
+ const_flag, public_flag, extern_flag, \
+ static_flag, attr_list, gnat_node) \
+ create_var_decl_1 (var_name, asm_name, type, var_init, \
+ const_flag, public_flag, extern_flag, \
+ static_flag, true, attr_list, gnat_node)
+
+/* Wrapper around create_var_decl_1 for cases where a VAR_DECL node is
+ required. The primary intent is for DECL_CONST_CORRESPONDING_VARs, which
+ must be VAR_DECLs and on which we want TREE_READONLY set to have them
+ possibly assigned to a readonly data section. */
+#define create_true_var_decl(var_name, asm_name, type, var_init, \
+ const_flag, public_flag, extern_flag, \
+ static_flag, attr_list, gnat_node) \
+ create_var_decl_1 (var_name, asm_name, type, var_init, \
+ const_flag, public_flag, extern_flag, \
+ static_flag, false, attr_list, gnat_node)
+
+/* Record DECL as a global renaming pointer. */
+extern void record_global_renaming_pointer (tree decl);
+
+/* Invalidate the global renaming pointers. */
+extern void invalidate_global_renaming_pointers (void);
+
+/* Return a FIELD_DECL node. FIELD_NAME is the field's name, FIELD_TYPE is
+ its type and RECORD_TYPE is the type of the enclosing record. If SIZE is
+ nonzero, it is the specified size of the field. If POS is nonzero, it is
+ the bit position. PACKED is 1 if the enclosing record is packed, -1 if it
+ has Component_Alignment of Storage_Unit. If ADDRESSABLE is nonzero, it
+ means we are allowed to take the address of the field; if it is negative,
+ we should not make a bitfield, which is used by make_aligning_type. */
+extern tree create_field_decl (tree field_name, tree field_type,
+ tree record_type, tree size, tree pos,
+ int packed, int addressable);
+
+/* Return a PARM_DECL node. PARAM_NAME is the name of the parameter and
+ PARAM_TYPE is its type. READONLY is true if the parameter is readonly
+ (either an In parameter or an address of a pass-by-ref parameter). */
+extern tree create_param_decl (tree param_name, tree param_type,
+ bool readonly);
+
+/* Return a LABEL_DECL with LABEL_NAME. GNAT_NODE is used for the position
+ of the decl. */
+extern tree create_label_decl (tree label_name, Node_Id gnat_node);
+
+/* Return a FUNCTION_DECL node. SUBPROG_NAME is the name of the subprogram,
+ ASM_NAME is its assembler name, SUBPROG_TYPE is its type (a FUNCTION_TYPE
+ node), PARAM_DECL_LIST is the list of the subprogram arguments (a list of
+ PARM_DECL nodes chained through the DECL_CHAIN field).
+
+ INLINE_STATUS, PUBLIC_FLAG, EXTERN_FLAG, ARTIFICIAL_FLAG and ATTR_LIST are
+ used to set the appropriate fields in the FUNCTION_DECL. GNAT_NODE is
+ used for the position of the decl. */
+extern tree create_subprog_decl (tree subprog_name, tree asm_name,
+ tree subprog_type, tree param_decl_list,
+ enum inline_status_t inline_status,
+ bool public_flag, bool extern_flag,
+ bool artificial_flag,
+ struct attrib *attr_list, Node_Id gnat_node);
+
+/* Process the attributes in ATTR_LIST for NODE, which is either a DECL or
+ a TYPE. If IN_PLACE is true, the tree pointed to by NODE should not be
+ changed. GNAT_NODE is used for the position of error messages. */
+extern void process_attributes (tree *node, struct attrib **attr_list,
+ bool in_place, Node_Id gnat_node);
+
+/* Set up the framework for generating code for SUBPROG_DECL, a subprogram
+ body. This routine needs to be invoked before processing the declarations
+ appearing in the subprogram. */
+extern void begin_subprog_body (tree subprog_decl);
+
+/* Finish translating the current subprogram and set its BODY. */
+extern void end_subprog_body (tree body);
+
+/* Wrap up compilation of SUBPROG_DECL, a subprogram body. */
+extern void rest_of_subprog_body_compilation (tree subprog_decl);
+
+/* Build a template of type TEMPLATE_TYPE from the array bounds of ARRAY_TYPE.
+ EXPR is an expression that we can use to locate any PLACEHOLDER_EXPRs.
+ Return a constructor for the template. */
+extern tree build_template (tree template_type, tree array_type, tree expr);
+
+/* Build a 64-bit VMS descriptor from a Mechanism_Type, which must specify
+ a descriptor type, and the GCC type of an object. Each FIELD_DECL
+ in the type contains in its DECL_INITIAL the expression to use when
+ a constructor is made for the type. GNAT_ENTITY is a gnat node used
+ to print out an error message if the mechanism cannot be applied to
+ an object of that type and also for the name. */
+extern tree build_vms_descriptor (tree type, Mechanism_Type mech,
+ Entity_Id gnat_entity);
+
+/* Build a 32-bit VMS descriptor from a Mechanism_Type.  See above. */
+extern tree build_vms_descriptor32 (tree type, Mechanism_Type mech,
+ Entity_Id gnat_entity);
+
+/* Build a type to be used to represent an aliased object whose nominal type
+ is an unconstrained array. This consists of a RECORD_TYPE containing a
+ field of TEMPLATE_TYPE and a field of OBJECT_TYPE, which is an ARRAY_TYPE.
+ If ARRAY_TYPE is that of an unconstrained array, this is used to represent
+ an arbitrary unconstrained object. Use NAME as the name of the record.
+ DEBUG_INFO_P is true if we need to write debug information for the type. */
+extern tree build_unc_object_type (tree template_type, tree object_type,
+ tree name, bool debug_info_p);
+
+/* Same as build_unc_object_type, but taking a thin or fat pointer type
+ instead of the template type. */
+extern tree build_unc_object_type_from_ptr (tree thin_fat_ptr_type,
+ tree object_type, tree name,
+ bool debug_info_p);
+
+/* Update anything previously pointing to OLD_TYPE to point to NEW_TYPE. In
+ the normal case this is just two adjustments, but we have more to do
+   if NEW_TYPE is an UNCONSTRAINED_ARRAY_TYPE. */
+extern void update_pointer_to (tree old_type, tree new_type);
+
+/* EXP is an expression for the size of an object. If this size contains
+ discriminant references, replace them with the maximum (if MAX_P) or
+ minimum (if !MAX_P) possible value of the discriminant. */
+extern tree max_size (tree exp, bool max_p);
+
+/* Remove all conversions that are done in EXP. This includes converting
+ from a padded type or to a left-justified modular type. If TRUE_ADDRESS
+ is true, always return the address of the containing object even if
+ the address is not bit-aligned. */
+extern tree remove_conversions (tree exp, bool true_address);
+
+/* If EXP's type is an UNCONSTRAINED_ARRAY_TYPE, return an expression that
+ refers to the underlying array. If its type has TYPE_CONTAINS_TEMPLATE_P,
+ likewise return an expression pointing to the underlying array. */
+extern tree maybe_unconstrained_array (tree exp);
+
+/* Return an expression that does an unchecked conversion of EXPR to TYPE.
+ If NOTRUNC_P is true, truncation operations should be suppressed. */
+extern tree unchecked_convert (tree type, tree expr, bool notrunc_p);
+
+/* Return the appropriate GCC tree code for the specified GNAT_TYPE,
+   the latter being a record type as determined by Is_Record_Type. */
+extern enum tree_code tree_code_for_record_type (Entity_Id gnat_type);
+
+/* Return true if GNAT_TYPE is a "double" floating-point type, i.e. whose
+ size is equal to 64 bits, or an array of such a type. Set ALIGN_CLAUSE
+ according to the presence of an alignment clause on the type or, if it
+ is an array, on the component type. */
+extern bool is_double_float_or_array (Entity_Id gnat_type,
+ bool *align_clause);
+
+/* Return true if GNAT_TYPE is a "double" or larger scalar type, i.e. whose
+   size is greater than or equal to 64 bits, or an array of such a type.  Set
+ ALIGN_CLAUSE according to the presence of an alignment clause on the
+ type or, if it is an array, on the component type. */
+extern bool is_double_scalar_or_array (Entity_Id gnat_type,
+ bool *align_clause);
+
+/* Return true if GNU_TYPE is suitable as the type of a non-aliased
+ component of an aggregate type. */
+extern bool type_for_nonaliased_component_p (tree gnu_type);
+
+/* Return true if TYPE is a smaller form of ORIG_TYPE. */
+extern bool smaller_form_type_p (tree type, tree orig_type);
+
+/* Return the base type of TYPE. */
+extern tree get_base_type (tree type);
+
+/* EXP is a GCC tree representing an address. See if we can find how
+   strictly the object at that address is aligned.  Return that alignment
+ in bits. If we don't know anything about the alignment, return 0. */
+extern unsigned int known_alignment (tree exp);
+
+/* Return true if VALUE is a multiple of FACTOR. FACTOR must be a power
+ of 2. */
+extern bool value_factor_p (tree value, HOST_WIDE_INT factor);
+
+/* Build an atomic load for the underlying atomic object in SRC. */
+extern tree build_atomic_load (tree src);
+
+/* Build an atomic store from SRC to the underlying atomic object in DEST. */
+extern tree build_atomic_store (tree dest, tree src);
+
+/* Make a binary operation of kind OP_CODE. RESULT_TYPE is the type
+ desired for the result. Usually the operation is to be performed
+ in that type. For MODIFY_EXPR and ARRAY_REF, RESULT_TYPE may be 0
+ in which case the type to be used will be derived from the operands. */
+extern tree build_binary_op (enum tree_code op_code, tree result_type,
+ tree left_operand, tree right_operand);
+
+/* Similar, but make unary operation. */
+extern tree build_unary_op (enum tree_code op_code, tree result_type,
+ tree operand);
+
+/* Similar, but for COND_EXPR. */
+extern tree build_cond_expr (tree result_type, tree condition_operand,
+ tree true_operand, tree false_operand);
+
+/* Similar, but for COMPOUND_EXPR. */
+extern tree build_compound_expr (tree result_type, tree stmt_operand,
+ tree expr_operand);
+
+/* Conveniently construct a function call expression. FNDECL names the
+ function to be called, N is the number of arguments, and the "..."
+ parameters are the argument expressions. Unlike build_call_expr
+ this doesn't fold the call, hence it will always return a CALL_EXPR. */
+extern tree build_call_n_expr (tree fndecl, int n, ...);
+
+/* Call a function that raises an exception and pass the line number and file
+ name, if requested. MSG says which exception function to call.
+
+ GNAT_NODE is the gnat node conveying the source location for which the
+ error should be signaled, or Empty in which case the error is signaled on
+ the current ref_file_name/input_line.
+
+ KIND says which kind of exception this is for
+ (N_Raise_{Constraint,Storage,Program}_Error). */
+extern tree build_call_raise (int msg, Node_Id gnat_node, char kind);
+
+/* Similar to build_call_raise, for an index or range check exception as
+ determined by MSG, with extra information generated of the form
+ "INDEX out of range FIRST..LAST". */
+extern tree build_call_raise_range (int msg, Node_Id gnat_node,
+ tree index, tree first, tree last);
+
+/* Similar to build_call_raise, with extra information about the column
+ where the check failed. */
+extern tree build_call_raise_column (int msg, Node_Id gnat_node);
+
+/* Return a CONSTRUCTOR of TYPE whose elements are V. This is not the
+ same as build_constructor in the language-independent tree.c. */
+extern tree gnat_build_constructor (tree type, vec<constructor_elt, va_gc> *v);
+
+/* Return a COMPONENT_REF to access a field that is given by COMPONENT,
+ an IDENTIFIER_NODE giving the name of the field, FIELD, a FIELD_DECL,
+ for the field, or both. Don't fold the result if NO_FOLD_P. */
+extern tree build_component_ref (tree record_variable, tree component,
+ tree field, bool no_fold_p);
+
+/* Build a GCC tree to call an allocation or deallocation function.
+ If GNU_OBJ is nonzero, it is an object to deallocate. Otherwise,
+ generate an allocator.
+
+ GNU_SIZE is the number of bytes to allocate and GNU_TYPE is the contained
+ object type, used to determine the to-be-honored address alignment.
+ GNAT_PROC, if present, is a procedure to call and GNAT_POOL is the storage
+ pool to use. If not present, malloc and free are used. GNAT_NODE is used
+ to provide an error location for restriction violation messages. */
+extern tree build_call_alloc_dealloc (tree gnu_obj, tree gnu_size,
+ tree gnu_type, Entity_Id gnat_proc,
+ Entity_Id gnat_pool, Node_Id gnat_node);
+
+/* Build a GCC tree to correspond to allocating an object of TYPE whose
+   initial value is INIT, if INIT is nonzero.  Convert the expression to
+ RESULT_TYPE, which must be some type of pointer. Return the tree.
+
+ GNAT_PROC and GNAT_POOL optionally give the procedure to call and
+ the storage pool to use. GNAT_NODE is used to provide an error
+ location for restriction violation messages. If IGNORE_INIT_TYPE is
+ true, ignore the type of INIT for the purpose of determining the size;
+ this will cause the maximum size to be allocated if TYPE is of
+ self-referential size. */
+extern tree build_allocator (tree type, tree init, tree result_type,
+ Entity_Id gnat_proc, Entity_Id gnat_pool,
+ Node_Id gnat_node, bool);
+
+/* Fill in a VMS descriptor of GNU_TYPE for GNU_EXPR and return the result.
+ GNAT_ACTUAL is the actual parameter for which the descriptor is built. */
+extern tree fill_vms_descriptor (tree gnu_type, tree gnu_expr,
+ Node_Id gnat_actual);
+
+/* Convert GNU_EXPR, a pointer to a VMS descriptor, to GNU_TYPE, a regular
+ pointer or fat pointer type. GNU_EXPR_ALT_TYPE is the alternate (32-bit)
+ pointer type of GNU_EXPR. GNAT_SUBPROG is the subprogram to which the
+ descriptor is passed. */
+extern tree convert_vms_descriptor (tree gnu_type, tree gnu_expr,
+ tree gnu_expr_alt_type,
+ Entity_Id gnat_subprog);
+
+/* Indicate that we need to take the address of T and that it therefore
+ should not be allocated in a register. Returns true if successful. */
+extern bool gnat_mark_addressable (tree t);
+
+/* Save EXP for later use or reuse. This is equivalent to save_expr in tree.c
+ but we know how to handle our own nodes. */
+extern tree gnat_save_expr (tree exp);
+
+/* Protect EXP for immediate reuse. This is a variant of gnat_save_expr that
+ is optimized under the assumption that EXP's value doesn't change before
+ its subsequent reuse(s) except through its potential reevaluation. */
+extern tree gnat_protect_expr (tree exp);
+
+/* This is equivalent to stabilize_reference in tree.c but we know how to
+ handle our own nodes and we take extra arguments. FORCE says whether to
+ force evaluation of everything. We set SUCCESS to true unless we walk
+ through something we don't know how to stabilize. */
+extern tree gnat_stabilize_reference (tree ref, bool force, bool *success);
+
+/* If EXPR is an expression that is invariant in the current function, in the
+ sense that it can be evaluated anywhere in the function and any number of
+ times, return EXPR or an equivalent expression. Otherwise return NULL. */
+extern tree gnat_invariant_expr (tree expr);
+
+/* Implementation of the builtin_function langhook. */
+extern tree gnat_builtin_function (tree decl);
+
+/* Search the chain of currently reachable declarations for a builtin
+ FUNCTION_DECL node corresponding to function NAME (an IDENTIFIER_NODE).
+ Return the first node found, if any, or NULL_TREE otherwise. */
+extern tree builtin_decl_for (tree name);
+
+/* GNU_TYPE is a type. Determine if it should be passed by reference by
+ default. */
+extern bool default_pass_by_ref (tree gnu_type);
+
+/* GNU_TYPE is the type of a subprogram parameter. Determine from the type
+ if it should be passed by reference. */
+extern bool must_pass_by_ref (tree gnu_type);
+
+/* Return the size of the FP mode with precision PREC. */
+extern int fp_prec_to_size (int prec);
+
+/* Return the precision of the FP mode with size SIZE. */
+extern int fp_size_to_prec (int size);
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* These functions return the basic data type sizes and related parameters
+ about the target machine. */
+extern Pos get_target_bits_per_unit (void);
+extern Pos get_target_bits_per_word (void);
+extern Pos get_target_char_size (void);
+extern Pos get_target_wchar_t_size (void);
+extern Pos get_target_short_size (void);
+extern Pos get_target_int_size (void);
+extern Pos get_target_long_size (void);
+extern Pos get_target_long_long_size (void);
+extern Pos get_target_float_size (void);
+extern Pos get_target_double_size (void);
+extern Pos get_target_long_double_size (void);
+extern Pos get_target_pointer_size (void);
+extern Pos get_target_maximum_default_alignment (void);
+extern Pos get_target_system_allocator_alignment (void);
+extern Pos get_target_maximum_allowed_alignment (void);
+extern Pos get_target_maximum_alignment (void);
+extern Nat get_float_words_be (void);
+extern Nat get_words_be (void);
+extern Nat get_bytes_be (void);
+extern Nat get_bits_be (void);
+extern Nat get_target_strict_alignment (void);
+extern Nat get_target_double_float_alignment (void);
+extern Nat get_target_double_scalar_alignment (void);
+
+/* This function is called by the front-end to enumerate all the supported
+ modes for the machine, as well as some predefined C types. */
+extern void enumerate_modes (void (*f) (const char *, int, int, int, int, int,
+ int, int));
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Let code know whether we are targeting VMS without the need for
+   intrusive preprocessor directives. */
+#ifndef TARGET_ABI_OPEN_VMS
+#define TARGET_ABI_OPEN_VMS 0
+#endif
+
+/* VMS option set by default; when clear, it forces 32-bit mallocs and
+   32-bit descriptors.  Only ever used in combination with
+   TARGET_ABI_OPEN_VMS, so it has no effect on non-VMS systems. */
+#if TARGET_ABI_OPEN_VMS == 0
+#define flag_vms_malloc64 0
+#endif
+
+/* Convenient shortcuts. */
+#define VECTOR_TYPE_P(TYPE) (TREE_CODE (TYPE) == VECTOR_TYPE)
+
+/* If EXP's type is a VECTOR_TYPE, return EXP converted to the associated
+ TYPE_REPRESENTATIVE_ARRAY. */
+
+static inline tree
+maybe_vector_array (tree exp)
+{
+ tree etype = TREE_TYPE (exp);
+
+ if (VECTOR_TYPE_P (etype))
+ exp = convert (TYPE_REPRESENTATIVE_ARRAY (etype), exp);
+
+ return exp;
+}
+
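+/* Return the smallest power of 2 greater than or equal to X, e.g.
+   ceil_pow2 (5) == 8 and ceil_pow2 (8) == 8. */
+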
+static inline unsigned HOST_WIDE_INT
+ceil_pow2 (unsigned HOST_WIDE_INT x)
+{
+ return (unsigned HOST_WIDE_INT) 1 << (floor_log2 (x - 1) + 1);
+}
diff --git a/gcc-4.9/gcc/ada/gcc-interface/lang-specs.h b/gcc-4.9/gcc/ada/gcc-interface/lang-specs.h
new file mode 100644
index 000000000..850fee5e2
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/lang-specs.h
@@ -0,0 +1,74 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * L A N G - S P E C S *
+ * *
+ * C Header File *
+ * *
+ * Copyright (C) 1992-2012, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License along with GCC; see the file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+/* This is the contribution to the `default_compilers' array in gcc.c for
+ GNAT. */
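+
+/* As a brief reminder of the spec language used below (gcc.c has the
+   authoritative description): %{x:y} substitutes y when switch -x was
+   given and %{!x:y} when it was not, %e emits an error message, %b is
+   the input file basename, %i is the input file name, and %(name)
+   inserts the named spec. */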
+
+ {".ads", "@ada", 0, 0, 0},
+ {".adb", "@ada", 0, 0, 0},
+ {"@ada",
+ "\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{!S:%{!c:%e-c or -S required for Ada}}\
+ gnat1 %{I*} %{k8:-gnatk8} %{Wall:-gnatwa} %{w:-gnatws} %{!Q:-quiet}\
+ %{nostdinc*} %{nostdlib*}\
+ -dumpbase %{.adb:%b.adb}%{.ads:%b.ads}%{!.adb:%{!.ads:%b.ada}}\
+ %{c|S:%{o*:-auxbase-strip %*}%{!o*:-auxbase %b}}%{!c:%{!S:-auxbase %b}} \
+ %{O*} %{W*} %{w} %{p} %{pg:-p} %{d*} %{f*}\
+ %{coverage:-fprofile-arcs -ftest-coverage} "
+ "%{gnatea:-gnatez} %{g*&m*} "
+#if defined(TARGET_VXWORKS_RTP)
+ "%{fRTS=rtp:-mrtp} %{fRTS=rtp-smp:-mrtp} %{fRTS=ravenscar-cert-rtp:-mrtp}"
+#endif
+ "%1 %{!S:%{o*:%w%*-gnatO}} \
+ %i %{S:%W{o*}%{!o*:-o %b.s}} \
+ %{gnatc*|gnats*: -o %j} %{-param*} \
+ %{!gnatc*:%{!gnats*:%(invoke_as)}}", 0, 0, 0},
+
+ {"@adawhy",
+ "\
+ %{!c:%e-c required for gnat2why}\
+ gnat1why %{I*} %{k8:-gnatk8} %{!Q:-quiet}\
+ %{nostdinc*} %{nostdlib*}\
+ -dumpbase %{.adb:%b.adb}%{.ads:%b.ads}%{!.adb:%{!.ads:%b.ada}}\
+ %{o*:-auxbase-strip %*}%{!o*:-auxbase %b} \
+ %{a} %{d*} %{f*} \
+ %{gnatea:-gnatez} %{g*&m*} \
+ %1 %{o*:%w%*-gnatO} \
+ %i \
+ %{gnatc*|gnats*: -o %j} %{-param*} ", 0, 0, 0},
+
+ {"@adascil",
+ "\
+ %{!c:%e-c required for gnat2scil}\
+ gnat1scil %{I*} %{k8:-gnatk8} %{!Q:-quiet}\
+ %{nostdinc*} %{nostdlib*}\
+ -dumpbase %{.adb:%b.adb}%{.ads:%b.ads}%{!.adb:%{!.ads:%b.ada}}\
+ %{o*:-auxbase-strip %*}%{!o*:-auxbase %b} \
+ %{a} %{d*} %{f*} \
+ %{gnatea:-gnatez} %{g*&m*} \
+ %1 %{o*:%w%*-gnatO} \
+ %i \
+ %{gnatc*|gnats*: -o %j} %{-param*} ", 0, 0, 0},
diff --git a/gcc-4.9/gcc/ada/gcc-interface/lang.opt b/gcc-4.9/gcc/ada/gcc-interface/lang.opt
new file mode 100644
index 000000000..004388ba2
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/lang.opt
@@ -0,0 +1,91 @@
+; Options for the Ada front end.
+; Copyright (C) 2003-2013 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+; See the GCC internals manual for a description of this file's format.
+
+; Please try to keep this file in ASCII collating order.
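+
+; As a brief reminder of the format (the GCC internals manual has the
+; authoritative description): each record consists of the option name on
+; one line, then a line listing the applicable languages and properties
+; (e.g. Joined, Separate, Alias(...)), optionally followed by help text.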
+
+Language
+Ada
+
+Language
+AdaWhy
+
+Language
+AdaSCIL
+
+-all-warnings
+Ada AdaWhy AdaSCIL Alias(Wall)
+
+-include-barrier
+Ada AdaWhy AdaSCIL Alias(I, -)
+
+-include-directory
+Ada AdaWhy AdaSCIL Separate Alias(I)
+
+-include-directory=
+Ada AdaWhy AdaSCIL Joined Alias(I)
+
+-no-standard-includes
+Ada AdaWhy AdaSCIL Alias(nostdinc)
+
+-no-standard-libraries
+Ada AdaWhy AdaSCIL Alias(nostdlib)
+
+I
+Ada AdaWhy AdaSCIL Joined Separate
+; Documented in C but it should be: -I <dir>. Add <dir> to the end of the main source path
+
+Wall
+Ada AdaWhy AdaSCIL
+Enable most warning messages
+
+k8
+Driver
+Synonym of -gnatk8
+
+nostdinc
+Ada AdaWhy AdaSCIL RejectNegative
+; Documented in C but it should be: Do not look for source files in standard path
+
+nostdlib
+Ada AdaWhy AdaSCIL
+Do not look for object files in standard path
+
+fRTS=
+Ada AdaWhy AdaSCIL Joined RejectNegative
+Select the runtime
+
+fshort-enums
+Ada AdaWhy AdaSCIL
+Use the narrowest integer type possible for enumeration types
+
+gant
+Ada AdaWhy AdaSCIL Joined Undocumented
+Catch typos
+
+gnatO
+Ada AdaWhy AdaSCIL Separate
+Set name of output ALI file (internal switch)
+
+gnat
+Ada AdaWhy AdaSCIL Joined
+-gnat<options> Specify options to GNAT
+
+; This comment is to ensure we retain the blank line above.
diff --git a/gcc-4.9/gcc/ada/gcc-interface/misc.c b/gcc-4.9/gcc/ada/gcc-interface/misc.c
new file mode 100644
index 000000000..a5f2881d6
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/misc.c
@@ -0,0 +1,928 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * M I S C *
+ * *
+ * C Implementation File *
+ * *
+ * Copyright (C) 1992-2014, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License distributed with GNAT; see file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "opts.h"
+#include "options.h"
+#include "tm.h"
+#include "tree.h"
+#include "stor-layout.h"
+#include "print-tree.h"
+#include "diagnostic.h"
+#include "target.h"
+#include "ggc.h"
+#include "flags.h"
+#include "debug.h"
+#include "toplev.h"
+#include "langhooks.h"
+#include "langhooks-def.h"
+#include "plugin.h"
+#include "real.h"
+#include "function.h" /* For pass_by_reference. */
+
+#include "ada.h"
+#include "adadecode.h"
+#include "types.h"
+#include "atree.h"
+#include "elists.h"
+#include "namet.h"
+#include "nlists.h"
+#include "stringt.h"
+#include "uintp.h"
+#include "fe.h"
+#include "sinfo.h"
+#include "einfo.h"
+#include "ada-tree.h"
+#include "gigi.h"
+
+/* This symbol needs to be defined for the front-end. */
+void *callgraph_info_file = NULL;
+
+/* Command-line argc and argv. These variables are global since they are
+ imported in back_end.adb. */
+unsigned int save_argc;
+const char **save_argv;
+
+/* GNAT argc and argv. */
+extern int gnat_argc;
+extern char **gnat_argv;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Declare functions we use as part of startup. */
+extern void __gnat_initialize (void *);
+extern void __gnat_install_SEH_handler (void *);
+extern void adainit (void);
+extern void _ada_gnat1drv (void);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* The parser for the language. For us, we process the GNAT tree. */
+
+static void
+gnat_parse_file (void)
+{
+ int seh[2];
+
+ /* Call the target specific initializations. */
+ __gnat_initialize (NULL);
+
+  /* ??? Call the SEH initialization routine.  This is to work around
+ a bootstrap path problem. The call below should be removed at some
+ point and the SEH pointer passed to __gnat_initialize() above. */
+ __gnat_install_SEH_handler((void *)seh);
+
+ /* Call the front-end elaboration procedures. */
+ adainit ();
+
+ /* Call the front end. */
+ _ada_gnat1drv ();
+}
+
+/* Return language mask for option processing. */
+
+static unsigned int
+gnat_option_lang_mask (void)
+{
+ return CL_Ada;
+}
+
+/* Decode all the language specific options that cannot be decoded by GCC.
+ The option decoding phase of GCC calls this routine on the flags that
+ are marked as Ada-specific. Return true on success or false on failure. */
+
+static bool
+gnat_handle_option (size_t scode, const char *arg ATTRIBUTE_UNUSED, int value,
+ int kind ATTRIBUTE_UNUSED, location_t loc ATTRIBUTE_UNUSED,
+ const struct cl_option_handlers *handlers ATTRIBUTE_UNUSED)
+{
+ enum opt_code code = (enum opt_code) scode;
+
+ switch (code)
+ {
+ case OPT_Wall:
+ handle_generated_option (&global_options, &global_options_set,
+ OPT_Wunused, NULL, value,
+ gnat_option_lang_mask (), kind, loc,
+ handlers, global_dc);
+ warn_uninitialized = value;
+ warn_maybe_uninitialized = value;
+ break;
+
+ case OPT_gant:
+ warning (0, "%<-gnat%> misspelled as %<-gant%>");
+
+ /* ... fall through ... */
+
+ case OPT_gnat:
+ case OPT_gnatO:
+ case OPT_fRTS_:
+ case OPT_I:
+ case OPT_nostdinc:
+ case OPT_nostdlib:
+ /* These are handled by the front-end. */
+ break;
+
+ case OPT_fshort_enums:
+ /* This is handled by the middle-end. */
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ Ada_handle_option_auto (&global_options, &global_options_set,
+ scode, arg, value,
+ gnat_option_lang_mask (), kind,
+ loc, handlers, global_dc);
+ return true;
+}
+
+/* Initialize options structure OPTS. */
+
+static void
+gnat_init_options_struct (struct gcc_options *opts)
+{
+ /* Uninitialized really means uninitialized in Ada. */
+ opts->x_flag_zero_initialized_in_bss = 0;
+
+ /* We can delete dead instructions that may throw exceptions in Ada. */
+ opts->x_flag_delete_dead_exceptions = 1;
+}
+
+/* Initialize for option processing. */
+
+static void
+gnat_init_options (unsigned int decoded_options_count,
+ struct cl_decoded_option *decoded_options)
+{
+ /* Reconstruct an argv array for use of back_end.adb.
+
+ ??? back_end.adb should not rely on this; instead, it should work with
+ decoded options without such reparsing, to ensure consistency in how
+ options are decoded. */
+ unsigned int i;
+
+ save_argv = XNEWVEC (const char *, 2 * decoded_options_count + 1);
+ save_argc = 0;
+ for (i = 0; i < decoded_options_count; i++)
+ {
+ size_t num_elements = decoded_options[i].canonical_option_num_elements;
+
+ if (decoded_options[i].errors
+ || decoded_options[i].opt_index == OPT_SPECIAL_unknown
+ || num_elements == 0)
+ continue;
+
+ /* Deal with -I- specially since it must be a single switch. */
+ if (decoded_options[i].opt_index == OPT_I
+ && num_elements == 2
+ && decoded_options[i].canonical_option[1][0] == '-'
+ && decoded_options[i].canonical_option[1][1] == '\0')
+ save_argv[save_argc++] = "-I-";
+ else
+ {
+ gcc_assert (num_elements >= 1 && num_elements <= 2);
+ save_argv[save_argc++] = decoded_options[i].canonical_option[0];
+ if (num_elements >= 2)
+ save_argv[save_argc++] = decoded_options[i].canonical_option[1];
+ }
+ }
+ save_argv[save_argc] = NULL;
+
+ gnat_argv = (char **) xmalloc (sizeof (save_argv[0]));
+ gnat_argv[0] = xstrdup (save_argv[0]); /* name of the command */
+ gnat_argc = 1;
+}
+
+/* Ada code requires variables for these settings rather than elements
+ of the global_options structure. */
+#undef optimize
+#undef optimize_size
+#undef flag_compare_debug
+#undef flag_short_enums
+#undef flag_stack_check
+int optimize;
+int optimize_size;
+int flag_compare_debug;
+int flag_short_enums;
+enum stack_check_type flag_stack_check = NO_STACK_CHECK;
+
+/* Settings adjustments after switch processing by the back-end.
+   Note that the front-end switch processing (Scan_Compiler_Arguments)
+ has not been done yet at this point! */
+
+static bool
+gnat_post_options (const char **pfilename ATTRIBUTE_UNUSED)
+{
+ /* Excess precision other than "fast" requires front-end support. */
+ if (flag_excess_precision_cmdline == EXCESS_PRECISION_STANDARD
+ && TARGET_FLT_EVAL_METHOD_NON_DEFAULT)
+ sorry ("-fexcess-precision=standard for Ada");
+ flag_excess_precision_cmdline = EXCESS_PRECISION_FAST;
+
+ /* ??? The warning machinery is outsmarted by Ada. */
+ warn_unused_parameter = 0;
+
+ /* No psABI change warnings for Ada. */
+ warn_psabi = 0;
+
+ /* No caret by default for Ada. */
+ if (!global_options_set.x_flag_diagnostics_show_caret)
+ global_dc->show_caret = false;
+
+ optimize = global_options.x_optimize;
+ optimize_size = global_options.x_optimize_size;
+ flag_compare_debug = global_options.x_flag_compare_debug;
+ flag_stack_check = global_options.x_flag_stack_check;
+ flag_short_enums = global_options.x_flag_short_enums;
+
+ /* Unfortunately the post_options hook is called before the value of
+ flag_short_enums is autodetected, if need be. Mimic the process
+ for our private flag_short_enums. */
+ if (flag_short_enums == 2)
+ flag_short_enums = targetm.default_short_enums ();
+
+ return false;
+}
+
+/* Here is the function to handle the compiler error processing in GCC. */
+
+static void
+internal_error_function (diagnostic_context *context,
+ const char *msgid, va_list *ap)
+{
+ text_info tinfo;
+ char *buffer, *p, *loc;
+ String_Template temp, temp_loc;
+ Fat_Pointer fp, fp_loc;
+ expanded_location s;
+
+ /* Warn if plugins present. */
+ warn_if_plugins ();
+
+ /* Reset the pretty-printer. */
+ pp_clear_output_area (context->printer);
+
+ /* Format the message into the pretty-printer. */
+ tinfo.format_spec = msgid;
+ tinfo.args_ptr = ap;
+ tinfo.err_no = errno;
+ pp_format_verbatim (context->printer, &tinfo);
+
+ /* Extract a (writable) pointer to the formatted text. */
+ buffer = xstrdup (pp_formatted_text (context->printer));
+
+ /* Go up to the first newline. */
+ for (p = buffer; *p; p++)
+ if (*p == '\n')
+ {
+ *p = '\0';
+ break;
+ }
+
+ temp.Low_Bound = 1;
+ temp.High_Bound = p - buffer;
+ fp.Bounds = &temp;
+ fp.Array = buffer;
+
+ s = expand_location (input_location);
+ if (context->show_column && s.column != 0)
+ asprintf (&loc, "%s:%d:%d", s.file, s.line, s.column);
+ else
+ asprintf (&loc, "%s:%d", s.file, s.line);
+ temp_loc.Low_Bound = 1;
+ temp_loc.High_Bound = strlen (loc);
+ fp_loc.Bounds = &temp_loc;
+ fp_loc.Array = loc;
+
+ Current_Error_Node = error_gnat_node;
+ Compiler_Abort (fp, -1, fp_loc);
+}
+
+/* Perform all the initialization steps that are language-specific. */
+
+static bool
+gnat_init (void)
+{
+  /* Do little here: most of the standard declarations are set up after the
+     front-end has been run.  Use the same `char' as C; this doesn't really
+     matter since we'll use the explicit `unsigned char' for Character. */
+ build_common_tree_nodes (flag_signed_char, false);
+
+ /* In Ada, we use an unsigned 8-bit type for the default boolean type. */
+ boolean_type_node = make_unsigned_type (8);
+ TREE_SET_CODE (boolean_type_node, BOOLEAN_TYPE);
+ SET_TYPE_RM_MAX_VALUE (boolean_type_node,
+ build_int_cst (boolean_type_node, 1));
+ SET_TYPE_RM_SIZE (boolean_type_node, bitsize_int (1));
+ boolean_true_node = TYPE_MAX_VALUE (boolean_type_node);
+ boolean_false_node = TYPE_MIN_VALUE (boolean_type_node);
+
+ sbitsize_one_node = sbitsize_int (1);
+ sbitsize_unit_node = sbitsize_int (BITS_PER_UNIT);
+
+ ptr_void_type_node = build_pointer_type (void_type_node);
+
+ /* Show that REFERENCE_TYPEs are internal and should be Pmode. */
+ internal_reference_types ();
+
+ /* Register our internal error function. */
+ global_dc->internal_error = &internal_error_function;
+
+ return true;
+}
+
+/* Initialize the GCC support for exception handling. */
+
+void
+gnat_init_gcc_eh (void)
+{
+  /* We shouldn't do anything if the No_Exception_Handlers pragma is set,
+ though. This could for instance lead to the emission of tables with
+ references to symbols (such as the Ada eh personality routine) within
+ libraries we won't link against. */
+ if (No_Exception_Handlers_Set ())
+ return;
+
+ /* Tell GCC we are handling cleanup actions through exception propagation.
+ This opens possibilities that we don't take advantage of yet, but is
+ nonetheless necessary to ensure that fixup code gets assigned to the
+ right exception regions. */
+ using_eh_for_cleanups ();
+
+ /* Turn on -fexceptions and -fnon-call-exceptions. The first one triggers
+ the generation of the necessary exception tables. The second one is
+ useful for two reasons: 1/ we map some asynchronous signals like SEGV to
+ exceptions, so we need to ensure that the insns which can lead to such
+ signals are correctly attached to the exception region they pertain to,
+ 2/ Some calls to pure subprograms are handled as libcall blocks and then
+ marked as "cannot trap" if the flag is not set (see emit_libcall_block).
+     We must not let that happen, since it is possible for such calls to
+     actually raise in Ada. */
+ flag_exceptions = 1;
+ flag_non_call_exceptions = 1;
+
+ init_eh ();
+}
+
+/* Initialize the GCC support for floating-point operations. */
+
+void
+gnat_init_gcc_fp (void)
+{
+ /* Disable FP optimizations that ignore the signedness of zero if
+ S'Signed_Zeros is true, but don't override the user if not. */
+ if (Signed_Zeros_On_Target)
+ flag_signed_zeros = 1;
+ else if (!global_options_set.x_flag_signed_zeros)
+ flag_signed_zeros = 0;
+
+ /* Assume that FP operations can trap if S'Machine_Overflow is true,
+ but don't override the user if not.
+
+ ??? Alpha/VMS enables FP traps without declaring it. */
+ if (Machine_Overflows_On_Target || TARGET_ABI_OPEN_VMS)
+ flag_trapping_math = 1;
+ else if (!global_options_set.x_flag_trapping_math)
+ flag_trapping_math = 0;
+}
+
+/* Print language-specific items in declaration NODE. */
+
+static void
+gnat_print_decl (FILE *file, tree node, int indent)
+{
+ switch (TREE_CODE (node))
+ {
+ case CONST_DECL:
+ print_node (file, "corresponding var",
+ DECL_CONST_CORRESPONDING_VAR (node), indent + 4);
+ break;
+
+ case FIELD_DECL:
+ print_node (file, "original field", DECL_ORIGINAL_FIELD (node),
+ indent + 4);
+ break;
+
+ case VAR_DECL:
+ if (DECL_LOOP_PARM_P (node))
+ print_node (file, "induction var", DECL_INDUCTION_VAR (node),
+ indent + 4);
+ else
+ print_node (file, "renamed object", DECL_RENAMED_OBJECT (node),
+ indent + 4);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Print language-specific items in type NODE. */
+
+static void
+gnat_print_type (FILE *file, tree node, int indent)
+{
+ switch (TREE_CODE (node))
+ {
+ case FUNCTION_TYPE:
+ print_node (file, "ci/co list", TYPE_CI_CO_LIST (node), indent + 4);
+ break;
+
+ case INTEGER_TYPE:
+ if (TYPE_MODULAR_P (node))
+ print_node_brief (file, "modulus", TYPE_MODULUS (node), indent + 4);
+ else if (TYPE_HAS_ACTUAL_BOUNDS_P (node))
+ print_node (file, "actual bounds", TYPE_ACTUAL_BOUNDS (node),
+ indent + 4);
+ else if (TYPE_VAX_FLOATING_POINT_P (node))
+ ;
+ else
+ print_node (file, "index type", TYPE_INDEX_TYPE (node), indent + 4);
+
+ /* ... fall through ... */
+
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ print_node_brief (file, "RM size", TYPE_RM_SIZE (node), indent + 4);
+
+ /* ... fall through ... */
+
+ case REAL_TYPE:
+ print_node_brief (file, "RM min", TYPE_RM_MIN_VALUE (node), indent + 4);
+ print_node_brief (file, "RM max", TYPE_RM_MAX_VALUE (node), indent + 4);
+ break;
+
+ case ARRAY_TYPE:
+      print_node (file, "actual bounds", TYPE_ACTUAL_BOUNDS (node),
+		  indent + 4);
+ break;
+
+ case VECTOR_TYPE:
+      print_node (file, "representative array",
+ TYPE_REPRESENTATIVE_ARRAY (node), indent + 4);
+ break;
+
+ case RECORD_TYPE:
+ if (TYPE_FAT_POINTER_P (node) || TYPE_CONTAINS_TEMPLATE_P (node))
+ print_node (file, "unconstrained array",
+ TYPE_UNCONSTRAINED_ARRAY (node), indent + 4);
+ else
+ print_node (file, "Ada size", TYPE_ADA_SIZE (node), indent + 4);
+ break;
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ print_node (file, "Ada size", TYPE_ADA_SIZE (node), indent + 4);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Return the name to be printed for DECL. */
+
+static const char *
+gnat_printable_name (tree decl, int verbosity)
+{
+ const char *coded_name = IDENTIFIER_POINTER (DECL_NAME (decl));
+ char *ada_name = (char *) ggc_alloc_atomic (strlen (coded_name) * 2 + 60);
+
+ __gnat_decode (coded_name, ada_name, 0);
+
+ if (verbosity == 2 && !DECL_IS_BUILTIN (decl))
+ {
+ Set_Identifier_Casing (ada_name, DECL_SOURCE_FILE (decl));
+ return ggc_strdup (Name_Buffer);
+ }
+
+ return ada_name;
+}
+
+/* Return the name to be used in DWARF debug info for DECL. */
+
+static const char *
+gnat_dwarf_name (tree decl, int verbosity ATTRIBUTE_UNUSED)
+{
+ gcc_assert (DECL_P (decl));
+ return (const char *) IDENTIFIER_POINTER (DECL_NAME (decl));
+}
+
+/* Return the descriptive type associated with TYPE, if any. */
+
+static tree
+gnat_descriptive_type (const_tree type)
+{
+ if (TYPE_STUB_DECL (type))
+ return DECL_PARALLEL_TYPE (TYPE_STUB_DECL (type));
+ else
+ return NULL_TREE;
+}
+
+/* Return true if types T1 and T2 are identical for type hashing purposes.
+ Called only after doing all language independent checks. At present,
+ this function is only called when both types are FUNCTION_TYPE. */
+
+static bool
+gnat_type_hash_eq (const_tree t1, const_tree t2)
+{
+ gcc_assert (TREE_CODE (t1) == FUNCTION_TYPE);
+ return fntype_same_flags_p (t1, TYPE_CI_CO_LIST (t2),
+ TYPE_RETURN_UNCONSTRAINED_P (t2),
+ TYPE_RETURN_BY_DIRECT_REF_P (t2),
+ TREE_ADDRESSABLE (t2));
+}
+
+/* Do nothing (return the tree node passed). */
+
+static tree
+gnat_return_tree (tree t)
+{
+ return t;
+}
+
+/* Get the alias set corresponding to a type or expression. */
+
+static alias_set_type
+gnat_get_alias_set (tree type)
+{
+ /* If this is a padding type, use the type of the first field. */
+ if (TYPE_IS_PADDING_P (type))
+ return get_alias_set (TREE_TYPE (TYPE_FIELDS (type)));
+
+ /* If the type is an unconstrained array, use the type of the
+ self-referential array we make. */
+ else if (TREE_CODE (type) == UNCONSTRAINED_ARRAY_TYPE)
+ return
+ get_alias_set (TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (type)))));
+
+ /* If the type can alias any other types, return the alias set 0. */
+ else if (TYPE_P (type)
+ && TYPE_UNIVERSAL_ALIASING_P (TYPE_MAIN_VARIANT (type)))
+ return 0;
+
+ return -1;
+}
+
+/* GNU_TYPE is a type. Return its maximum size in bytes, if known,
+ as a constant when possible. */
+
+static tree
+gnat_type_max_size (const_tree gnu_type)
+{
+ /* First see what we can get from TYPE_SIZE_UNIT, which might not
+ be constant even for simple expressions if it has already been
+ elaborated and possibly replaced by a VAR_DECL. */
+ tree max_unitsize = max_size (TYPE_SIZE_UNIT (gnu_type), true);
+
+ /* If we don't have a constant, see what we can get from TYPE_ADA_SIZE,
+ which should stay untouched. */
+ if (!tree_fits_uhwi_p (max_unitsize)
+ && RECORD_OR_UNION_TYPE_P (gnu_type)
+ && !TYPE_FAT_POINTER_P (gnu_type)
+ && TYPE_ADA_SIZE (gnu_type))
+ {
+ tree max_adasize = max_size (TYPE_ADA_SIZE (gnu_type), true);
+
+ /* If we have succeeded in finding a constant, round it up to the
+ type's alignment and return the result in units. */
+ if (tree_fits_uhwi_p (max_adasize))
+ max_unitsize
+ = size_binop (CEIL_DIV_EXPR,
+ round_up (max_adasize, TYPE_ALIGN (gnu_type)),
+ bitsize_unit_node);
+ }
+
+ return max_unitsize;
+}
+
+/* GNU_TYPE is a subtype of an integral type. Set LOWVAL to the low bound
+ and HIGHVAL to the high bound, respectively. */
+
+static void
+gnat_get_subrange_bounds (const_tree gnu_type, tree *lowval, tree *highval)
+{
+ *lowval = TYPE_MIN_VALUE (gnu_type);
+ *highval = TYPE_MAX_VALUE (gnu_type);
+}
+
+/* GNU_TYPE is the type of a subprogram parameter. Determine if it should be
+ passed by reference by default. */
+
+bool
+default_pass_by_ref (tree gnu_type)
+{
+ /* We pass aggregates by reference if they are sufficiently large for
+ their alignment. The ratio is somewhat arbitrary. We also pass by
+ reference if the target machine would either pass or return by
+ reference. Strictly speaking, we need only check the return if this
+ is an In Out parameter, but it's probably best to err on the side of
+ passing more things by reference. */
+
+ if (pass_by_reference (NULL, TYPE_MODE (gnu_type), gnu_type, true))
+ return true;
+
+ if (targetm.calls.return_in_memory (gnu_type, NULL_TREE))
+ return true;
+
+ if (AGGREGATE_TYPE_P (gnu_type)
+ && (!valid_constant_size_p (TYPE_SIZE_UNIT (gnu_type))
+ || 0 < compare_tree_int (TYPE_SIZE_UNIT (gnu_type),
+ TYPE_ALIGN (gnu_type))))
+ return true;
+
+ return false;
+}
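+
+/* Note on the size test above: TYPE_SIZE_UNIT is in bytes while TYPE_ALIGN
+   is in bits, so e.g. an aggregate aligned on 4 bytes (TYPE_ALIGN == 32)
+   starts being passed by reference once it is larger than 32 bytes; that
+   built-in factor of 8 is part of the "somewhat arbitrary" ratio mentioned
+   above. */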
+
+/* GNU_TYPE is the type of a subprogram parameter. Determine if it must be
+ passed by reference. */
+
+bool
+must_pass_by_ref (tree gnu_type)
+{
+ /* We pass only unconstrained objects, those required by the language
+ to be passed by reference, and objects of variable size. The latter
+ is more efficient, avoids problems with variable size temporaries,
+ and does not produce compatibility problems with C, since C does
+ not have such objects. */
+ return (TREE_CODE (gnu_type) == UNCONSTRAINED_ARRAY_TYPE
+ || TYPE_IS_BY_REFERENCE_P (gnu_type)
+ || (TYPE_SIZE_UNIT (gnu_type)
+ && TREE_CODE (TYPE_SIZE_UNIT (gnu_type)) != INTEGER_CST));
+}
+
+/* This function is called by the front-end to enumerate all the supported
+ modes for the machine, as well as some predefined C types. F is a function
+ which is called back with the parameters as listed below, first a string,
+ then seven ints. The name is any arbitrary null-terminated string and has
+ no particular significance, except for the case of predefined C types, where
+ it should be the name of the C type. For integer types, only signed types
+   should be listed; unsigned versions are assumed.  The order of types should
+ be in order of preference, with the smallest/cheapest types first.
+
+ In particular, C predefined types should be listed before other types,
+ binary floating point types before decimal ones, and narrower/cheaper
+ type versions before more expensive ones. In type selection the first
+ matching variant will be used.
+
+ NAME pointer to first char of type name
+ DIGS number of decimal digits for floating-point modes, else 0
+   COMPLEX_P	nonzero if this represents a complex mode
+   COUNT	count of items, nonzero for vector mode
+ FLOAT_REP Float_Rep_Kind for FP, otherwise undefined
+ PRECISION number of bits used to store data
+ SIZE number of bits occupied by the mode
+ ALIGN number of bits to which mode is aligned. */
+
+void
+enumerate_modes (void (*f) (const char *, int, int, int, int, int, int, int))
+{
+ const tree c_types[]
+ = { float_type_node, double_type_node, long_double_type_node };
+ const char *const c_names[]
+ = { "float", "double", "long double" };
+ int iloop;
+
+ for (iloop = 0; iloop < NUM_MACHINE_MODES; iloop++)
+ {
+ enum machine_mode i = (enum machine_mode) iloop;
+ enum machine_mode inner_mode = i;
+ bool float_p = false;
+ bool complex_p = false;
+ bool vector_p = false;
+ bool skip_p = false;
+ int digs = 0;
+ unsigned int nameloop;
+ Float_Rep_Kind float_rep = IEEE_Binary; /* Until proven otherwise */
+
+ switch (GET_MODE_CLASS (i))
+ {
+ case MODE_INT:
+ break;
+ case MODE_FLOAT:
+ float_p = true;
+ break;
+ case MODE_COMPLEX_INT:
+ complex_p = true;
+ inner_mode = GET_MODE_INNER (i);
+ break;
+ case MODE_COMPLEX_FLOAT:
+ float_p = true;
+ complex_p = true;
+ inner_mode = GET_MODE_INNER (i);
+ break;
+ case MODE_VECTOR_INT:
+ vector_p = true;
+ inner_mode = GET_MODE_INNER (i);
+ break;
+ case MODE_VECTOR_FLOAT:
+ float_p = true;
+ vector_p = true;
+ inner_mode = GET_MODE_INNER (i);
+ break;
+ default:
+ skip_p = true;
+ }
+
+ if (float_p)
+ {
+ const struct real_format *fmt = REAL_MODE_FORMAT (inner_mode);
+
+ /* ??? Cope with the ghost XFmode of the ARM port. */
+ if (!fmt)
+ continue;
+
+ if (fmt->b == 2)
+	    digs = (fmt->p - 1) * 1233 / 4096; /* scale by log10(2) */
+
+ else if (fmt->b == 10)
+ digs = fmt->p;
+
+ else
+ gcc_unreachable();
+
+ if (fmt == &vax_f_format
+ || fmt == &vax_d_format
+ || fmt == &vax_g_format)
+ float_rep = VAX_Native;
+ }
+
+ /* First register any C types for this mode that the front end
+ may need to know about, unless the mode should be skipped. */
+ if (!skip_p && !vector_p)
+ for (nameloop = 0; nameloop < ARRAY_SIZE (c_types); nameloop++)
+ {
+ tree type = c_types[nameloop];
+ const char *name = c_names[nameloop];
+
+ if (TYPE_MODE (type) == i)
+ {
+ f (name, digs, complex_p, 0, float_rep, TYPE_PRECISION (type),
+ TREE_INT_CST_LOW (TYPE_SIZE (type)), TYPE_ALIGN (type));
+ skip_p = true;
+ }
+ }
+
+ /* If no predefined C types were found, register the mode itself. */
+ if (!skip_p)
+ f (GET_MODE_NAME (i), digs, complex_p,
+ vector_p ? GET_MODE_NUNITS (i) : 0, float_rep,
+ GET_MODE_PRECISION (i), GET_MODE_BITSIZE (i),
+ GET_MODE_ALIGNMENT (i));
+ }
+}
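+
+/* As an illustrative sketch (not part of the original source), a callback
+   suitable for passing to enumerate_modes could look like:
+
+     static void
+     show_mode (const char *name, int digs, int complex_p, int count,
+		int float_rep, int precision, int size, int align)
+     {
+       printf ("%s: precision=%d size=%d align=%d\n",
+	       name, precision, size, align);
+     }
+
+   It would then be registered with enumerate_modes (show_mode). */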
+
+/* Return the size of the FP mode with precision PREC. */
+
+int
+fp_prec_to_size (int prec)
+{
+ enum machine_mode mode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_PRECISION (mode) == prec)
+ return GET_MODE_BITSIZE (mode);
+
+ gcc_unreachable ();
+}
+
+/* Return the precision of the FP mode with size SIZE. */
+
+int
+fp_size_to_prec (int size)
+{
+ enum machine_mode mode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_BITSIZE (mode) == size)
+ return GET_MODE_PRECISION (mode);
+
+ gcc_unreachable ();
+}
+
+static GTY(()) tree gnat_eh_personality_decl;
+
+/* Return the GNAT personality function decl. */
+
+static tree
+gnat_eh_personality (void)
+{
+ if (!gnat_eh_personality_decl)
+ gnat_eh_personality_decl = build_personality_function ("gnat");
+ return gnat_eh_personality_decl;
+}
+
+/* Initialize language-specific bits of tree_contains_struct. */
+
+static void
+gnat_init_ts (void)
+{
+ MARK_TS_COMMON (UNCONSTRAINED_ARRAY_TYPE);
+
+ MARK_TS_TYPED (UNCONSTRAINED_ARRAY_REF);
+ MARK_TS_TYPED (NULL_EXPR);
+ MARK_TS_TYPED (PLUS_NOMOD_EXPR);
+ MARK_TS_TYPED (MINUS_NOMOD_EXPR);
+ MARK_TS_TYPED (ATTR_ADDR_EXPR);
+ MARK_TS_TYPED (STMT_STMT);
+ MARK_TS_TYPED (LOOP_STMT);
+ MARK_TS_TYPED (EXIT_STMT);
+}
+
+/* Definitions for our language-specific hooks. */
+
+#undef LANG_HOOKS_NAME
+#define LANG_HOOKS_NAME "GNU Ada"
+#undef LANG_HOOKS_IDENTIFIER_SIZE
+#define LANG_HOOKS_IDENTIFIER_SIZE sizeof (struct tree_identifier)
+#undef LANG_HOOKS_INIT
+#define LANG_HOOKS_INIT gnat_init
+#undef LANG_HOOKS_OPTION_LANG_MASK
+#define LANG_HOOKS_OPTION_LANG_MASK gnat_option_lang_mask
+#undef LANG_HOOKS_INIT_OPTIONS_STRUCT
+#define LANG_HOOKS_INIT_OPTIONS_STRUCT gnat_init_options_struct
+#undef LANG_HOOKS_INIT_OPTIONS
+#define LANG_HOOKS_INIT_OPTIONS gnat_init_options
+#undef LANG_HOOKS_HANDLE_OPTION
+#define LANG_HOOKS_HANDLE_OPTION gnat_handle_option
+#undef LANG_HOOKS_POST_OPTIONS
+#define LANG_HOOKS_POST_OPTIONS gnat_post_options
+#undef LANG_HOOKS_PARSE_FILE
+#define LANG_HOOKS_PARSE_FILE gnat_parse_file
+#undef LANG_HOOKS_TYPE_HASH_EQ
+#define LANG_HOOKS_TYPE_HASH_EQ gnat_type_hash_eq
+#undef LANG_HOOKS_GETDECLS
+#define LANG_HOOKS_GETDECLS lhd_return_null_tree_v
+#undef LANG_HOOKS_PUSHDECL
+#define LANG_HOOKS_PUSHDECL gnat_return_tree
+#undef LANG_HOOKS_WRITE_GLOBALS
+#define LANG_HOOKS_WRITE_GLOBALS gnat_write_global_declarations
+#undef LANG_HOOKS_GET_ALIAS_SET
+#define LANG_HOOKS_GET_ALIAS_SET gnat_get_alias_set
+#undef LANG_HOOKS_PRINT_DECL
+#define LANG_HOOKS_PRINT_DECL gnat_print_decl
+#undef LANG_HOOKS_PRINT_TYPE
+#define LANG_HOOKS_PRINT_TYPE gnat_print_type
+#undef LANG_HOOKS_TYPE_MAX_SIZE
+#define LANG_HOOKS_TYPE_MAX_SIZE gnat_type_max_size
+#undef LANG_HOOKS_DECL_PRINTABLE_NAME
+#define LANG_HOOKS_DECL_PRINTABLE_NAME gnat_printable_name
+#undef LANG_HOOKS_DWARF_NAME
+#define LANG_HOOKS_DWARF_NAME gnat_dwarf_name
+#undef LANG_HOOKS_GIMPLIFY_EXPR
+#define LANG_HOOKS_GIMPLIFY_EXPR gnat_gimplify_expr
+#undef LANG_HOOKS_TYPE_FOR_MODE
+#define LANG_HOOKS_TYPE_FOR_MODE gnat_type_for_mode
+#undef LANG_HOOKS_TYPE_FOR_SIZE
+#define LANG_HOOKS_TYPE_FOR_SIZE gnat_type_for_size
+#undef LANG_HOOKS_TYPES_COMPATIBLE_P
+#define LANG_HOOKS_TYPES_COMPATIBLE_P gnat_types_compatible_p
+#undef LANG_HOOKS_GET_SUBRANGE_BOUNDS
+#define LANG_HOOKS_GET_SUBRANGE_BOUNDS gnat_get_subrange_bounds
+#undef LANG_HOOKS_DESCRIPTIVE_TYPE
+#define LANG_HOOKS_DESCRIPTIVE_TYPE gnat_descriptive_type
+#undef LANG_HOOKS_ATTRIBUTE_TABLE
+#define LANG_HOOKS_ATTRIBUTE_TABLE gnat_internal_attribute_table
+#undef LANG_HOOKS_BUILTIN_FUNCTION
+#define LANG_HOOKS_BUILTIN_FUNCTION gnat_builtin_function
+#undef LANG_HOOKS_EH_PERSONALITY
+#define LANG_HOOKS_EH_PERSONALITY gnat_eh_personality
+#undef LANG_HOOKS_DEEP_UNSHARING
+#define LANG_HOOKS_DEEP_UNSHARING true
+#undef LANG_HOOKS_INIT_TS
+#define LANG_HOOKS_INIT_TS gnat_init_ts
+
+struct lang_hooks lang_hooks = LANG_HOOKS_INITIALIZER;
+
+#include "gt-ada-misc.h"
diff --git a/gcc-4.9/gcc/ada/gcc-interface/targtyps.c b/gcc-4.9/gcc/ada/gcc-interface/targtyps.c
new file mode 100644
index 000000000..cfc45e7fe
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/targtyps.c
@@ -0,0 +1,266 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * T A R G T Y P S *
+ * *
+ * Body *
+ * *
+ * Copyright (C) 1992-2012, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License distributed with GNAT; see file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+/* Functions for retrieving target types.  See Ada package Get_Targ. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "tm.h"
+#include "tm_p.h"
+
+#include "ada.h"
+#include "types.h"
+#include "atree.h"
+#include "elists.h"
+#include "namet.h"
+#include "nlists.h"
+#include "snames.h"
+#include "stringt.h"
+#include "uintp.h"
+#include "urealp.h"
+#include "fe.h"
+#include "sinfo.h"
+#include "einfo.h"
+#include "ada-tree.h"
+#include "gigi.h"
+
+/* If we don't have a specific size for Ada's equivalent of `long', use that
+ of C. */
+#ifndef ADA_LONG_TYPE_SIZE
+#define ADA_LONG_TYPE_SIZE LONG_TYPE_SIZE
+#endif
+
+/* If we don't have a target definition of WIDEST_HARDWARE_FP_SIZE, assume
+ DOUBLE_TYPE_SIZE. We used to default to LONG_DOUBLE_TYPE_SIZE, which now
+   most often maps to 128 bits implemented with very inefficient software
+   emulation, so it is incorrect as a hardware size estimate. */
+
+#ifndef WIDEST_HARDWARE_FP_SIZE
+#define WIDEST_HARDWARE_FP_SIZE DOUBLE_TYPE_SIZE
+#endif
+
+/* The following provide a functional interface for the front end Ada code
+ to determine the sizes that are used for various C types. */
+
+Pos
+get_target_bits_per_unit (void)
+{
+ return BITS_PER_UNIT;
+}
+
+Pos
+get_target_bits_per_word (void)
+{
+ return BITS_PER_WORD;
+}
+
+Pos
+get_target_char_size (void)
+{
+ return CHAR_TYPE_SIZE;
+}
+
+Pos
+get_target_wchar_t_size (void)
+{
+  /* We never want wide characters narrower than "short" in Ada. */
+ return MAX (SHORT_TYPE_SIZE, WCHAR_TYPE_SIZE);
+}
+
+Pos
+get_target_short_size (void)
+{
+ return SHORT_TYPE_SIZE;
+}
+
+Pos
+get_target_int_size (void)
+{
+ return INT_TYPE_SIZE;
+}
+
+Pos
+get_target_long_size (void)
+{
+ return ADA_LONG_TYPE_SIZE;
+}
+
+Pos
+get_target_long_long_size (void)
+{
+ return LONG_LONG_TYPE_SIZE;
+}
+
+Pos
+get_target_float_size (void)
+{
+ return fp_prec_to_size (FLOAT_TYPE_SIZE);
+}
+
+Pos
+get_target_double_size (void)
+{
+ return fp_prec_to_size (DOUBLE_TYPE_SIZE);
+}
+
+Pos
+get_target_long_double_size (void)
+{
+ return fp_prec_to_size (WIDEST_HARDWARE_FP_SIZE);
+}
+
+Pos
+get_target_pointer_size (void)
+{
+ return POINTER_SIZE;
+}
+
+/* Alignment related values, mapped to attributes for functional and
+ documentation purposes. */
+
+/* Standard'Maximum_Default_Alignment. Maximum alignment that the compiler
+ might choose by default for a type or object.
+
+ Stricter alignment requests trigger gigi's aligning_type circuitry for
+ stack objects or objects allocated by the default allocator. */
+
+Pos
+get_target_maximum_default_alignment (void)
+{
+ return BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+}
+
+/* Standard'System_Allocator_Alignment. Alignment guaranteed to be honored
+ by the default allocator (System.Memory.Alloc or malloc if we have no
+ run-time library at hand).
+
+ Stricter alignment requests trigger gigi's aligning_type circuitry for
+ objects allocated by the default allocator. */
+
+/* ??? Need a way to get info about __gnat_malloc from here (whether it is
+ handy and what alignment it honors). In the meantime, resort to malloc
+ considerations only. */
+
+/* Account for MALLOC_OBSERVABLE_ALIGNMENT here.  Use this or the ABI
+ guaranteed alignment if greater. */
+
+#ifdef MALLOC_OBSERVABLE_ALIGNMENT
+#define MALLOC_ALIGNMENT MALLOC_OBSERVABLE_ALIGNMENT
+#else
+#define MALLOC_OBSERVABLE_ALIGNMENT (2 * LONG_TYPE_SIZE)
+#define MALLOC_ALIGNMENT \
+ MAX (MALLOC_ABI_ALIGNMENT, MALLOC_OBSERVABLE_ALIGNMENT)
+#endif
+
+Pos
+get_target_system_allocator_alignment (void)
+{
+ return MALLOC_ALIGNMENT / BITS_PER_UNIT;
+}
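+
+/* As a concrete illustration (assuming a typical LP64 Unix target where
+   LONG_TYPE_SIZE is 64 and MALLOC_OBSERVABLE_ALIGNMENT is not predefined):
+   MALLOC_OBSERVABLE_ALIGNMENT is 2 * 64 = 128 bits, so the function above
+   returns 128 / 8 = 16 bytes, matching the 16-byte alignment that malloc
+   guarantees on such targets. */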
+
+/* Standard'Maximum_Allowed_Alignment. Maximum alignment that we may
+ accept for any type or object. */
+
+#ifndef MAX_OFILE_ALIGNMENT
+#define MAX_OFILE_ALIGNMENT BIGGEST_ALIGNMENT
+#endif
+
+Pos
+get_target_maximum_allowed_alignment (void)
+{
+ return MAX_OFILE_ALIGNMENT / BITS_PER_UNIT;
+}
+
+/* Standard'Maximum_Alignment. The single attribute initially made
+ available, now a synonym of Standard'Maximum_Default_Alignment. */
+
+Pos
+get_target_maximum_alignment (void)
+{
+ return get_target_maximum_default_alignment ();
+}
+
+#ifndef FLOAT_WORDS_BIG_ENDIAN
+#define FLOAT_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
+#endif
+
+Nat
+get_float_words_be (void)
+{
+ return FLOAT_WORDS_BIG_ENDIAN;
+}
+
+Nat
+get_words_be (void)
+{
+ return WORDS_BIG_ENDIAN;
+}
+
+Nat
+get_bytes_be (void)
+{
+ return BYTES_BIG_ENDIAN;
+}
+
+Nat
+get_bits_be (void)
+{
+ return BITS_BIG_ENDIAN;
+}
+
+Nat
+get_target_strict_alignment (void)
+{
+ return STRICT_ALIGNMENT;
+}
+
+Nat
+get_target_double_float_alignment (void)
+{
+#ifdef TARGET_ALIGN_NATURAL
+ /* This macro is only defined by the rs6000 port. */
+ if (!TARGET_ALIGN_NATURAL
+ && (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN))
+ return 32 / BITS_PER_UNIT;
+#endif
+ return 0;
+}
+
+Nat
+get_target_double_scalar_alignment (void)
+{
+#ifdef TARGET_ALIGN_DOUBLE
+ /* This macro is only defined by the i386 and sh ports. */
+ if (!TARGET_ALIGN_DOUBLE
+#ifdef TARGET_64BIT
+ && !TARGET_64BIT
+#endif
+ )
+ return 32 / BITS_PER_UNIT;
+#endif
+ return 0;
+}
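+
+/* For instance, on 32-bit x86 without -malign-double, TARGET_ALIGN_DOUBLE
+   evaluates to false and the function above returns 4, telling gigi that
+   64-bit scalars are only guaranteed 4-byte alignment by that ABI.  */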
diff --git a/gcc-4.9/gcc/ada/gcc-interface/trans.c b/gcc-4.9/gcc/ada/gcc-interface/trans.c
new file mode 100644
index 000000000..4a4d0faa9
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/trans.c
@@ -0,0 +1,9469 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * T R A N S *
+ * *
+ * C Implementation File *
+ * *
+ * Copyright (C) 1992-2014, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License distributed with GNAT; see file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "stmt.h"
+#include "varasm.h"
+#include "flags.h"
+#include "output.h"
+#include "libfuncs.h" /* For set_stack_check_libfunc. */
+#include "tree-iterator.h"
+#include "pointer-set.h"
+#include "gimple-expr.h"
+#include "gimplify.h"
+#include "bitmap.h"
+#include "cgraph.h"
+#include "diagnostic.h"
+#include "opts.h"
+#include "target.h"
+#include "common/common-target.h"
+
+#include "ada.h"
+#include "adadecode.h"
+#include "types.h"
+#include "atree.h"
+#include "elists.h"
+#include "namet.h"
+#include "nlists.h"
+#include "snames.h"
+#include "stringt.h"
+#include "uintp.h"
+#include "urealp.h"
+#include "fe.h"
+#include "sinfo.h"
+#include "einfo.h"
+#include "gadaint.h"
+#include "ada-tree.h"
+#include "gigi.h"
+
+/* We should avoid allocating more than ALLOCA_THRESHOLD bytes via alloca,
+ for fear of running out of stack space. If we need more, we use xmalloc
+ instead. */
+#define ALLOCA_THRESHOLD 1000
+
+/* In configurations where blocks have no end_locus attached, just
+ sink assignments into a dummy global. */
+#ifndef BLOCK_SOURCE_END_LOCATION
+static location_t block_end_locus_sink;
+#define BLOCK_SOURCE_END_LOCATION(BLOCK) block_end_locus_sink
+#endif
+
+/* For efficient float-to-int rounding, it is necessary to know whether
+ floating-point arithmetic may use wider intermediate results. When
+ FP_ARITH_MAY_WIDEN is not defined, be conservative and only assume
+ that arithmetic does not widen if double precision is emulated. */
+#ifndef FP_ARITH_MAY_WIDEN
+#if defined(HAVE_extendsfdf2)
+#define FP_ARITH_MAY_WIDEN HAVE_extendsfdf2
+#else
+#define FP_ARITH_MAY_WIDEN 0
+#endif
+#endif
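+
+/* For instance, 32-bit x86 code using the x87 unit computes in 80-bit
+   registers, so intermediate results are wider than the nominal operand
+   precision; the HAVE_extendsfdf2 fallback above is only a conservative
+   approximation of whether such widening can happen.  */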
+
+/* Pointers to front-end tables accessed through macros. */
+struct Node *Nodes_Ptr;
+struct Flags *Flags_Ptr;
+Node_Id *Next_Node_Ptr;
+Node_Id *Prev_Node_Ptr;
+struct Elist_Header *Elists_Ptr;
+struct Elmt_Item *Elmts_Ptr;
+struct String_Entry *Strings_Ptr;
+Char_Code *String_Chars_Ptr;
+struct List_Header *List_Headers_Ptr;
+
+/* Highest number in the front-end node table. */
+int max_gnat_nodes;
+
+/* Current node being treated, in case abort is called.  */
+Node_Id error_gnat_node;
+
+/* True when gigi is being called on an analyzed but unexpanded
+ tree, and the only purpose of the call is to properly annotate
+ types with representation information. */
+bool type_annotate_only;
+
+/* Current filename without path. */
+const char *ref_filename;
+
+
+/* List of N_Validate_Unchecked_Conversion nodes in the unit. */
+static vec<Node_Id> gnat_validate_uc_list;
+
+/* When not optimizing, we cache the 'First, 'Last and 'Length attributes
+ of unconstrained array IN parameters to avoid emitting a great deal of
+ redundant instructions to recompute them each time. */
+struct GTY (()) parm_attr_d {
+ int id; /* GTY doesn't like Entity_Id. */
+ int dim;
+ tree first;
+ tree last;
+ tree length;
+};
+
+typedef struct parm_attr_d *parm_attr;
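+
+/* For example, given an unconstrained array IN parameter A and the loop
+   "for J in A'First .. A'Last loop ... end loop;", the bounds fetched
+   from the fat pointer are computed once, cached here, and reused for
+   later references to A'First, A'Last or A'Length.  */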
+
+
+struct GTY(()) language_function {
+ vec<parm_attr, va_gc> *parm_attr_cache;
+ bitmap named_ret_val;
+ vec<tree, va_gc> *other_ret_val;
+ int gnat_ret;
+};
+
+#define f_parm_attr_cache \
+ DECL_STRUCT_FUNCTION (current_function_decl)->language->parm_attr_cache
+
+#define f_named_ret_val \
+ DECL_STRUCT_FUNCTION (current_function_decl)->language->named_ret_val
+
+#define f_other_ret_val \
+ DECL_STRUCT_FUNCTION (current_function_decl)->language->other_ret_val
+
+#define f_gnat_ret \
+ DECL_STRUCT_FUNCTION (current_function_decl)->language->gnat_ret
+
+/* A structure used to gather together information about a statement group.
+ We use this to gather related statements, for example the "then" part
+ of a IF. In the case where it represents a lexical scope, we may also
+ have a BLOCK node corresponding to it and/or cleanups. */
+
+struct GTY((chain_next ("%h.previous"))) stmt_group {
+ struct stmt_group *previous; /* Previous code group. */
+ tree stmt_list; /* List of statements for this code group. */
+ tree block; /* BLOCK for this code group, if any. */
+ tree cleanups; /* Cleanups for this code group, if any. */
+};
+
+static GTY(()) struct stmt_group *current_stmt_group;
+
+/* List of unused struct stmt_group nodes. */
+static GTY((deletable)) struct stmt_group *stmt_group_free_list;
+
+/* A structure used to record information on elaboration procedures
+ we've made and need to process.
+
+ ??? gnat_node should be Node_Id, but gengtype gets confused. */
+
+struct GTY((chain_next ("%h.next"))) elab_info {
+ struct elab_info *next; /* Pointer to next in chain. */
+ tree elab_proc; /* Elaboration procedure. */
+ int gnat_node; /* The N_Compilation_Unit. */
+};
+
+static GTY(()) struct elab_info *elab_info_list;
+
+/* Stack of exception pointer variables. Each entry is the VAR_DECL
+ that stores the address of the raised exception. Nonzero means we
+ are in an exception handler. Not used in the zero-cost case. */
+static GTY(()) vec<tree, va_gc> *gnu_except_ptr_stack;
+
+/* In ZCX case, current exception pointer. Used to re-raise it. */
+static GTY(()) tree gnu_incoming_exc_ptr;
+
+/* Stack for storing the current elaboration procedure decl. */
+static GTY(()) vec<tree, va_gc> *gnu_elab_proc_stack;
+
+/* Stack of labels to be used as a goto target instead of a return in
+ some functions. See processing for N_Subprogram_Body. */
+static GTY(()) vec<tree, va_gc> *gnu_return_label_stack;
+
+/* Stack of variables for the return value of a function with copy-in/copy-out
+ parameters. See processing for N_Subprogram_Body. */
+static GTY(()) vec<tree, va_gc> *gnu_return_var_stack;
+
+/* Structure used to record information for a range check. */
+struct GTY(()) range_check_info_d {
+ tree low_bound;
+ tree high_bound;
+ tree type;
+ tree invariant_cond;
+};
+
+typedef struct range_check_info_d *range_check_info;
+
+
+/* Structure used to record information for a loop. */
+struct GTY(()) loop_info_d {
+ tree stmt;
+ tree loop_var;
+ vec<range_check_info, va_gc> *checks;
+};
+
+typedef struct loop_info_d *loop_info;
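+
+/* CHECKS accumulates information about range checks performed inside the
+   loop so that checks whose bounds are loop-invariant can be evaluated on
+   entry to the loop instead of at each iteration; INVARIANT_COND in
+   range_check_info_d above holds the invariant condition guarding such a
+   check.  */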
+
+
+/* Stack of loop_info structures associated with LOOP_STMT nodes. */
+static GTY(()) vec<loop_info, va_gc> *gnu_loop_stack;
+
+/* The stacks for N_{Push,Pop}_*_Label. */
+static GTY(()) vec<tree, va_gc> *gnu_constraint_error_label_stack;
+static GTY(()) vec<tree, va_gc> *gnu_storage_error_label_stack;
+static GTY(()) vec<tree, va_gc> *gnu_program_error_label_stack;
+
+/* Map GNAT tree codes to GCC tree codes for simple expressions. */
+static enum tree_code gnu_codes[Number_Node_Kinds];
+
+static void init_code_table (void);
+static void Compilation_Unit_to_gnu (Node_Id);
+static void record_code_position (Node_Id);
+static void insert_code_for (Node_Id);
+static void add_cleanup (tree, Node_Id);
+static void add_stmt_list (List_Id);
+static void push_exception_label_stack (vec<tree, va_gc> **, Entity_Id);
+static tree build_stmt_group (List_Id, bool);
+static inline bool stmt_group_may_fallthru (void);
+static enum gimplify_status gnat_gimplify_stmt (tree *);
+static void elaborate_all_entities (Node_Id);
+static void process_freeze_entity (Node_Id);
+static void process_decls (List_Id, List_Id, Node_Id, bool, bool);
+static tree emit_range_check (tree, Node_Id, Node_Id);
+static tree emit_index_check (tree, tree, tree, tree, Node_Id);
+static tree emit_check (tree, tree, int, Node_Id);
+static tree build_unary_op_trapv (enum tree_code, tree, tree, Node_Id);
+static tree build_binary_op_trapv (enum tree_code, tree, tree, tree, Node_Id);
+static tree convert_with_check (Entity_Id, tree, bool, bool, bool, Node_Id);
+static bool addressable_p (tree, tree);
+static tree assoc_to_constructor (Entity_Id, Node_Id, tree);
+static tree extract_values (tree, tree);
+static tree pos_to_constructor (Node_Id, tree, Entity_Id);
+static void validate_unchecked_conversion (Node_Id);
+static tree maybe_implicit_deref (tree);
+static void set_expr_location_from_node (tree, Node_Id);
+static void set_expr_location_from_node1 (tree, Node_Id, bool);
+static bool Sloc_to_locus1 (Source_Ptr, location_t *, bool);
+static bool set_end_locus_from_node (tree, Node_Id);
+static void set_gnu_expr_location_from_node (tree, Node_Id);
+static int lvalue_required_p (Node_Id, tree, bool, bool, bool);
+static tree build_raise_check (int, enum exception_info_kind);
+static tree create_init_temporary (const char *, tree, tree *, Node_Id);
+
+/* Hooks for debug info back-ends, only supported and used in a restricted set
+ of configurations. */
+static const char *extract_encoding (const char *) ATTRIBUTE_UNUSED;
+static const char *decode_name (const char *) ATTRIBUTE_UNUSED;
+
+/* This is the main program of the back-end. It sets up all the table
+ structures and then generates code. */
+
+void
+gigi (Node_Id gnat_root,
+ int max_gnat_node,
+ int number_name ATTRIBUTE_UNUSED,
+ struct Node *nodes_ptr,
+ struct Flags *flags_ptr,
+ Node_Id *next_node_ptr,
+ Node_Id *prev_node_ptr,
+ struct Elist_Header *elists_ptr,
+ struct Elmt_Item *elmts_ptr,
+ struct String_Entry *strings_ptr,
+ Char_Code *string_chars_ptr,
+ struct List_Header *list_headers_ptr,
+ Nat number_file,
+ struct File_Info_Type *file_info_ptr,
+ Entity_Id standard_boolean,
+ Entity_Id standard_integer,
+ Entity_Id standard_character,
+ Entity_Id standard_long_long_float,
+ Entity_Id standard_exception_type,
+ Int gigi_operating_mode)
+{
+ Node_Id gnat_iter;
+ Entity_Id gnat_literal;
+ tree long_long_float_type, exception_type, t, ftype;
+ tree int64_type = gnat_type_for_size (64, 0);
+ struct elab_info *info;
+ int i;
+
+ max_gnat_nodes = max_gnat_node;
+
+ Nodes_Ptr = nodes_ptr;
+ Flags_Ptr = flags_ptr;
+ Next_Node_Ptr = next_node_ptr;
+ Prev_Node_Ptr = prev_node_ptr;
+ Elists_Ptr = elists_ptr;
+ Elmts_Ptr = elmts_ptr;
+ Strings_Ptr = strings_ptr;
+ String_Chars_Ptr = string_chars_ptr;
+ List_Headers_Ptr = list_headers_ptr;
+
+ type_annotate_only = (gigi_operating_mode == 1);
+
+#if TARGET_ABI_OPEN_VMS
+ vms_float_format = Float_Format;
+#endif
+
+ for (i = 0; i < number_file; i++)
+ {
+ /* Use the identifier table to make a permanent copy of the filename as
+ the name table gets reallocated after Gigi returns but before all the
+ debugging information is output. The __gnat_to_canonical_file_spec
+ call translates filenames from pragmas Source_Reference that contain
+ host style syntax not understood by gdb. */
+ const char *filename
+ = IDENTIFIER_POINTER
+ (get_identifier
+ (__gnat_to_canonical_file_spec
+ (Get_Name_String (file_info_ptr[i].File_Name))));
+
+ /* We rely on the order isomorphism between files and line maps. */
+ gcc_assert ((int) LINEMAPS_ORDINARY_USED (line_table) == i);
+
+ /* We create the line map for a source file at once, with a fixed number
+ of columns chosen to avoid jumping over the next power of 2. */
+ linemap_add (line_table, LC_ENTER, 0, filename, 1);
+ linemap_line_start (line_table, file_info_ptr[i].Num_Source_Lines, 252);
+ linemap_position_for_column (line_table, 252 - 1);
+ linemap_add (line_table, LC_LEAVE, 0, NULL, 0);
+ }
+
+ gcc_assert (Nkind (gnat_root) == N_Compilation_Unit);
+
+ /* Declare the name of the compilation unit as the first global
+ name in order to make the middle-end fully deterministic. */
+ t = create_concat_name (Defining_Entity (Unit (gnat_root)), NULL);
+ first_global_object_name = ggc_strdup (IDENTIFIER_POINTER (t));
+
+ /* Initialize ourselves. */
+ init_code_table ();
+ init_gnat_utils ();
+
+ /* If we are just annotating types, give VOID_TYPE zero sizes to avoid
+ errors. */
+ if (type_annotate_only)
+ {
+ TYPE_SIZE (void_type_node) = bitsize_zero_node;
+ TYPE_SIZE_UNIT (void_type_node) = size_zero_node;
+ }
+
+  /* Enable GNAT stack checking method if needed.  */
+ if (!Stack_Check_Probes_On_Target)
+ set_stack_check_libfunc ("_gnat_stack_check");
+
+ /* Retrieve alignment settings. */
+ double_float_alignment = get_target_double_float_alignment ();
+ double_scalar_alignment = get_target_double_scalar_alignment ();
+
+ /* Record the builtin types. Define `integer' and `character' first so that
+ dbx will output them first. */
+ record_builtin_type ("integer", integer_type_node, false);
+ record_builtin_type ("character", unsigned_char_type_node, false);
+ record_builtin_type ("boolean", boolean_type_node, false);
+ record_builtin_type ("void", void_type_node, false);
+
+ /* Save the type we made for integer as the type for Standard.Integer. */
+ save_gnu_tree (Base_Type (standard_integer),
+ TYPE_NAME (integer_type_node),
+ false);
+
+ /* Likewise for character as the type for Standard.Character. */
+ save_gnu_tree (Base_Type (standard_character),
+ TYPE_NAME (unsigned_char_type_node),
+ false);
+
+ /* Likewise for boolean as the type for Standard.Boolean. */
+ save_gnu_tree (Base_Type (standard_boolean),
+ TYPE_NAME (boolean_type_node),
+ false);
+ gnat_literal = First_Literal (Base_Type (standard_boolean));
+ t = UI_To_gnu (Enumeration_Rep (gnat_literal), boolean_type_node);
+ gcc_assert (t == boolean_false_node);
+ t = create_var_decl (get_entity_name (gnat_literal), NULL_TREE,
+ boolean_type_node, t, true, false, false, false,
+ NULL, gnat_literal);
+ DECL_IGNORED_P (t) = 1;
+ save_gnu_tree (gnat_literal, t, false);
+ gnat_literal = Next_Literal (gnat_literal);
+ t = UI_To_gnu (Enumeration_Rep (gnat_literal), boolean_type_node);
+ gcc_assert (t == boolean_true_node);
+ t = create_var_decl (get_entity_name (gnat_literal), NULL_TREE,
+ boolean_type_node, t, true, false, false, false,
+ NULL, gnat_literal);
+ DECL_IGNORED_P (t) = 1;
+ save_gnu_tree (gnat_literal, t, false);
+
+ void_ftype = build_function_type_list (void_type_node, NULL_TREE);
+ ptr_void_ftype = build_pointer_type (void_ftype);
+
+ /* Now declare run-time functions. */
+ ftype = build_function_type_list (ptr_void_type_node, sizetype, NULL_TREE);
+
+ /* malloc is a function declaration tree for a function to allocate
+ memory. */
+ malloc_decl
+ = create_subprog_decl (get_identifier ("__gnat_malloc"), NULL_TREE,
+ ftype, NULL_TREE, is_disabled, true, true, true,
+ NULL, Empty);
+ DECL_IS_MALLOC (malloc_decl) = 1;
+
+ /* malloc32 is a function declaration tree for a function to allocate
+ 32-bit memory on a 64-bit system. Needed only on 64-bit VMS. */
+ malloc32_decl
+ = create_subprog_decl (get_identifier ("__gnat_malloc32"), NULL_TREE,
+ ftype, NULL_TREE, is_disabled, true, true, true,
+ NULL, Empty);
+ DECL_IS_MALLOC (malloc32_decl) = 1;
+
+ /* free is a function declaration tree for a function to free memory. */
+ free_decl
+ = create_subprog_decl (get_identifier ("__gnat_free"), NULL_TREE,
+ build_function_type_list (void_type_node,
+ ptr_void_type_node,
+ NULL_TREE),
+ NULL_TREE, is_disabled, true, true, true, NULL,
+ Empty);
+
+ /* This is used for 64-bit multiplication with overflow checking. */
+ mulv64_decl
+ = create_subprog_decl (get_identifier ("__gnat_mulv64"), NULL_TREE,
+ build_function_type_list (int64_type, int64_type,
+ int64_type, NULL_TREE),
+ NULL_TREE, is_disabled, true, true, true, NULL,
+ Empty);
+
+ /* Name of the _Parent field in tagged record types. */
+ parent_name_id = get_identifier (Get_Name_String (Name_uParent));
+
+ /* Name of the Exception_Data type defined in System.Standard_Library. */
+ exception_data_name_id
+ = get_identifier ("system__standard_library__exception_data");
+
+ /* Make the types and functions used for exception processing. */
+ jmpbuf_type
+ = build_array_type (gnat_type_for_mode (Pmode, 0),
+ build_index_type (size_int (5)));
+ record_builtin_type ("JMPBUF_T", jmpbuf_type, true);
+ jmpbuf_ptr_type = build_pointer_type (jmpbuf_type);
+
+ /* Functions to get and set the jumpbuf pointer for the current thread. */
+ get_jmpbuf_decl
+ = create_subprog_decl
+ (get_identifier ("system__soft_links__get_jmpbuf_address_soft"),
+ NULL_TREE, build_function_type_list (jmpbuf_ptr_type, NULL_TREE),
+ NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+ DECL_IGNORED_P (get_jmpbuf_decl) = 1;
+
+ set_jmpbuf_decl
+ = create_subprog_decl
+ (get_identifier ("system__soft_links__set_jmpbuf_address_soft"),
+ NULL_TREE, build_function_type_list (void_type_node, jmpbuf_ptr_type,
+ NULL_TREE),
+ NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+ DECL_IGNORED_P (set_jmpbuf_decl) = 1;
+
+ /* setjmp returns an integer and has one operand, which is a pointer to
+ a jmpbuf. */
+ setjmp_decl
+ = create_subprog_decl
+ (get_identifier ("__builtin_setjmp"), NULL_TREE,
+ build_function_type_list (integer_type_node, jmpbuf_ptr_type,
+ NULL_TREE),
+ NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+ DECL_BUILT_IN_CLASS (setjmp_decl) = BUILT_IN_NORMAL;
+ DECL_FUNCTION_CODE (setjmp_decl) = BUILT_IN_SETJMP;
+
+ /* update_setjmp_buf updates a setjmp buffer from the current stack pointer
+ address. */
+ update_setjmp_buf_decl
+ = create_subprog_decl
+ (get_identifier ("__builtin_update_setjmp_buf"), NULL_TREE,
+ build_function_type_list (void_type_node, jmpbuf_ptr_type, NULL_TREE),
+ NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+ DECL_BUILT_IN_CLASS (update_setjmp_buf_decl) = BUILT_IN_NORMAL;
+ DECL_FUNCTION_CODE (update_setjmp_buf_decl) = BUILT_IN_UPDATE_SETJMP_BUF;
+
+ /* Hooks to call when entering/leaving an exception handler. */
+ ftype
+ = build_function_type_list (void_type_node, ptr_void_type_node, NULL_TREE);
+
+ begin_handler_decl
+ = create_subprog_decl (get_identifier ("__gnat_begin_handler"), NULL_TREE,
+ ftype, NULL_TREE, is_disabled, true, true, true,
+ NULL, Empty);
+ DECL_IGNORED_P (begin_handler_decl) = 1;
+
+ end_handler_decl
+ = create_subprog_decl (get_identifier ("__gnat_end_handler"), NULL_TREE,
+ ftype, NULL_TREE, is_disabled, true, true, true,
+ NULL, Empty);
+ DECL_IGNORED_P (end_handler_decl) = 1;
+
+ unhandled_except_decl
+ = create_subprog_decl (get_identifier ("__gnat_unhandled_except_handler"),
+ NULL_TREE,
+ ftype, NULL_TREE, is_disabled, true, true, true,
+ NULL, Empty);
+ DECL_IGNORED_P (unhandled_except_decl) = 1;
+
+ reraise_zcx_decl
+ = create_subprog_decl (get_identifier ("__gnat_reraise_zcx"), NULL_TREE,
+ ftype, NULL_TREE, is_disabled, true, true, true,
+ NULL, Empty);
+ /* Indicate that these never return. */
+ DECL_IGNORED_P (reraise_zcx_decl) = 1;
+ TREE_THIS_VOLATILE (reraise_zcx_decl) = 1;
+ TREE_SIDE_EFFECTS (reraise_zcx_decl) = 1;
+ TREE_TYPE (reraise_zcx_decl)
+ = build_qualified_type (TREE_TYPE (reraise_zcx_decl), TYPE_QUAL_VOLATILE);
+
+ /* If in no exception handlers mode, all raise statements are redirected to
+ __gnat_last_chance_handler. No need to redefine raise_nodefer_decl since
+ this procedure will never be called in this mode. */
+ if (No_Exception_Handlers_Set ())
+ {
+ tree decl
+ = create_subprog_decl
+ (get_identifier ("__gnat_last_chance_handler"), NULL_TREE,
+ build_function_type_list (void_type_node,
+ build_pointer_type
+ (unsigned_char_type_node),
+ integer_type_node, NULL_TREE),
+ NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+ TREE_THIS_VOLATILE (decl) = 1;
+ TREE_SIDE_EFFECTS (decl) = 1;
+ TREE_TYPE (decl)
+ = build_qualified_type (TREE_TYPE (decl), TYPE_QUAL_VOLATILE);
+ for (i = 0; i < (int) ARRAY_SIZE (gnat_raise_decls); i++)
+ gnat_raise_decls[i] = decl;
+ }
+ else
+ {
+ /* Otherwise, make one decl for each exception reason. */
+ for (i = 0; i < (int) ARRAY_SIZE (gnat_raise_decls); i++)
+ gnat_raise_decls[i] = build_raise_check (i, exception_simple);
+ for (i = 0; i < (int) ARRAY_SIZE (gnat_raise_decls_ext); i++)
+ gnat_raise_decls_ext[i]
+ = build_raise_check (i,
+ i == CE_Index_Check_Failed
+ || i == CE_Range_Check_Failed
+ || i == CE_Invalid_Data
+ ? exception_range : exception_column);
+ }
+
+ /* Set the types that GCC and Gigi use from the front end. */
+ exception_type
+ = gnat_to_gnu_entity (Base_Type (standard_exception_type), NULL_TREE, 0);
+ except_type_node = TREE_TYPE (exception_type);
+
+ /* Make other functions used for exception processing. */
+ get_excptr_decl
+ = create_subprog_decl
+ (get_identifier ("system__soft_links__get_gnat_exception"), NULL_TREE,
+ build_function_type_list (build_pointer_type (except_type_node),
+ NULL_TREE),
+ NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+ DECL_IGNORED_P (get_excptr_decl) = 1;
+
+ set_exception_parameter_decl
+ = create_subprog_decl
+ (get_identifier ("__gnat_set_exception_parameter"), NULL_TREE,
+ build_function_type_list (void_type_node,
+ ptr_void_type_node,
+ ptr_void_type_node,
+ NULL_TREE),
+ NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+
+ raise_nodefer_decl
+ = create_subprog_decl
+ (get_identifier ("__gnat_raise_nodefer_with_msg"), NULL_TREE,
+ build_function_type_list (void_type_node,
+ build_pointer_type (except_type_node),
+ NULL_TREE),
+ NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+
+ /* Indicate that it never returns. */
+ TREE_THIS_VOLATILE (raise_nodefer_decl) = 1;
+ TREE_SIDE_EFFECTS (raise_nodefer_decl) = 1;
+ TREE_TYPE (raise_nodefer_decl)
+ = build_qualified_type (TREE_TYPE (raise_nodefer_decl),
+ TYPE_QUAL_VOLATILE);
+
+ /* Build the special descriptor type and its null node if needed. */
+ if (TARGET_VTABLE_USES_DESCRIPTORS)
+ {
+ tree null_node = fold_convert (ptr_void_ftype, null_pointer_node);
+ tree field_list = NULL_TREE;
+ int j;
+ vec<constructor_elt, va_gc> *null_vec = NULL;
+ constructor_elt *elt;
+
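+      /* On targets that use them (e.g. IA-64), a function descriptor
+	 groups several code-address-sized words (code address plus static
+	 chain or GP); build a RECORD_TYPE with that many pointer-to-
+	 function fields and a matching all-NULL constant.  */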
+ fdesc_type_node = make_node (RECORD_TYPE);
+ vec_safe_grow (null_vec, TARGET_VTABLE_USES_DESCRIPTORS);
+ elt = (null_vec->address () + TARGET_VTABLE_USES_DESCRIPTORS - 1);
+
+ for (j = 0; j < TARGET_VTABLE_USES_DESCRIPTORS; j++)
+ {
+ tree field
+ = create_field_decl (NULL_TREE, ptr_void_ftype, fdesc_type_node,
+ NULL_TREE, NULL_TREE, 0, 1);
+ DECL_CHAIN (field) = field_list;
+ field_list = field;
+ elt->index = field;
+ elt->value = null_node;
+ elt--;
+ }
+
+ finish_record_type (fdesc_type_node, nreverse (field_list), 0, false);
+ record_builtin_type ("descriptor", fdesc_type_node, true);
+ null_fdesc_node = gnat_build_constructor (fdesc_type_node, null_vec);
+ }
+
+ long_long_float_type
+ = gnat_to_gnu_entity (Base_Type (standard_long_long_float), NULL_TREE, 0);
+
+ if (TREE_CODE (TREE_TYPE (long_long_float_type)) == INTEGER_TYPE)
+ {
+ /* In this case, the builtin floating point types are VAX float,
+ so make up a type for use. */
+ longest_float_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (longest_float_type_node) = LONG_DOUBLE_TYPE_SIZE;
+ layout_type (longest_float_type_node);
+ record_builtin_type ("longest float type", longest_float_type_node,
+ false);
+ }
+ else
+ longest_float_type_node = TREE_TYPE (long_long_float_type);
+
+ /* Dummy objects to materialize "others" and "all others" in the exception
+ tables. These are exported by a-exexpr-gcc.adb, so see this unit for
+ the types to use. */
+ others_decl
+ = create_var_decl (get_identifier ("OTHERS"),
+ get_identifier ("__gnat_others_value"),
+ unsigned_char_type_node,
+ NULL_TREE, true, false, true, false, NULL, Empty);
+
+ all_others_decl
+ = create_var_decl (get_identifier ("ALL_OTHERS"),
+ get_identifier ("__gnat_all_others_value"),
+ unsigned_char_type_node,
+ NULL_TREE, true, false, true, false, NULL, Empty);
+
+ unhandled_others_decl
+ = create_var_decl (get_identifier ("UNHANDLED_OTHERS"),
+ get_identifier ("__gnat_unhandled_others_value"),
+ unsigned_char_type_node,
+ NULL_TREE, true, false, true, false, NULL, Empty);
+
+ main_identifier_node = get_identifier ("main");
+
+ /* Install the builtins we might need, either internally or as
+ user available facilities for Intrinsic imports. */
+ gnat_install_builtins ();
+
+ vec_safe_push (gnu_except_ptr_stack, NULL_TREE);
+ vec_safe_push (gnu_constraint_error_label_stack, NULL_TREE);
+ vec_safe_push (gnu_storage_error_label_stack, NULL_TREE);
+ vec_safe_push (gnu_program_error_label_stack, NULL_TREE);
+
+ /* Process any Pragma Ident for the main unit. */
+ if (Present (Ident_String (Main_Unit)))
+ targetm.asm_out.output_ident
+ (TREE_STRING_POINTER (gnat_to_gnu (Ident_String (Main_Unit))));
+
+ /* If we are using the GCC exception mechanism, let GCC know. */
+ if (Exception_Mechanism == Back_End_Exceptions)
+ gnat_init_gcc_eh ();
+
+ /* Initialize the GCC support for FP operations. */
+ gnat_init_gcc_fp ();
+
+ /* Now translate the compilation unit proper. */
+ Compilation_Unit_to_gnu (gnat_root);
+
+ /* Then process the N_Validate_Unchecked_Conversion nodes. We do this at
+ the very end to avoid having to second-guess the front-end when we run
+ into dummy nodes during the regular processing. */
+ for (i = 0; gnat_validate_uc_list.iterate (i, &gnat_iter); i++)
+ validate_unchecked_conversion (gnat_iter);
+ gnat_validate_uc_list.release ();
+
+ /* Finally see if we have any elaboration procedures to deal with. */
+ for (info = elab_info_list; info; info = info->next)
+ {
+ tree gnu_body = DECL_SAVED_TREE (info->elab_proc), gnu_stmts;
+
+ /* We should have a BIND_EXPR but it may not have any statements in it.
+ If it doesn't have any, we have nothing to do except for setting the
+	 flag on the GNAT node.  Otherwise, process the function like the
+	 others.  */
+ gnu_stmts = gnu_body;
+ if (TREE_CODE (gnu_stmts) == BIND_EXPR)
+ gnu_stmts = BIND_EXPR_BODY (gnu_stmts);
+ if (!gnu_stmts || !STATEMENT_LIST_HEAD (gnu_stmts))
+ Set_Has_No_Elaboration_Code (info->gnat_node, 1);
+ else
+ {
+ begin_subprog_body (info->elab_proc);
+ end_subprog_body (gnu_body);
+ rest_of_subprog_body_compilation (info->elab_proc);
+ }
+ }
+
+ /* Destroy ourselves. */
+ destroy_gnat_utils ();
+
+ /* We cannot track the location of errors past this point. */
+ error_gnat_node = Empty;
+}
+
+/* Return a subprogram decl corresponding to __gnat_rcheck_xx for the given
+ CHECK if KIND is EXCEPTION_SIMPLE, or else to __gnat_rcheck_xx_ext. */
+
+static tree
+build_raise_check (int check, enum exception_info_kind kind)
+{
+ tree result, ftype;
+ const char pfx[] = "__gnat_rcheck_";
+
+ strcpy (Name_Buffer, pfx);
+ Name_Len = sizeof (pfx) - 1;
+ Get_RT_Exception_Name (check);
+
+ if (kind == exception_simple)
+ {
+ Name_Buffer[Name_Len] = 0;
+ ftype
+ = build_function_type_list (void_type_node,
+ build_pointer_type
+ (unsigned_char_type_node),
+ integer_type_node, NULL_TREE);
+ }
+ else
+ {
+ tree t = (kind == exception_column ? NULL_TREE : integer_type_node);
+
+ strcpy (Name_Buffer + Name_Len, "_ext");
+ Name_Buffer[Name_Len + 4] = 0;
+ ftype
+ = build_function_type_list (void_type_node,
+ build_pointer_type
+ (unsigned_char_type_node),
+ integer_type_node, integer_type_node,
+ t, t, NULL_TREE);
+ }
+
+ result
+ = create_subprog_decl (get_identifier (Name_Buffer),
+ NULL_TREE, ftype, NULL_TREE,
+ is_disabled, true, true, true, NULL, Empty);
+
+ /* Indicate that it never returns. */
+ TREE_THIS_VOLATILE (result) = 1;
+ TREE_SIDE_EFFECTS (result) = 1;
+ TREE_TYPE (result)
+ = build_qualified_type (TREE_TYPE (result), TYPE_QUAL_VOLATILE);
+
+ return result;
+}
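+
+/* As an illustration (assuming the run-time library spells its exported
+   names this way), build_raise_check (CE_Overflow_Check_Failed,
+   exception_simple) declares the equivalent of
+
+     extern void __gnat_rcheck_CE_Overflow_Check (const char *file,
+						  int line);
+
+   marked as non-returning like raise_nodefer_decl above.  */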
+
+/* Return a positive value if an lvalue is required for GNAT_NODE, which is
+ an N_Attribute_Reference. */
+
+static int
+lvalue_required_for_attribute_p (Node_Id gnat_node)
+{
+ switch (Get_Attribute_Id (Attribute_Name (gnat_node)))
+ {
+ case Attr_Pos:
+ case Attr_Val:
+ case Attr_Pred:
+ case Attr_Succ:
+ case Attr_First:
+ case Attr_Last:
+ case Attr_Range_Length:
+ case Attr_Length:
+ case Attr_Object_Size:
+ case Attr_Value_Size:
+ case Attr_Component_Size:
+ case Attr_Max_Size_In_Storage_Elements:
+ case Attr_Min:
+ case Attr_Max:
+ case Attr_Null_Parameter:
+ case Attr_Passed_By_Reference:
+ case Attr_Mechanism_Code:
+ return 0;
+
+ case Attr_Address:
+ case Attr_Access:
+ case Attr_Unchecked_Access:
+ case Attr_Unrestricted_Access:
+ case Attr_Code_Address:
+ case Attr_Pool_Address:
+ case Attr_Size:
+ case Attr_Alignment:
+ case Attr_Bit_Position:
+ case Attr_Position:
+ case Attr_First_Bit:
+ case Attr_Last_Bit:
+ case Attr_Bit:
+ case Attr_Asm_Input:
+ case Attr_Asm_Output:
+ default:
+ return 1;
+ }
+}
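+
+/* For example, A'Length above only needs the bounds of A, so no lvalue is
+   required for the prefix, whereas A'Address or A'Access forces A into
+   memory so that its address can be taken.  */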
+
+/* Return a positive value if an lvalue is required for GNAT_NODE. GNU_TYPE
+ is the type that will be used for GNAT_NODE in the translated GNU tree.
+ CONSTANT indicates whether the underlying object represented by GNAT_NODE
+ is constant in the Ada sense. If it is, ADDRESS_OF_CONSTANT indicates
+ whether its value is the address of a constant and ALIASED whether it is
+ aliased. If it isn't, ADDRESS_OF_CONSTANT and ALIASED are ignored.
+
+ The function climbs up the GNAT tree starting from the node and returns 1
+ upon encountering a node that effectively requires an lvalue downstream.
+ It returns int instead of bool to facilitate usage in non-purely binary
+ logic contexts. */
+
+static int
+lvalue_required_p (Node_Id gnat_node, tree gnu_type, bool constant,
+ bool address_of_constant, bool aliased)
+{
+ Node_Id gnat_parent = Parent (gnat_node), gnat_temp;
+
+ switch (Nkind (gnat_parent))
+ {
+ case N_Reference:
+ return 1;
+
+ case N_Attribute_Reference:
+ return lvalue_required_for_attribute_p (gnat_parent);
+
+ case N_Parameter_Association:
+ case N_Function_Call:
+ case N_Procedure_Call_Statement:
+ /* If the parameter is by reference, an lvalue is required. */
+ return (!constant
+ || must_pass_by_ref (gnu_type)
+ || default_pass_by_ref (gnu_type));
+
+ case N_Indexed_Component:
+ /* Only the array expression can require an lvalue. */
+ if (Prefix (gnat_parent) != gnat_node)
+ return 0;
+
+ /* ??? Consider that referencing an indexed component with a
+ non-constant index forces the whole aggregate to memory.
+ Note that N_Integer_Literal is conservative, any static
+ expression in the RM sense could probably be accepted. */
+ for (gnat_temp = First (Expressions (gnat_parent));
+ Present (gnat_temp);
+ gnat_temp = Next (gnat_temp))
+ if (Nkind (gnat_temp) != N_Integer_Literal)
+ return 1;
+
+ /* ... fall through ... */
+
+ case N_Slice:
+ /* Only the array expression can require an lvalue. */
+ if (Prefix (gnat_parent) != gnat_node)
+ return 0;
+
+ aliased |= Has_Aliased_Components (Etype (gnat_node));
+ return lvalue_required_p (gnat_parent, gnu_type, constant,
+ address_of_constant, aliased);
+
+ case N_Selected_Component:
+ aliased |= Is_Aliased (Entity (Selector_Name (gnat_parent)));
+ return lvalue_required_p (gnat_parent, gnu_type, constant,
+ address_of_constant, aliased);
+
+ case N_Object_Renaming_Declaration:
+ /* We need to make a real renaming only if the constant object is
+ aliased or if we may use a renaming pointer; otherwise we can
+ optimize and return the rvalue. We make an exception if the object
+ is an identifier since in this case the rvalue can be propagated
+ attached to the CONST_DECL. */
+ return (!constant
+ || aliased
+ /* This should match the constant case of the renaming code. */
+ || Is_Composite_Type
+ (Underlying_Type (Etype (Name (gnat_parent))))
+ || Nkind (Name (gnat_parent)) == N_Identifier);
+
+ case N_Object_Declaration:
+ /* We cannot use a constructor if this is an atomic object because
+ the actual assignment might end up being done component-wise. */
+ return (!constant
+	       || (Is_Composite_Type (Underlying_Type (Etype (gnat_node)))
+ && Is_Atomic (Defining_Entity (gnat_parent)))
+ /* We don't use a constructor if this is a class-wide object
+ because the effective type of the object is the equivalent
+ type of the class-wide subtype and it smashes most of the
+ data into an array of bytes to which we cannot convert. */
+	       || Ekind (Etype (Defining_Entity (gnat_parent)))
+ == E_Class_Wide_Subtype);
+
+ case N_Assignment_Statement:
+ /* We cannot use a constructor if the LHS is an atomic object because
+ the actual assignment might end up being done component-wise. */
+ return (!constant
+ || Name (gnat_parent) == gnat_node
+ || (Is_Composite_Type (Underlying_Type (Etype (gnat_node)))
+ && Is_Atomic (Entity (Name (gnat_parent)))));
+
+ case N_Unchecked_Type_Conversion:
+ if (!constant)
+ return 1;
+
+ /* ... fall through ... */
+
+ case N_Type_Conversion:
+ case N_Qualified_Expression:
+ /* We must look through all conversions because we may need to bypass
+ an intermediate conversion that is meant to be purely formal. */
+ return lvalue_required_p (gnat_parent,
+ get_unpadded_type (Etype (gnat_parent)),
+ constant, address_of_constant, aliased);
+
+ case N_Allocator:
+ /* We should only reach here through the N_Qualified_Expression case.
+ Force an lvalue for composite types since a block-copy to the newly
+ allocated area of memory is made. */
+ return Is_Composite_Type (Underlying_Type (Etype (gnat_node)));
+
+ case N_Explicit_Dereference:
+ /* We look through dereferences for address of constant because we need
+ to handle the special cases listed above. */
+ if (constant && address_of_constant)
+ return lvalue_required_p (gnat_parent,
+ get_unpadded_type (Etype (gnat_parent)),
+ true, false, true);
+
+ /* ... fall through ... */
+
+ default:
+ return 0;
+ }
+
+ gcc_unreachable ();
+}
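+
+/* For example, given a call P (A (I)) where I is not an integer literal,
+   the N_Indexed_Component case above returns 1 and the whole of A goes to
+   memory, whereas with a literal index only the component value may be
+   needed and no lvalue is required.  */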
+
+/* Subroutine of gnat_to_gnu to translate gnat_node, an N_Identifier,
+ to a GCC tree, which is returned. GNU_RESULT_TYPE_P is a pointer
+ to where we should place the result type. */
+
+static tree
+Identifier_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p)
+{
+ Node_Id gnat_temp, gnat_temp_type;
+ tree gnu_result, gnu_result_type;
+
+ /* Whether we should require an lvalue for GNAT_NODE. Needed in
+ specific circumstances only, so evaluated lazily. < 0 means
+ unknown, > 0 means known true, 0 means known false. */
+ int require_lvalue = -1;
+
+ /* If GNAT_NODE is a constant, whether we should use the initialization
+ value instead of the constant entity, typically for scalars with an
+ address clause when the parent doesn't require an lvalue. */
+ bool use_constant_initializer = false;
+
+ /* If the Etype of this node does not equal the Etype of the Entity,
+ something is wrong with the entity map, probably in generic
+     instantiation.  However, this does not apply to types.  Since we sometimes
+ have strange Ekind's, just do this test for objects. Also, if the Etype of
+ the Entity is private, the Etype of the N_Identifier is allowed to be the
+ full type and also we consider a packed array type to be the same as the
+ original type. Similarly, a class-wide type is equivalent to a subtype of
+ itself. Finally, if the types are Itypes, one may be a copy of the other,
+ which is also legal. */
+ gnat_temp = (Nkind (gnat_node) == N_Defining_Identifier
+ ? gnat_node : Entity (gnat_node));
+ gnat_temp_type = Etype (gnat_temp);
+
+ gcc_assert (Etype (gnat_node) == gnat_temp_type
+ || (Is_Packed (gnat_temp_type)
+ && Etype (gnat_node) == Packed_Array_Type (gnat_temp_type))
+ || (Is_Class_Wide_Type (Etype (gnat_node)))
+ || (IN (Ekind (gnat_temp_type), Private_Kind)
+ && Present (Full_View (gnat_temp_type))
+ && ((Etype (gnat_node) == Full_View (gnat_temp_type))
+ || (Is_Packed (Full_View (gnat_temp_type))
+ && (Etype (gnat_node)
+ == Packed_Array_Type (Full_View
+ (gnat_temp_type))))))
+ || (Is_Itype (Etype (gnat_node)) && Is_Itype (gnat_temp_type))
+ || !(Ekind (gnat_temp) == E_Variable
+ || Ekind (gnat_temp) == E_Component
+ || Ekind (gnat_temp) == E_Constant
+ || Ekind (gnat_temp) == E_Loop_Parameter
+ || IN (Ekind (gnat_temp), Formal_Kind)));
+
+ /* If this is a reference to a deferred constant whose partial view is an
+ unconstrained private type, the proper type is on the full view of the
+ constant, not on the full view of the type, which may be unconstrained.
+
+ This may be a reference to a type, for example in the prefix of the
+ attribute Position, generated for dispatching code (see Make_DT in
+     exp_disp.adb).  In that case we need the type itself, not its parent,
+     in particular if it is a derived type.  */
+ if (Ekind (gnat_temp) == E_Constant
+ && Is_Private_Type (gnat_temp_type)
+ && (Has_Unknown_Discriminants (gnat_temp_type)
+ || (Present (Full_View (gnat_temp_type))
+ && Has_Discriminants (Full_View (gnat_temp_type))))
+ && Present (Full_View (gnat_temp)))
+ {
+ gnat_temp = Full_View (gnat_temp);
+ gnat_temp_type = Etype (gnat_temp);
+ }
+ else
+ {
+ /* We want to use the Actual_Subtype if it has already been elaborated,
+ otherwise the Etype. Avoid using Actual_Subtype for packed arrays to
+ simplify things. */
+ if ((Ekind (gnat_temp) == E_Constant
+ || Ekind (gnat_temp) == E_Variable || Is_Formal (gnat_temp))
+ && !(Is_Array_Type (Etype (gnat_temp))
+ && Present (Packed_Array_Type (Etype (gnat_temp))))
+ && Present (Actual_Subtype (gnat_temp))
+ && present_gnu_tree (Actual_Subtype (gnat_temp)))
+ gnat_temp_type = Actual_Subtype (gnat_temp);
+ else
+ gnat_temp_type = Etype (gnat_node);
+ }
+
+ /* Expand the type of this identifier first, in case it is an enumeral
+     literal, which only gets made when the type is expanded.  There is no
+ order-of-elaboration issue here. */
+ gnu_result_type = get_unpadded_type (gnat_temp_type);
+
+ /* If this is a non-imported elementary constant with an address clause,
+ retrieve the value instead of a pointer to be dereferenced unless
+ an lvalue is required. This is generally more efficient and actually
+ required if this is a static expression because it might be used
+ in a context where a dereference is inappropriate, such as a case
+ statement alternative or a record discriminant. There is no possible
+ volatile-ness short-circuit here since Volatile constants must be
+ imported per C.6. */
+ if (Ekind (gnat_temp) == E_Constant
+ && Is_Elementary_Type (gnat_temp_type)
+ && !Is_Imported (gnat_temp)
+ && Present (Address_Clause (gnat_temp)))
+ {
+ require_lvalue = lvalue_required_p (gnat_node, gnu_result_type, true,
+ false, Is_Aliased (gnat_temp));
+ use_constant_initializer = !require_lvalue;
+ }
+
+ if (use_constant_initializer)
+ {
+ /* If this is a deferred constant, the initializer is attached to
+ the full view. */
+ if (Present (Full_View (gnat_temp)))
+ gnat_temp = Full_View (gnat_temp);
+
+ gnu_result = gnat_to_gnu (Expression (Declaration_Node (gnat_temp)));
+ }
+ else
+ gnu_result = gnat_to_gnu_entity (gnat_temp, NULL_TREE, 0);
+
+ /* Some objects (such as parameters passed by reference, globals of
+ variable size, and renamed objects) actually represent the address
+ of the object. In that case, we must do the dereference. Likewise,
+ deal with parameters to foreign convention subprograms. */
+ if (DECL_P (gnu_result)
+ && (DECL_BY_REF_P (gnu_result)
+ || (TREE_CODE (gnu_result) == PARM_DECL
+ && DECL_BY_COMPONENT_PTR_P (gnu_result))))
+ {
+ const bool read_only = DECL_POINTS_TO_READONLY_P (gnu_result);
+
+ /* If it's a PARM_DECL to foreign convention subprogram, convert it. */
+ if (TREE_CODE (gnu_result) == PARM_DECL
+ && DECL_BY_COMPONENT_PTR_P (gnu_result))
+ gnu_result
+ = convert (build_pointer_type (gnu_result_type), gnu_result);
+
+ /* If it's a CONST_DECL, return the underlying constant like below. */
+ else if (TREE_CODE (gnu_result) == CONST_DECL
+ && !(DECL_CONST_ADDRESS_P (gnu_result)
+ && lvalue_required_p (gnat_node, gnu_result_type, true,
+ true, false)))
+ gnu_result = DECL_INITIAL (gnu_result);
+
+ /* If it's a renaming pointer and we are at the right binding level,
+ we can reference the renamed object directly, since the renamed
+ expression has been protected against multiple evaluations. */
+ if (TREE_CODE (gnu_result) == VAR_DECL
+ && !DECL_LOOP_PARM_P (gnu_result)
+ && DECL_RENAMED_OBJECT (gnu_result)
+ && (!DECL_RENAMING_GLOBAL_P (gnu_result) || global_bindings_p ()))
+ gnu_result = DECL_RENAMED_OBJECT (gnu_result);
+
+ /* Otherwise, do the final dereference. */
+ else
+ {
+ gnu_result = build_unary_op (INDIRECT_REF, NULL_TREE, gnu_result);
+
+ if ((TREE_CODE (gnu_result) == INDIRECT_REF
+ || TREE_CODE (gnu_result) == UNCONSTRAINED_ARRAY_REF)
+ && No (Address_Clause (gnat_temp)))
+ TREE_THIS_NOTRAP (gnu_result) = 1;
+
+ if (read_only)
+ TREE_READONLY (gnu_result) = 1;
+ }
+ }
+
+ /* If we have a constant declaration and its initializer, try to return the
+ latter to avoid the need to call fold in lots of places and the need for
+ elaboration code if this identifier is used as an initializer itself.
+ Don't do it for aggregate types that contain a placeholder since their
+ initializers cannot be manipulated easily. */
+ if (TREE_CONSTANT (gnu_result)
+ && DECL_P (gnu_result)
+ && DECL_INITIAL (gnu_result)
+ && !(AGGREGATE_TYPE_P (TREE_TYPE (gnu_result))
+ && !TYPE_IS_FAT_POINTER_P (TREE_TYPE (gnu_result))
+ && type_contains_placeholder_p (TREE_TYPE (gnu_result))))
+ {
+ bool constant_only = (TREE_CODE (gnu_result) == CONST_DECL
+ && !DECL_CONST_CORRESPONDING_VAR (gnu_result));
+ bool address_of_constant = (TREE_CODE (gnu_result) == CONST_DECL
+ && DECL_CONST_ADDRESS_P (gnu_result));
+
+ /* If there is a (corresponding) variable or this is the address of a
+ constant, we only want to return the initializer if an lvalue isn't
+ required. Evaluate this now if we have not already done so. */
+ if ((!constant_only || address_of_constant) && require_lvalue < 0)
+ require_lvalue
+ = lvalue_required_p (gnat_node, gnu_result_type, true,
+ address_of_constant, Is_Aliased (gnat_temp));
+
+ /* Finally retrieve the initializer if this is deemed valid. */
+ if ((constant_only && !address_of_constant) || !require_lvalue)
+ gnu_result = DECL_INITIAL (gnu_result);
+ }
+
+ /* The GNAT tree has the type of a function set to its result type, so we
+ adjust here. Also use the type of the result if the Etype is a subtype
+ that is nominally unconstrained. Likewise if this is a deferred constant
+ of a discriminated type whose full view can be elaborated statically, to
+ avoid problematic conversions to the nominal subtype. But remove any
+ padding from the resulting type. */
+ if (TREE_CODE (TREE_TYPE (gnu_result)) == FUNCTION_TYPE
+ || Is_Constr_Subt_For_UN_Aliased (gnat_temp_type)
+ || (Ekind (gnat_temp) == E_Constant
+ && Present (Full_View (gnat_temp))
+ && Has_Discriminants (gnat_temp_type)
+ && TREE_CODE (gnu_result) == CONSTRUCTOR))
+ {
+ gnu_result_type = TREE_TYPE (gnu_result);
+ if (TYPE_IS_PADDING_P (gnu_result_type))
+ gnu_result_type = TREE_TYPE (TYPE_FIELDS (gnu_result_type));
+ }
+
+ *gnu_result_type_p = gnu_result_type;
+
+ return gnu_result;
+}
+
+/* Subroutine of gnat_to_gnu to process gnat_node, an N_Pragma. Return
+ any statements we generate. */
+
+static tree
+Pragma_to_gnu (Node_Id gnat_node)
+{
+ tree gnu_result = alloc_stmt_list ();
+ unsigned char pragma_id;
+ Node_Id gnat_temp;
+
+ /* Do nothing if we are just annotating types and check for (and ignore)
+ unrecognized pragmas. */
+ if (type_annotate_only
+ || !Is_Pragma_Name (Chars (Pragma_Identifier (gnat_node))))
+ return gnu_result;
+
+ pragma_id = Get_Pragma_Id (Chars (Pragma_Identifier (gnat_node)));
+ switch (pragma_id)
+ {
+ case Pragma_Inspection_Point:
+ /* Do nothing at top level: all such variables are already viewable. */
+ if (global_bindings_p ())
+ break;
+
+ for (gnat_temp = First (Pragma_Argument_Associations (gnat_node));
+ Present (gnat_temp);
+ gnat_temp = Next (gnat_temp))
+ {
+ Node_Id gnat_expr = Expression (gnat_temp);
+ tree gnu_expr = gnat_to_gnu (gnat_expr);
+ int use_address;
+ enum machine_mode mode;
+ tree asm_constraint = NULL_TREE;
+#ifdef ASM_COMMENT_START
+ char *comment;
+#endif
+
+ if (TREE_CODE (gnu_expr) == UNCONSTRAINED_ARRAY_REF)
+ gnu_expr = TREE_OPERAND (gnu_expr, 0);
+
+ /* Use the value only if it fits into a normal register,
+ otherwise use the address. */
+ mode = TYPE_MODE (TREE_TYPE (gnu_expr));
+ use_address = ((GET_MODE_CLASS (mode) != MODE_INT
+ && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
+ || GET_MODE_SIZE (mode) > UNITS_PER_WORD);
+
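+	      /* For instance, a 32-bit integer operand is referenced
+		 directly in the ASM_EXPR built below, while a 16-byte
+		 record is referenced through its address instead.  */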
+ if (use_address)
+ gnu_expr = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_expr);
+
+#ifdef ASM_COMMENT_START
+ comment = concat (ASM_COMMENT_START,
+ " inspection point: ",
+ Get_Name_String (Chars (gnat_expr)),
+ use_address ? " address" : "",
+ " is in %0",
+ NULL);
+ asm_constraint = build_string (strlen (comment), comment);
+ free (comment);
+#endif
+ gnu_expr = build5 (ASM_EXPR, void_type_node,
+ asm_constraint,
+ NULL_TREE,
+ tree_cons
+ (build_tree_list (NULL_TREE,
+ build_string (1, "g")),
+ gnu_expr, NULL_TREE),
+ NULL_TREE, NULL_TREE);
+ ASM_VOLATILE_P (gnu_expr) = 1;
+ set_expr_location_from_node (gnu_expr, gnat_node);
+ append_to_statement_list (gnu_expr, &gnu_result);
+ }
+ break;
+
+ case Pragma_Loop_Optimize:
+ for (gnat_temp = First (Pragma_Argument_Associations (gnat_node));
+ Present (gnat_temp);
+ gnat_temp = Next (gnat_temp))
+ {
+	  tree gnu_loop_stmt = gnu_loop_stack->last ()->stmt;
+
+ switch (Chars (Expression (gnat_temp)))
+ {
+ case Name_No_Unroll:
+ LOOP_STMT_NO_UNROLL (gnu_loop_stmt) = 1;
+ break;
+
+ case Name_Unroll:
+ LOOP_STMT_UNROLL (gnu_loop_stmt) = 1;
+ break;
+
+ case Name_No_Vector:
+ LOOP_STMT_NO_VECTOR (gnu_loop_stmt) = 1;
+ break;
+
+ case Name_Vector:
+ LOOP_STMT_VECTOR (gnu_loop_stmt) = 1;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ case Pragma_Optimize:
+ switch (Chars (Expression
+ (First (Pragma_Argument_Associations (gnat_node)))))
+ {
+ case Name_Off:
+ if (optimize)
+ post_error ("must specify -O0?", gnat_node);
+ break;
+
+ case Name_Space:
+ if (!optimize_size)
+ post_error ("must specify -Os?", gnat_node);
+ break;
+
+ case Name_Time:
+ if (!optimize)
+ post_error ("insufficient -O value?", gnat_node);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case Pragma_Reviewable:
+ if (write_symbols == NO_DEBUG)
+ post_error ("must specify -g?", gnat_node);
+ break;
+
+ case Pragma_Warning_As_Error:
+ case Pragma_Warnings:
+ {
+ Node_Id gnat_expr;
+ /* Preserve the location of the pragma. */
+ const location_t location = input_location;
+ struct cl_option_handlers handlers;
+ unsigned int option_index;
+ diagnostic_t kind;
+ bool imply;
+
+ gnat_temp = First (Pragma_Argument_Associations (gnat_node));
+
+ /* This is the String form: pragma Warning{s|_As_Error}(String). */
+ if (Nkind (Expression (gnat_temp)) == N_String_Literal)
+ {
+ switch (pragma_id)
+ {
+ case Pragma_Warning_As_Error:
+ kind = DK_ERROR;
+ imply = false;
+ break;
+
+ case Pragma_Warnings:
+ kind = DK_WARNING;
+ imply = true;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ gnat_expr = Expression (gnat_temp);
+ }
+
+ /* This is the On/Off form: pragma Warnings (On | Off [,String]). */
+ else if (Nkind (Expression (gnat_temp)) == N_Identifier)
+ {
+ switch (Chars (Expression (gnat_temp)))
+ {
+ case Name_Off:
+ kind = DK_IGNORED;
+ break;
+
+ case Name_On:
+ kind = DK_WARNING;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (Present (Next (gnat_temp)))
+ {
+ /* pragma Warnings (On | Off, Name) is handled differently. */
+ if (Nkind (Expression (Next (gnat_temp))) != N_String_Literal)
+ break;
+
+ gnat_expr = Expression (Next (gnat_temp));
+ }
+ else
+ gnat_expr = Empty;
+
+ imply = false;
+ }
+
+ else
+ gcc_unreachable ();
+
+ /* This is the same implementation as in the C family of compilers. */
+ if (Present (gnat_expr))
+ {
+ tree gnu_expr = gnat_to_gnu (gnat_expr);
+ const char *opt_string = TREE_STRING_POINTER (gnu_expr);
+ const int len = TREE_STRING_LENGTH (gnu_expr);
+ if (len < 3 || opt_string[0] != '-' || opt_string[1] != 'W')
+ break;
+ for (option_index = 0;
+ option_index < cl_options_count;
+ option_index++)
+ if (strcmp (cl_options[option_index].opt_text, opt_string) == 0)
+ break;
+ if (option_index == cl_options_count)
+ {
+ post_error ("unknown -W switch", gnat_node);
+ break;
+ }
+ }
+ else
+ option_index = 0;
+
+ set_default_handlers (&handlers);
+ control_warning_option (option_index, (int) kind, imply, location,
+ CL_Ada, &handlers, &global_options,
+ &global_options_set, global_dc);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return gnu_result;
+}
+
+/* Subroutine of gnat_to_gnu to translate GNAT_NODE, an N_Attribute_Reference,
+ to a GCC tree, which is returned. GNU_RESULT_TYPE_P is a pointer to
+ where we should place the result type. ATTRIBUTE is the attribute ID. */
+
+static tree
+Attribute_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p, int attribute)
+{
+ const Node_Id gnat_prefix = Prefix (gnat_node);
+ tree gnu_prefix, gnu_type, gnu_expr;
+ tree gnu_result_type, gnu_result = error_mark_node;
+ bool prefix_unused = false;
+
+ /* ??? If this is an access attribute for a public subprogram to be used in
+ a dispatch table, do not translate its type as it's useless there and the
+ parameter types might be incomplete types coming from a limited with. */
+ if (Ekind (Etype (gnat_node)) == E_Access_Subprogram_Type
+ && Is_Dispatch_Table_Entity (Etype (gnat_node))
+ && Nkind (gnat_prefix) == N_Identifier
+ && Is_Subprogram (Entity (gnat_prefix))
+ && Is_Public (Entity (gnat_prefix))
+ && !present_gnu_tree (Entity (gnat_prefix)))
+ gnu_prefix = get_minimal_subprog_decl (Entity (gnat_prefix));
+ else
+ gnu_prefix = gnat_to_gnu (gnat_prefix);
+ gnu_type = TREE_TYPE (gnu_prefix);
+
+ /* If the input is a NULL_EXPR, make a new one. */
+ if (TREE_CODE (gnu_prefix) == NULL_EXPR)
+ {
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ *gnu_result_type_p = gnu_result_type;
+ return build1 (NULL_EXPR, gnu_result_type, TREE_OPERAND (gnu_prefix, 0));
+ }
+
+ switch (attribute)
+ {
+ case Attr_Pos:
+ case Attr_Val:
+ /* These are just conversions since representation clauses for
+ enumeration types are handled in the front-end. */
+ {
+ bool checkp = Do_Range_Check (First (Expressions (gnat_node)));
+ gnu_result = gnat_to_gnu (First (Expressions (gnat_node)));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ gnu_result = convert_with_check (Etype (gnat_node), gnu_result,
+ checkp, checkp, true, gnat_node);
+ }
+ break;
+
+ case Attr_Pred:
+ case Attr_Succ:
+ /* These just add or subtract the constant 1 since representation
+ clauses for enumeration types are handled in the front-end. */
+ gnu_expr = gnat_to_gnu (First (Expressions (gnat_node)));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ if (Do_Range_Check (First (Expressions (gnat_node))))
+ {
+ gnu_expr = gnat_protect_expr (gnu_expr);
+ gnu_expr
+ = emit_check
+ (build_binary_op (EQ_EXPR, boolean_type_node,
+ gnu_expr,
+ attribute == Attr_Pred
+ ? TYPE_MIN_VALUE (gnu_result_type)
+ : TYPE_MAX_VALUE (gnu_result_type)),
+ gnu_expr, CE_Range_Check_Failed, gnat_node);
+ }
+
+ gnu_result
+ = build_binary_op (attribute == Attr_Pred ? MINUS_EXPR : PLUS_EXPR,
+ gnu_result_type, gnu_expr,
+ convert (gnu_result_type, integer_one_node));
+ break;
+
+ case Attr_Address:
+ case Attr_Unrestricted_Access:
+ /* Conversions don't change addresses but can cause us to miss the
+ COMPONENT_REF case below, so strip them off. */
+ gnu_prefix = remove_conversions (gnu_prefix,
+ !Must_Be_Byte_Aligned (gnat_node));
+
+ /* If we are taking 'Address of an unconstrained object, this is the
+ pointer to the underlying array. */
+ if (attribute == Attr_Address)
+ gnu_prefix = maybe_unconstrained_array (gnu_prefix);
+
+ /* If we are building a static dispatch table, we have to honor
+ TARGET_VTABLE_USES_DESCRIPTORS if we want to be compatible
+ with the C++ ABI. We do it in the non-static case as well,
+ see gnat_to_gnu_entity, case E_Access_Subprogram_Type. */
+ else if (TARGET_VTABLE_USES_DESCRIPTORS
+ && Is_Dispatch_Table_Entity (Etype (gnat_node)))
+ {
+ tree gnu_field, t;
+ /* Descriptors can only be built here for top-level functions. */
+ bool build_descriptor = (global_bindings_p () != 0);
+ int i;
+ vec<constructor_elt, va_gc> *gnu_vec = NULL;
+ constructor_elt *elt;
+
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ /* If we're not going to build the descriptor, we have to retrieve
+ the one which will be built by the linker (or by the compiler
+ later if a static chain is requested). */
+ if (!build_descriptor)
+ {
+ gnu_result = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_prefix);
+ gnu_result = fold_convert (build_pointer_type (gnu_result_type),
+ gnu_result);
+ gnu_result = build1 (INDIRECT_REF, gnu_result_type, gnu_result);
+ }
+
+ vec_safe_grow (gnu_vec, TARGET_VTABLE_USES_DESCRIPTORS);
+ elt = (gnu_vec->address () + TARGET_VTABLE_USES_DESCRIPTORS - 1);
+ for (gnu_field = TYPE_FIELDS (gnu_result_type), i = 0;
+ i < TARGET_VTABLE_USES_DESCRIPTORS;
+ gnu_field = DECL_CHAIN (gnu_field), i++)
+ {
+ if (build_descriptor)
+ {
+ t = build2 (FDESC_EXPR, TREE_TYPE (gnu_field), gnu_prefix,
+ build_int_cst (NULL_TREE, i));
+ TREE_CONSTANT (t) = 1;
+ }
+ else
+ t = build3 (COMPONENT_REF, ptr_void_ftype, gnu_result,
+ gnu_field, NULL_TREE);
+
+ elt->index = gnu_field;
+ elt->value = t;
+ elt--;
+ }
+
+ gnu_result = gnat_build_constructor (gnu_result_type, gnu_vec);
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case Attr_Access:
+ case Attr_Unchecked_Access:
+ case Attr_Code_Address:
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ gnu_result
+ = build_unary_op (((attribute == Attr_Address
+ || attribute == Attr_Unrestricted_Access)
+ && !Must_Be_Byte_Aligned (gnat_node))
+ ? ATTR_ADDR_EXPR : ADDR_EXPR,
+ gnu_result_type, gnu_prefix);
+
+ /* For 'Code_Address, find an inner ADDR_EXPR and mark it so that we
+ don't try to build a trampoline. */
+ if (attribute == Attr_Code_Address)
+ {
+ gnu_expr = remove_conversions (gnu_result, false);
+
+ if (TREE_CODE (gnu_expr) == ADDR_EXPR)
+ TREE_NO_TRAMPOLINE (gnu_expr) = TREE_CONSTANT (gnu_expr) = 1;
+ }
+
+ /* For 'Access, issue an error message if the prefix is a C++ method
+ since it can use a special calling convention on some platforms,
+ which cannot be propagated to the access type. */
+ else if (attribute == Attr_Access
+ && Nkind (gnat_prefix) == N_Identifier
+ && is_cplusplus_method (Entity (gnat_prefix)))
+ post_error ("access to C++ constructor or member function not allowed",
+ gnat_node);
+
+ /* For other address attributes applied to a nested function,
+ find an inner ADDR_EXPR and annotate it so that we can issue
+ a useful warning with -Wtrampolines. */
+ else if (TREE_CODE (TREE_TYPE (gnu_prefix)) == FUNCTION_TYPE)
+ {
+ gnu_expr = remove_conversions (gnu_result, false);
+
+ if (TREE_CODE (gnu_expr) == ADDR_EXPR
+ && decl_function_context (TREE_OPERAND (gnu_expr, 0)))
+ {
+ set_expr_location_from_node (gnu_expr, gnat_node);
+
+ /* Check that we're not violating the No_Implicit_Dynamic_Code
+ restriction. Be conservative if we don't know anything
+ about the trampoline strategy for the target. */
+ Check_Implicit_Dynamic_Code_Allowed (gnat_node);
+ }
+ }
+ break;
+
+ case Attr_Pool_Address:
+ {
+ tree gnu_ptr = gnu_prefix;
+ tree gnu_obj_type;
+
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+      /* If this is a fat pointer, the object must have been allocated with the
+ template in front of the array. So compute the template address; do
+ it by converting to a thin pointer. */
+ if (TYPE_IS_FAT_POINTER_P (TREE_TYPE (gnu_ptr)))
+ gnu_ptr
+ = convert (build_pointer_type
+ (TYPE_OBJECT_RECORD_TYPE
+ (TYPE_UNCONSTRAINED_ARRAY (TREE_TYPE (gnu_ptr)))),
+ gnu_ptr);
+
+ gnu_obj_type = TREE_TYPE (TREE_TYPE (gnu_ptr));
+
+ /* If this is a thin pointer, the object must have been allocated with
+ the template in front of the array. So compute the template address
+ and return it. */
+ if (TYPE_IS_THIN_POINTER_P (TREE_TYPE (gnu_ptr)))
+ gnu_ptr
+ = build_binary_op (POINTER_PLUS_EXPR, TREE_TYPE (gnu_ptr),
+ gnu_ptr,
+ fold_build1 (NEGATE_EXPR, sizetype,
+ byte_position
+ (DECL_CHAIN
+					       (TYPE_FIELDS (gnu_obj_type)))));
+
+ gnu_result = convert (gnu_result_type, gnu_ptr);
+ }
+ break;
+
+ case Attr_Size:
+ case Attr_Object_Size:
+ case Attr_Value_Size:
+ case Attr_Max_Size_In_Storage_Elements:
+ gnu_expr = gnu_prefix;
+
+ /* Remove NOPs and conversions between original and packable version
+ from GNU_EXPR, and conversions from GNU_PREFIX. We use GNU_EXPR
+ to see if a COMPONENT_REF was involved. */
+ while (TREE_CODE (gnu_expr) == NOP_EXPR
+ || (TREE_CODE (gnu_expr) == VIEW_CONVERT_EXPR
+ && TREE_CODE (TREE_TYPE (gnu_expr)) == RECORD_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (gnu_expr, 0)))
+ == RECORD_TYPE
+ && TYPE_NAME (TREE_TYPE (gnu_expr))
+ == TYPE_NAME (TREE_TYPE (TREE_OPERAND (gnu_expr, 0)))))
+ gnu_expr = TREE_OPERAND (gnu_expr, 0);
+
+ gnu_prefix = remove_conversions (gnu_prefix, true);
+ prefix_unused = true;
+ gnu_type = TREE_TYPE (gnu_prefix);
+
+ /* Replace an unconstrained array type with the type of the underlying
+ array. We can't do this with a call to maybe_unconstrained_array
+ since we may have a TYPE_DECL. For 'Max_Size_In_Storage_Elements,
+ use the record type that will be used to allocate the object and its
+ template. */
+ if (TREE_CODE (gnu_type) == UNCONSTRAINED_ARRAY_TYPE)
+ {
+ gnu_type = TYPE_OBJECT_RECORD_TYPE (gnu_type);
+ if (attribute != Attr_Max_Size_In_Storage_Elements)
+ gnu_type = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_type)));
+ }
+
+ /* If we're looking for the size of a field, return the field size. */
+ if (TREE_CODE (gnu_prefix) == COMPONENT_REF)
+ gnu_result = DECL_SIZE (TREE_OPERAND (gnu_prefix, 1));
+
+ /* Otherwise, if the prefix is an object, or if we are looking for
+ 'Object_Size or 'Max_Size_In_Storage_Elements, the result is the
+ GCC size of the type. We make an exception for padded objects,
+ as we do not take into account alignment promotions for the size.
+ This is in keeping with the object case of gnat_to_gnu_entity. */
+ else if ((TREE_CODE (gnu_prefix) != TYPE_DECL
+ && !(TYPE_IS_PADDING_P (gnu_type)
+ && TREE_CODE (gnu_expr) == COMPONENT_REF))
+ || attribute == Attr_Object_Size
+ || attribute == Attr_Max_Size_In_Storage_Elements)
+ {
+ /* If this is a dereference and we have a special dynamic constrained
+ subtype on the prefix, use it to compute the size; otherwise, use
+ the designated subtype. */
+ if (Nkind (gnat_prefix) == N_Explicit_Dereference)
+ {
+ Node_Id gnat_actual_subtype
+ = Actual_Designated_Subtype (gnat_prefix);
+ tree gnu_ptr_type
+ = TREE_TYPE (gnat_to_gnu (Prefix (gnat_prefix)));
+
+ if (TYPE_IS_FAT_OR_THIN_POINTER_P (gnu_ptr_type)
+ && Present (gnat_actual_subtype))
+ {
+ tree gnu_actual_obj_type
+ = gnat_to_gnu_type (gnat_actual_subtype);
+ gnu_type
+ = build_unc_object_type_from_ptr (gnu_ptr_type,
+ gnu_actual_obj_type,
+ get_identifier ("SIZE"),
+ false);
+ }
+ }
+
+ gnu_result = TYPE_SIZE (gnu_type);
+ }
+
+ /* Otherwise, the result is the RM size of the type. */
+ else
+ gnu_result = rm_size (gnu_type);
+
+ /* Deal with a self-referential size by returning the maximum size for
+ a type and by qualifying the size with the object otherwise. */
+ if (CONTAINS_PLACEHOLDER_P (gnu_result))
+ {
+ if (TREE_CODE (gnu_prefix) == TYPE_DECL)
+ gnu_result = max_size (gnu_result, true);
+ else
+ gnu_result = substitute_placeholder_in_expr (gnu_result, gnu_expr);
+ }
+
+ /* If the type contains a template, subtract its size. */
+ if (TREE_CODE (gnu_type) == RECORD_TYPE
+ && TYPE_CONTAINS_TEMPLATE_P (gnu_type))
+ gnu_result = size_binop (MINUS_EXPR, gnu_result,
+ DECL_SIZE (TYPE_FIELDS (gnu_type)));
+
+ /* For 'Max_Size_In_Storage_Elements, adjust the unit. */
+ if (attribute == Attr_Max_Size_In_Storage_Elements)
+ gnu_result = size_binop (CEIL_DIV_EXPR, gnu_result, bitsize_unit_node);
+
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ break;
+
+ case Attr_Alignment:
+ {
+ unsigned int align;
+
+ if (TREE_CODE (gnu_prefix) == COMPONENT_REF
+ && TYPE_IS_PADDING_P (TREE_TYPE (TREE_OPERAND (gnu_prefix, 0))))
+ gnu_prefix = TREE_OPERAND (gnu_prefix, 0);
+
+ gnu_type = TREE_TYPE (gnu_prefix);
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ prefix_unused = true;
+
+ if (TREE_CODE (gnu_prefix) == COMPONENT_REF)
+ align = DECL_ALIGN (TREE_OPERAND (gnu_prefix, 1)) / BITS_PER_UNIT;
+ else
+ {
+ Entity_Id gnat_type = Etype (gnat_prefix);
+ unsigned int double_align;
+ bool is_capped_double, align_clause;
+
+ /* If the default alignment of "double" or larger scalar types is
+ specifically capped and there is an alignment clause neither
+ on the type nor on the prefix itself, return the cap. */
+ if ((double_align = double_float_alignment) > 0)
+ is_capped_double
+ = is_double_float_or_array (gnat_type, &align_clause);
+ else if ((double_align = double_scalar_alignment) > 0)
+ is_capped_double
+ = is_double_scalar_or_array (gnat_type, &align_clause);
+ else
+ is_capped_double = align_clause = false;
+
+ if (is_capped_double
+ && Nkind (gnat_prefix) == N_Identifier
+ && Present (Alignment_Clause (Entity (gnat_prefix))))
+ align_clause = true;
+
+ if (is_capped_double && !align_clause)
+ align = double_align;
+ else
+ align = TYPE_ALIGN (gnu_type) / BITS_PER_UNIT;
+ }
+
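+	/* E.g. (illustration; the cap value is target-specific): on a target
+	   where double_float_alignment is 4, Long_Float'Alignment reports 4
+	   rather than the natural alignment of the machine type, unless an
+	   alignment clause applies to the type or to the prefix.  */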
+ gnu_result = size_int (align);
+ }
+ break;
+
+ case Attr_First:
+ case Attr_Last:
+ case Attr_Range_Length:
+ prefix_unused = true;
+
+ if (INTEGRAL_TYPE_P (gnu_type) || TREE_CODE (gnu_type) == REAL_TYPE)
+ {
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ if (attribute == Attr_First)
+ gnu_result = TYPE_MIN_VALUE (gnu_type);
+ else if (attribute == Attr_Last)
+ gnu_result = TYPE_MAX_VALUE (gnu_type);
+ else
+ gnu_result
+ = build_binary_op
+ (MAX_EXPR, get_base_type (gnu_result_type),
+ build_binary_op
+ (PLUS_EXPR, get_base_type (gnu_result_type),
+ build_binary_op (MINUS_EXPR,
+ get_base_type (gnu_result_type),
+ convert (gnu_result_type,
+ TYPE_MAX_VALUE (gnu_type)),
+ convert (gnu_result_type,
+ TYPE_MIN_VALUE (gnu_type))),
+ convert (gnu_result_type, integer_one_node)),
+ convert (gnu_result_type, integer_zero_node));
+
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case Attr_Length:
+ {
+ int Dimension = (Present (Expressions (gnat_node))
+ ? UI_To_Int (Intval (First (Expressions (gnat_node))))
+ : 1), i;
+ struct parm_attr_d *pa = NULL;
+ Entity_Id gnat_param = Empty;
+ bool unconstrained_ptr_deref = false;
+
+ /* Make sure any implicit dereference gets done. */
+ gnu_prefix = maybe_implicit_deref (gnu_prefix);
+ gnu_prefix = maybe_unconstrained_array (gnu_prefix);
+
+ /* We treat unconstrained array In parameters specially. We also note
+ whether we are dereferencing a pointer to unconstrained array. */
+ if (!Is_Constrained (Etype (gnat_prefix)))
+ switch (Nkind (gnat_prefix))
+ {
+ case N_Identifier:
+ /* This is the direct case. */
+ if (Ekind (Entity (gnat_prefix)) == E_In_Parameter)
+ gnat_param = Entity (gnat_prefix);
+ break;
+
+ case N_Explicit_Dereference:
+ /* This is the indirect case. Note that we need to be sure that
+ the access value cannot be null as we'll hoist the load. */
+ if (Nkind (Prefix (gnat_prefix)) == N_Identifier
+ && Ekind (Entity (Prefix (gnat_prefix))) == E_In_Parameter)
+ {
+ if (Can_Never_Be_Null (Entity (Prefix (gnat_prefix))))
+ gnat_param = Entity (Prefix (gnat_prefix));
+ }
+ else
+ unconstrained_ptr_deref = true;
+ break;
+
+ default:
+ break;
+ }
+
+ /* If the prefix is the view conversion of a constrained array to an
+ unconstrained form, we retrieve the constrained array because we
+ might not be able to substitute the PLACEHOLDER_EXPR coming from
+ the conversion. This can occur with the 'Old attribute applied
+ to a parameter with an unconstrained type, which gets rewritten
+ into a constrained local variable very late in the game. */
+ if (TREE_CODE (gnu_prefix) == VIEW_CONVERT_EXPR
+ && CONTAINS_PLACEHOLDER_P (TYPE_SIZE (TREE_TYPE (gnu_prefix)))
+ && !CONTAINS_PLACEHOLDER_P
+ (TYPE_SIZE (TREE_TYPE (TREE_OPERAND (gnu_prefix, 0)))))
+ gnu_type = TREE_TYPE (TREE_OPERAND (gnu_prefix, 0));
+ else
+ gnu_type = TREE_TYPE (gnu_prefix);
+
+ prefix_unused = true;
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ if (TYPE_CONVENTION_FORTRAN_P (gnu_type))
+ {
+ int ndim;
+ tree gnu_type_temp;
+
+ for (ndim = 1, gnu_type_temp = gnu_type;
+ TREE_CODE (TREE_TYPE (gnu_type_temp)) == ARRAY_TYPE
+ && TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_type_temp));
+ ndim++, gnu_type_temp = TREE_TYPE (gnu_type_temp))
+ ;
+
+ Dimension = ndim + 1 - Dimension;
+ }
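+
+      /* E.g. (illustration): for a 3-dimensional array with convention
+	 Fortran, 'Length (1) is mapped to internal dimension 3, because the
+	 dimensions of the column-major array are stored in reverse order in
+	 the row-major internal representation.  */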
+
+ for (i = 1; i < Dimension; i++)
+ gnu_type = TREE_TYPE (gnu_type);
+
+ gcc_assert (TREE_CODE (gnu_type) == ARRAY_TYPE);
+
+ /* When not optimizing, look up the slot associated with the parameter
+ and the dimension in the cache and create a new one on failure. */
+ if (!optimize && Present (gnat_param))
+ {
+ FOR_EACH_VEC_SAFE_ELT (f_parm_attr_cache, i, pa)
+ if (pa->id == gnat_param && pa->dim == Dimension)
+ break;
+
+ if (!pa)
+ {
+ pa = ggc_alloc_cleared_parm_attr_d ();
+ pa->id = gnat_param;
+ pa->dim = Dimension;
+ vec_safe_push (f_parm_attr_cache, pa);
+ }
+ }
+
+ /* Return the cached expression or build a new one. */
+ if (attribute == Attr_First)
+ {
+ if (pa && pa->first)
+ {
+ gnu_result = pa->first;
+ break;
+ }
+
+ gnu_result
+ = TYPE_MIN_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type)));
+ }
+
+ else if (attribute == Attr_Last)
+ {
+ if (pa && pa->last)
+ {
+ gnu_result = pa->last;
+ break;
+ }
+
+ gnu_result
+ = TYPE_MAX_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type)));
+ }
+
+ else /* attribute == Attr_Range_Length || attribute == Attr_Length */
+ {
+ if (pa && pa->length)
+ {
+ gnu_result = pa->length;
+ break;
+ }
+ else
+ {
+ /* We used to compute the length as max (hb - lb + 1, 0),
+ which could overflow for some cases of empty arrays, e.g.
+ when lb == index_type'first. We now compute the length as
+ (hb >= lb) ? hb - lb + 1 : 0, which would only overflow in
+		     much rarer cases, namely for extremely large arrays that
+		     we expect never to encounter in practice.  In addition,
+		     the former computation required the use of potentially
+		     constraining signed arithmetic while the latter doesn't.
+		     Note that the comparison must be done in the original
+		     index type, to avoid any overflow during the conversion. */
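+		  /* E.g. (illustration): for bounds lb = 1 and hb = 0, the
+		     guard hb >= lb is false, and the length is 0 without the
+		     subtraction arm ever being evaluated at run time.  */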
+ tree comp_type = get_base_type (gnu_result_type);
+ tree index_type = TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type));
+ tree lb = TYPE_MIN_VALUE (index_type);
+ tree hb = TYPE_MAX_VALUE (index_type);
+ gnu_result
+ = build_binary_op (PLUS_EXPR, comp_type,
+ build_binary_op (MINUS_EXPR,
+ comp_type,
+ convert (comp_type, hb),
+ convert (comp_type, lb)),
+ convert (comp_type, integer_one_node));
+ gnu_result
+ = build_cond_expr (comp_type,
+ build_binary_op (GE_EXPR,
+ boolean_type_node,
+ hb, lb),
+ gnu_result,
+ convert (comp_type, integer_zero_node));
+ }
+ }
+
+ /* If this has a PLACEHOLDER_EXPR, qualify it by the object we are
+ handling. Note that these attributes could not have been used on
+ an unconstrained array type. */
+ gnu_result = SUBSTITUTE_PLACEHOLDER_IN_EXPR (gnu_result, gnu_prefix);
+
+ /* Cache the expression we have just computed. Since we want to do it
+ at run time, we force the use of a SAVE_EXPR and let the gimplifier
+ create the temporary in the outermost binding level. We will make
+ sure in Subprogram_Body_to_gnu that it is evaluated on all possible
+ paths by forcing its evaluation on entry of the function. */
+ if (pa)
+ {
+ gnu_result
+ = build1 (SAVE_EXPR, TREE_TYPE (gnu_result), gnu_result);
+ switch (attribute)
+ {
+ case Attr_First:
+ pa->first = gnu_result;
+ break;
+
+ case Attr_Last:
+ pa->last = gnu_result;
+ break;
+
+ case Attr_Length:
+ case Attr_Range_Length:
+ pa->length = gnu_result;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ /* Otherwise, evaluate it each time it is referenced. */
+ else
+ switch (attribute)
+ {
+ case Attr_First:
+ case Attr_Last:
+ /* If we are dereferencing a pointer to unconstrained array, we
+ need to capture the value because the pointed-to bounds may
+ subsequently be released. */
+ if (unconstrained_ptr_deref)
+ gnu_result
+ = build1 (SAVE_EXPR, TREE_TYPE (gnu_result), gnu_result);
+ break;
+
+ case Attr_Length:
+ case Attr_Range_Length:
+ /* Set the source location onto the predicate of the condition
+ but not if the expression is cached to avoid messing up the
+ debug info. */
+ if (TREE_CODE (gnu_result) == COND_EXPR
+ && EXPR_P (TREE_OPERAND (gnu_result, 0)))
+ set_expr_location_from_node (TREE_OPERAND (gnu_result, 0),
+ gnat_node);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ break;
+ }
+
+ case Attr_Bit_Position:
+ case Attr_Position:
+ case Attr_First_Bit:
+ case Attr_Last_Bit:
+ case Attr_Bit:
+ {
+ HOST_WIDE_INT bitsize;
+ HOST_WIDE_INT bitpos;
+ tree gnu_offset;
+ tree gnu_field_bitpos;
+ tree gnu_field_offset;
+ tree gnu_inner;
+ enum machine_mode mode;
+ int unsignedp, volatilep;
+
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ gnu_prefix = remove_conversions (gnu_prefix, true);
+ prefix_unused = true;
+
+ /* We can have 'Bit on any object, but if it isn't a COMPONENT_REF,
+ the result is 0. Don't allow 'Bit on a bare component, though. */
+ if (attribute == Attr_Bit
+ && TREE_CODE (gnu_prefix) != COMPONENT_REF
+ && TREE_CODE (gnu_prefix) != FIELD_DECL)
+ {
+ gnu_result = integer_zero_node;
+ break;
+ }
+
+ else
+ gcc_assert (TREE_CODE (gnu_prefix) == COMPONENT_REF
+ || (attribute == Attr_Bit_Position
+ && TREE_CODE (gnu_prefix) == FIELD_DECL));
+
+ get_inner_reference (gnu_prefix, &bitsize, &bitpos, &gnu_offset,
+ &mode, &unsignedp, &volatilep, false);
+
+ if (TREE_CODE (gnu_prefix) == COMPONENT_REF)
+ {
+ gnu_field_bitpos = bit_position (TREE_OPERAND (gnu_prefix, 1));
+ gnu_field_offset = byte_position (TREE_OPERAND (gnu_prefix, 1));
+
+ for (gnu_inner = TREE_OPERAND (gnu_prefix, 0);
+ TREE_CODE (gnu_inner) == COMPONENT_REF
+ && DECL_INTERNAL_P (TREE_OPERAND (gnu_inner, 1));
+ gnu_inner = TREE_OPERAND (gnu_inner, 0))
+ {
+ gnu_field_bitpos
+ = size_binop (PLUS_EXPR, gnu_field_bitpos,
+ bit_position (TREE_OPERAND (gnu_inner, 1)));
+ gnu_field_offset
+ = size_binop (PLUS_EXPR, gnu_field_offset,
+ byte_position (TREE_OPERAND (gnu_inner, 1)));
+ }
+ }
+ else if (TREE_CODE (gnu_prefix) == FIELD_DECL)
+ {
+ gnu_field_bitpos = bit_position (gnu_prefix);
+ gnu_field_offset = byte_position (gnu_prefix);
+ }
+ else
+ {
+ gnu_field_bitpos = bitsize_zero_node;
+ gnu_field_offset = size_zero_node;
+ }
+
+ switch (attribute)
+ {
+ case Attr_Position:
+ gnu_result = gnu_field_offset;
+ break;
+
+ case Attr_First_Bit:
+ case Attr_Bit:
+ gnu_result = size_int (bitpos % BITS_PER_UNIT);
+ break;
+
+ case Attr_Last_Bit:
+ gnu_result = bitsize_int (bitpos % BITS_PER_UNIT);
+ gnu_result = size_binop (PLUS_EXPR, gnu_result,
+ TYPE_SIZE (TREE_TYPE (gnu_prefix)));
+ /* ??? Avoid a large unsigned result that will overflow when
+ converted to the signed universal_integer. */
+ if (integer_zerop (gnu_result))
+ gnu_result = integer_minus_one_node;
+ else
+ gnu_result
+ = size_binop (MINUS_EXPR, gnu_result, bitsize_one_node);
+ break;
+
+ case Attr_Bit_Position:
+ gnu_result = gnu_field_bitpos;
+ break;
+ }
+
+ /* If this has a PLACEHOLDER_EXPR, qualify it by the object we are
+ handling. */
+ gnu_result = SUBSTITUTE_PLACEHOLDER_IN_EXPR (gnu_result, gnu_prefix);
+ break;
+ }
+
+ case Attr_Min:
+ case Attr_Max:
+ {
+ tree gnu_lhs = gnat_to_gnu (First (Expressions (gnat_node)));
+ tree gnu_rhs = gnat_to_gnu (Next (First (Expressions (gnat_node))));
+
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ gnu_result = build_binary_op (attribute == Attr_Min
+ ? MIN_EXPR : MAX_EXPR,
+ gnu_result_type, gnu_lhs, gnu_rhs);
+ }
+ break;
+
+ case Attr_Passed_By_Reference:
+ gnu_result = size_int (default_pass_by_ref (gnu_type)
+ || must_pass_by_ref (gnu_type));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ break;
+
+ case Attr_Component_Size:
+ if (TREE_CODE (gnu_prefix) == COMPONENT_REF
+ && TYPE_IS_PADDING_P (TREE_TYPE (TREE_OPERAND (gnu_prefix, 0))))
+ gnu_prefix = TREE_OPERAND (gnu_prefix, 0);
+
+ gnu_prefix = maybe_implicit_deref (gnu_prefix);
+ gnu_type = TREE_TYPE (gnu_prefix);
+
+ if (TREE_CODE (gnu_type) == UNCONSTRAINED_ARRAY_TYPE)
+ gnu_type = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_type))));
+
+ while (TREE_CODE (TREE_TYPE (gnu_type)) == ARRAY_TYPE
+ && TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_type)))
+ gnu_type = TREE_TYPE (gnu_type);
+
+ gcc_assert (TREE_CODE (gnu_type) == ARRAY_TYPE);
+
+ /* Note this size cannot be self-referential. */
+ gnu_result = TYPE_SIZE (TREE_TYPE (gnu_type));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ prefix_unused = true;
+ break;
+
+ case Attr_Descriptor_Size:
+ gnu_type = TREE_TYPE (gnu_prefix);
+ gcc_assert (TREE_CODE (gnu_type) == UNCONSTRAINED_ARRAY_TYPE);
+
+ /* What we want is the offset of the ARRAY field in the record
+ that the thin pointer designates. */
+ gnu_type = TYPE_OBJECT_RECORD_TYPE (gnu_type);
+ gnu_result = bit_position (DECL_CHAIN (TYPE_FIELDS (gnu_type)));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ prefix_unused = true;
+ break;
+
+ case Attr_Null_Parameter:
+    /* This is just a zero cast to the pointer type of our prefix, which is
+       then dereferenced.  */
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ gnu_result
+ = build_unary_op (INDIRECT_REF, NULL_TREE,
+ convert (build_pointer_type (gnu_result_type),
+ integer_zero_node));
+ TREE_PRIVATE (gnu_result) = 1;
+ break;
+
+ case Attr_Mechanism_Code:
+ {
+ Entity_Id gnat_obj = Entity (gnat_prefix);
+ int code;
+
+ prefix_unused = true;
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ if (Present (Expressions (gnat_node)))
+ {
+ int i = UI_To_Int (Intval (First (Expressions (gnat_node))));
+
+ for (gnat_obj = First_Formal (gnat_obj); i > 1;
+ i--, gnat_obj = Next_Formal (gnat_obj))
+ ;
+ }
+
+ code = Mechanism (gnat_obj);
+ if (code == Default)
+ code = ((present_gnu_tree (gnat_obj)
+ && (DECL_BY_REF_P (get_gnu_tree (gnat_obj))
+ || ((TREE_CODE (get_gnu_tree (gnat_obj))
+ == PARM_DECL)
+ && (DECL_BY_COMPONENT_PTR_P
+ (get_gnu_tree (gnat_obj))))))
+ ? By_Reference : By_Copy);
+ gnu_result = convert (gnu_result_type, size_int (- code));
+ }
+ break;
+
+ default:
+ /* This abort means that we have an unimplemented attribute. */
+ gcc_unreachable ();
+ }
+
+ /* If this is an attribute where the prefix was unused, force a use of it if
+ it has a side-effect. But don't do it if the prefix is just an entity
+ name. However, if an access check is needed, we must do it. See second
+ example in AARM 11.6(5.e). */
+ if (prefix_unused
+ && TREE_SIDE_EFFECTS (gnu_prefix)
+ && !Is_Entity_Name (gnat_prefix))
+ gnu_result
+ = build_compound_expr (TREE_TYPE (gnu_result), gnu_prefix, gnu_result);
+
+ *gnu_result_type_p = gnu_result_type;
+ return gnu_result;
+}
+
+/* Subroutine of gnat_to_gnu to translate gnat_node, an N_Case_Statement,
+ to a GCC tree, which is returned. */
+
+static tree
+Case_Statement_to_gnu (Node_Id gnat_node)
+{
+ tree gnu_result, gnu_expr, gnu_label;
+ Node_Id gnat_when;
+ location_t end_locus;
+ bool may_fallthru = false;
+
+ gnu_expr = gnat_to_gnu (Expression (gnat_node));
+ gnu_expr = convert (get_base_type (TREE_TYPE (gnu_expr)), gnu_expr);
+
+ /* The range of values in a case statement is determined by the rules in
+ RM 5.4(7-9). In almost all cases, this range is represented by the Etype
+ of the expression. One exception arises in the case of a simple name that
+ is parenthesized. This still has the Etype of the name, but since it is
+ not a name, para 7 does not apply, and we need to go to the base type.
+ This is the only case where parenthesization affects the dynamic
+ semantics (i.e. the range of possible values at run time that is covered
+ by the others alternative).
+
+ Another exception is if the subtype of the expression is non-static. In
+ that case, we also have to use the base type. */
+ if (Paren_Count (Expression (gnat_node)) != 0
+ || !Is_OK_Static_Subtype (Underlying_Type
+ (Etype (Expression (gnat_node)))))
+ gnu_expr = convert (get_base_type (TREE_TYPE (gnu_expr)), gnu_expr);
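+
+  /* E.g. (illustration): in "case (C) is", where C is of a static subtype
+     'A' .. 'Z' of Character, the parentheses force the use of the base type,
+     so an others choice must cover all of Character, not just 'A' .. 'Z'.  */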
+
+ /* We build a SWITCH_EXPR that contains the code with interspersed
+ CASE_LABEL_EXPRs for each label. */
+ if (!Sloc_to_locus (Sloc (gnat_node) + UI_To_Int (End_Span (gnat_node)),
+ &end_locus))
+ end_locus = input_location;
+ gnu_label = create_artificial_label (end_locus);
+ start_stmt_group ();
+
+ for (gnat_when = First_Non_Pragma (Alternatives (gnat_node));
+ Present (gnat_when);
+ gnat_when = Next_Non_Pragma (gnat_when))
+ {
+ bool choices_added_p = false;
+ Node_Id gnat_choice;
+
+ /* First compile all the different case choices for the current WHEN
+ alternative. */
+ for (gnat_choice = First (Discrete_Choices (gnat_when));
+ Present (gnat_choice); gnat_choice = Next (gnat_choice))
+ {
+ tree gnu_low = NULL_TREE, gnu_high = NULL_TREE;
+
+ switch (Nkind (gnat_choice))
+ {
+ case N_Range:
+ gnu_low = gnat_to_gnu (Low_Bound (gnat_choice));
+ gnu_high = gnat_to_gnu (High_Bound (gnat_choice));
+ break;
+
+ case N_Subtype_Indication:
+ gnu_low = gnat_to_gnu (Low_Bound (Range_Expression
+ (Constraint (gnat_choice))));
+ gnu_high = gnat_to_gnu (High_Bound (Range_Expression
+ (Constraint (gnat_choice))));
+ break;
+
+ case N_Identifier:
+ case N_Expanded_Name:
+ /* This represents either a subtype range or a static value of
+ some kind; Ekind says which. */
+ if (IN (Ekind (Entity (gnat_choice)), Type_Kind))
+ {
+ tree gnu_type = get_unpadded_type (Entity (gnat_choice));
+
+ gnu_low = fold (TYPE_MIN_VALUE (gnu_type));
+ gnu_high = fold (TYPE_MAX_VALUE (gnu_type));
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case N_Character_Literal:
+ case N_Integer_Literal:
+ gnu_low = gnat_to_gnu (gnat_choice);
+ break;
+
+ case N_Others_Choice:
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* If the case value is a subtype that raises Constraint_Error at
+ run time because of a wrong bound, then gnu_low or gnu_high is
+		 not translated into an INTEGER_CST.  In such a case, we need
+		 to ensure that the when statement is not added to the tree,
+		 otherwise it would crash the gimplifier. */
+ if ((!gnu_low || TREE_CODE (gnu_low) == INTEGER_CST)
+ && (!gnu_high || TREE_CODE (gnu_high) == INTEGER_CST))
+ {
+ add_stmt_with_node (build_case_label
+ (gnu_low, gnu_high,
+ create_artificial_label (input_location)),
+ gnat_choice);
+ choices_added_p = true;
+ }
+ }
+
+ /* This construct doesn't define a scope so we shouldn't push a binding
+ level around the statement list. Except that we have always done so
+ historically and this makes it possible to reduce stack usage. As a
+ compromise, we keep doing it for case statements, for which this has
+ never been problematic, but not for case expressions in Ada 2012. */
+ if (choices_added_p)
+ {
+ const bool is_case_expression
+ = (Nkind (Parent (gnat_node)) == N_Expression_With_Actions);
+ tree group
+ = build_stmt_group (Statements (gnat_when), !is_case_expression);
+ bool group_may_fallthru = block_may_fallthru (group);
+ add_stmt (group);
+ if (group_may_fallthru)
+ {
+ tree stmt = build1 (GOTO_EXPR, void_type_node, gnu_label);
+ SET_EXPR_LOCATION (stmt, end_locus);
+ add_stmt (stmt);
+ may_fallthru = true;
+ }
+ }
+ }
+
+ /* Now emit a definition of the label the cases branch to, if any. */
+ if (may_fallthru)
+ add_stmt (build1 (LABEL_EXPR, void_type_node, gnu_label));
+ gnu_result = build3 (SWITCH_EXPR, TREE_TYPE (gnu_expr), gnu_expr,
+ end_stmt_group (), NULL_TREE);
+
+ return gnu_result;
+}
+
+/* Find out whether VAR is an iteration variable of an enclosing loop in the
+ current function. If so, push a range_check_info structure onto the stack
+ of this enclosing loop and return it. Otherwise, return NULL. */
+
+static struct range_check_info_d *
+push_range_check_info (tree var)
+{
+ struct loop_info_d *iter = NULL;
+ unsigned int i;
+
+ if (vec_safe_is_empty (gnu_loop_stack))
+ return NULL;
+
+ var = remove_conversions (var, false);
+
+ if (TREE_CODE (var) != VAR_DECL)
+ return NULL;
+
+ if (decl_function_context (var) != current_function_decl)
+ return NULL;
+
+ for (i = vec_safe_length (gnu_loop_stack) - 1;
+ vec_safe_iterate (gnu_loop_stack, i, &iter);
+ i--)
+ if (var == iter->loop_var)
+ break;
+
+ if (iter)
+ {
+ struct range_check_info_d *rci = ggc_alloc_range_check_info_d ();
+ vec_safe_push (iter->checks, rci);
+ return rci;
+ }
+
+ return NULL;
+}
+
+/* Return true if VAL (of type TYPE) can equal the minimum value if MAX is
+ false, or the maximum value if MAX is true, of TYPE. */
+
+static bool
+can_equal_min_or_max_val_p (tree val, tree type, bool max)
+{
+ tree min_or_max_val = (max ? TYPE_MAX_VALUE (type) : TYPE_MIN_VALUE (type));
+
+ if (TREE_CODE (min_or_max_val) != INTEGER_CST)
+ return true;
+
+ if (TREE_CODE (val) == NOP_EXPR)
+ val = (max
+ ? TYPE_MAX_VALUE (TREE_TYPE (TREE_OPERAND (val, 0)))
+ : TYPE_MIN_VALUE (TREE_TYPE (TREE_OPERAND (val, 0))));
+
+ if (TREE_CODE (val) != INTEGER_CST)
+ return true;
+
+ if (max)
+ return tree_int_cst_lt (val, min_or_max_val) == 0;
+ else
+ return tree_int_cst_lt (min_or_max_val, val) == 0;
+}
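+
+/* E.g. (illustration): with TYPE = Integer and VAL the constant 1,
+   can_equal_min_or_max_val_p (val, type, false) returns false because
+   1 > Integer'First, so VAL - 1 provably cannot wrap; a VAL not known at
+   compile time (apart from the NOP_EXPR peeking above) conservatively
+   yields true.  */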
+
+/* Return true if VAL (of type TYPE) can equal the minimum value of TYPE.
+ If REVERSE is true, minimum value is taken as maximum value. */
+
+static inline bool
+can_equal_min_val_p (tree val, tree type, bool reverse)
+{
+ return can_equal_min_or_max_val_p (val, type, reverse);
+}
+
+/* Return true if VAL (of type TYPE) can equal the maximum value of TYPE.
+ If REVERSE is true, maximum value is taken as minimum value. */
+
+static inline bool
+can_equal_max_val_p (tree val, tree type, bool reverse)
+{
+ return can_equal_min_or_max_val_p (val, type, !reverse);
+}
+
+/* Return true if VAL1 can be lower than VAL2. */
+
+static bool
+can_be_lower_p (tree val1, tree val2)
+{
+ if (TREE_CODE (val1) == NOP_EXPR)
+ val1 = TYPE_MIN_VALUE (TREE_TYPE (TREE_OPERAND (val1, 0)));
+
+ if (TREE_CODE (val1) != INTEGER_CST)
+ return true;
+
+ if (TREE_CODE (val2) == NOP_EXPR)
+ val2 = TYPE_MAX_VALUE (TREE_TYPE (TREE_OPERAND (val2, 0)));
+
+ if (TREE_CODE (val2) != INTEGER_CST)
+ return true;
+
+ return tree_int_cst_lt (val1, val2);
+}
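+
+/* E.g. (illustration): for the static bounds 1 and 10, can_be_lower_p (10, 1)
+   returns false, so the caller below knows the range cannot be empty and
+   omits the guarding ENTRY_COND around the loop.  */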
+
+/* Subroutine of gnat_to_gnu to translate gnat_node, an N_Loop_Statement,
+ to a GCC tree, which is returned. */
+
+static tree
+Loop_Statement_to_gnu (Node_Id gnat_node)
+{
+ const Node_Id gnat_iter_scheme = Iteration_Scheme (gnat_node);
+ struct loop_info_d *gnu_loop_info = ggc_alloc_cleared_loop_info_d ();
+ tree gnu_loop_stmt = build4 (LOOP_STMT, void_type_node, NULL_TREE,
+ NULL_TREE, NULL_TREE, NULL_TREE);
+ tree gnu_loop_label = create_artificial_label (input_location);
+ tree gnu_cond_expr = NULL_TREE, gnu_low = NULL_TREE, gnu_high = NULL_TREE;
+ tree gnu_result;
+
+ /* Push the loop_info structure associated with the LOOP_STMT. */
+ vec_safe_push (gnu_loop_stack, gnu_loop_info);
+
+ /* Set location information for statement and end label. */
+ set_expr_location_from_node (gnu_loop_stmt, gnat_node);
+ Sloc_to_locus (Sloc (End_Label (gnat_node)),
+ &DECL_SOURCE_LOCATION (gnu_loop_label));
+ LOOP_STMT_LABEL (gnu_loop_stmt) = gnu_loop_label;
+
+ /* Save the statement for later reuse. */
+ gnu_loop_info->stmt = gnu_loop_stmt;
+
+ /* Set the condition under which the loop must keep going.
+ For the case "LOOP .... END LOOP;" the condition is always true. */
+ if (No (gnat_iter_scheme))
+ ;
+
+ /* For the case "WHILE condition LOOP ..... END LOOP;" it's immediate. */
+ else if (Present (Condition (gnat_iter_scheme)))
+ LOOP_STMT_COND (gnu_loop_stmt)
+ = gnat_to_gnu (Condition (gnat_iter_scheme));
+
+ /* Otherwise we have an iteration scheme and the condition is given by the
+ bounds of the subtype of the iteration variable. */
+ else
+ {
+ Node_Id gnat_loop_spec = Loop_Parameter_Specification (gnat_iter_scheme);
+ Entity_Id gnat_loop_var = Defining_Entity (gnat_loop_spec);
+ Entity_Id gnat_type = Etype (gnat_loop_var);
+ tree gnu_type = get_unpadded_type (gnat_type);
+ tree gnu_base_type = get_base_type (gnu_type);
+ tree gnu_one_node = convert (gnu_base_type, integer_one_node);
+ tree gnu_loop_var, gnu_loop_iv, gnu_first, gnu_last, gnu_stmt;
+ enum tree_code update_code, test_code, shift_code;
+ bool reverse = Reverse_Present (gnat_loop_spec), use_iv = false;
+
+ gnu_low = TYPE_MIN_VALUE (gnu_type);
+ gnu_high = TYPE_MAX_VALUE (gnu_type);
+
+ /* We must disable modulo reduction for the iteration variable, if any,
+ in order for the loop comparison to be effective. */
+ if (reverse)
+ {
+ gnu_first = gnu_high;
+ gnu_last = gnu_low;
+ update_code = MINUS_NOMOD_EXPR;
+ test_code = GE_EXPR;
+ shift_code = PLUS_NOMOD_EXPR;
+ }
+ else
+ {
+ gnu_first = gnu_low;
+ gnu_last = gnu_high;
+ update_code = PLUS_NOMOD_EXPR;
+ test_code = LE_EXPR;
+ shift_code = MINUS_NOMOD_EXPR;
+ }
+
+ /* We use two different strategies to translate the loop, depending on
+ whether optimization is enabled.
+
+ If it is, we generate the canonical loop form expected by the loop
+ optimizer and the loop vectorizer, which is the do-while form:
+
+ ENTRY_COND
+ loop:
+ TOP_UPDATE
+ BODY
+ BOTTOM_COND
+ GOTO loop
+
+ This avoids an implicit dependency on loop header copying and makes
+ it possible to turn BOTTOM_COND into an inequality test.
+
+ If optimization is disabled, loop header copying doesn't come into
+	 play and we try to generate the loop form with the fewest conditional
+	 branches.  First, the default form, which is:
+
+ loop:
+ TOP_COND
+ BODY
+ BOTTOM_UPDATE
+ GOTO loop
+
+ It should catch most loops with constant ending point. Then, if we
+ cannot, we try to generate the shifted form:
+
+ loop:
+ TOP_COND
+ TOP_UPDATE
+ BODY
+ GOTO loop
+
+ which should catch loops with constant starting point. Otherwise, if
+ we cannot, we generate the fallback form:
+
+ ENTRY_COND
+ loop:
+ BODY
+ BOTTOM_COND
+ BOTTOM_UPDATE
+ GOTO loop
+
+ which works in all cases. */
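+
+      /* A plausible example (illustration, assuming no optimization and a
+	 loop written "for J in 1 .. Last loop" with Last of subtype
+	 Positive): Last + 1 may overflow since Last can be Integer'Last, so
+	 the default form is rejected; but neither 1 - 1 nor Last - 1 can
+	 wrap below Integer'First, so the shifted form is used.  */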
+
+ if (optimize)
+ {
+ /* We can use the do-while form directly if GNU_FIRST-1 doesn't
+ overflow. */
+ if (!can_equal_min_val_p (gnu_first, gnu_base_type, reverse))
+ ;
+
+ /* Otherwise, use the do-while form with the help of a special
+ induction variable in the unsigned version of the base type
+ or the unsigned version of the size type, whichever is the
+	     larger, in order to have wrap-around arithmetic for it. */
+ else
+ {
+ if (TYPE_PRECISION (gnu_base_type)
+ > TYPE_PRECISION (size_type_node))
+ gnu_base_type
+ = gnat_type_for_size (TYPE_PRECISION (gnu_base_type), 1);
+ else
+ gnu_base_type = size_type_node;
+
+ gnu_first = convert (gnu_base_type, gnu_first);
+ gnu_last = convert (gnu_base_type, gnu_last);
+ gnu_one_node = convert (gnu_base_type, integer_one_node);
+ use_iv = true;
+ }
+
+ gnu_first
+ = build_binary_op (shift_code, gnu_base_type, gnu_first,
+ gnu_one_node);
+ LOOP_STMT_TOP_UPDATE_P (gnu_loop_stmt) = 1;
+ LOOP_STMT_BOTTOM_COND_P (gnu_loop_stmt) = 1;
+ }
+ else
+ {
+ /* We can use the default form if GNU_LAST+1 doesn't overflow. */
+ if (!can_equal_max_val_p (gnu_last, gnu_base_type, reverse))
+ ;
+
+ /* Otherwise, we can use the shifted form if neither GNU_FIRST-1 nor
+ GNU_LAST-1 does. */
+ else if (!can_equal_min_val_p (gnu_first, gnu_base_type, reverse)
+ && !can_equal_min_val_p (gnu_last, gnu_base_type, reverse))
+ {
+ gnu_first
+ = build_binary_op (shift_code, gnu_base_type, gnu_first,
+ gnu_one_node);
+ gnu_last
+ = build_binary_op (shift_code, gnu_base_type, gnu_last,
+ gnu_one_node);
+ LOOP_STMT_TOP_UPDATE_P (gnu_loop_stmt) = 1;
+ }
+
+ /* Otherwise, use the fallback form. */
+ else
+ LOOP_STMT_BOTTOM_COND_P (gnu_loop_stmt) = 1;
+ }
+
+ /* If we use the BOTTOM_COND, we can turn the test into an inequality
+ test but we may have to add ENTRY_COND to protect the empty loop. */
+ if (LOOP_STMT_BOTTOM_COND_P (gnu_loop_stmt))
+ {
+ test_code = NE_EXPR;
+ if (can_be_lower_p (gnu_high, gnu_low))
+ {
+ gnu_cond_expr
+ = build3 (COND_EXPR, void_type_node,
+ build_binary_op (LE_EXPR, boolean_type_node,
+ gnu_low, gnu_high),
+ NULL_TREE, alloc_stmt_list ());
+ set_expr_location_from_node (gnu_cond_expr, gnat_loop_spec);
+ }
+ }
+
+ /* Open a new nesting level that will surround the loop to declare the
+ iteration variable. */
+ start_stmt_group ();
+ gnat_pushlevel ();
+
+ /* If we use the special induction variable, create it and set it to
+	 its initial value.  Moreover, the regular iteration variable cannot
+	 itself be initialized, lest the initial value wrap around. */
+ if (use_iv)
+ {
+ gnu_loop_iv
+ = create_init_temporary ("I", gnu_first, &gnu_stmt, gnat_loop_var);
+ add_stmt (gnu_stmt);
+ gnu_first = NULL_TREE;
+ }
+ else
+ gnu_loop_iv = NULL_TREE;
+
+ /* Declare the iteration variable and set it to its initial value. */
+ gnu_loop_var = gnat_to_gnu_entity (gnat_loop_var, gnu_first, 1);
+ if (DECL_BY_REF_P (gnu_loop_var))
+ gnu_loop_var = build_unary_op (INDIRECT_REF, NULL_TREE, gnu_loop_var);
+ else if (use_iv)
+ {
+ gcc_assert (DECL_LOOP_PARM_P (gnu_loop_var));
+ SET_DECL_INDUCTION_VAR (gnu_loop_var, gnu_loop_iv);
+ }
+ gnu_loop_info->loop_var = gnu_loop_var;
+
+      /* Do all the arithmetic in the base type.  */
+ gnu_loop_var = convert (gnu_base_type, gnu_loop_var);
+
+ /* Set either the top or bottom exit condition. */
+ if (use_iv)
+ LOOP_STMT_COND (gnu_loop_stmt)
+ = build_binary_op (test_code, boolean_type_node, gnu_loop_iv,
+ gnu_last);
+ else
+ LOOP_STMT_COND (gnu_loop_stmt)
+ = build_binary_op (test_code, boolean_type_node, gnu_loop_var,
+ gnu_last);
+
+ /* Set either the top or bottom update statement and give it the source
+ location of the iteration for better coverage info. */
+ if (use_iv)
+ {
+ gnu_stmt
+ = build_binary_op (MODIFY_EXPR, NULL_TREE, gnu_loop_iv,
+ build_binary_op (update_code, gnu_base_type,
+ gnu_loop_iv, gnu_one_node));
+ set_expr_location_from_node (gnu_stmt, gnat_iter_scheme);
+ append_to_statement_list (gnu_stmt,
+ &LOOP_STMT_UPDATE (gnu_loop_stmt));
+ gnu_stmt
+ = build_binary_op (MODIFY_EXPR, NULL_TREE, gnu_loop_var,
+ gnu_loop_iv);
+ set_expr_location_from_node (gnu_stmt, gnat_iter_scheme);
+ append_to_statement_list (gnu_stmt,
+ &LOOP_STMT_UPDATE (gnu_loop_stmt));
+ }
+ else
+ {
+ gnu_stmt
+ = build_binary_op (MODIFY_EXPR, NULL_TREE, gnu_loop_var,
+ build_binary_op (update_code, gnu_base_type,
+ gnu_loop_var, gnu_one_node));
+ set_expr_location_from_node (gnu_stmt, gnat_iter_scheme);
+ LOOP_STMT_UPDATE (gnu_loop_stmt) = gnu_stmt;
+ }
+ }
+
+ /* If the loop was named, have the name point to this loop. In this case,
+ the association is not a DECL node, but the end label of the loop. */
+ if (Present (Identifier (gnat_node)))
+ save_gnu_tree (Entity (Identifier (gnat_node)), gnu_loop_label, true);
+
+ /* Make the loop body into its own block, so any allocated storage will be
+ released every iteration. This is needed for stack allocation. */
+ LOOP_STMT_BODY (gnu_loop_stmt)
+ = build_stmt_group (Statements (gnat_node), true);
+ TREE_SIDE_EFFECTS (gnu_loop_stmt) = 1;
+
+ /* If we have an iteration scheme, then we are in a statement group. Add
+ the LOOP_STMT to it, finish it and make it the "loop". */
+ if (Present (gnat_iter_scheme) && No (Condition (gnat_iter_scheme)))
+ {
+ struct range_check_info_d *rci;
+ unsigned n_checks = vec_safe_length (gnu_loop_info->checks);
+ unsigned int i;
+
+ /* First, if we have computed a small number of invariant conditions for
+ range checks applied to the iteration variable, then initialize these
+ conditions in front of the loop. Otherwise, leave them set to true.
+
+ ??? The heuristics need to be improved, by taking into account the
+ following datapoints:
+ - loop unswitching is disabled for big loops. The cap is the
+ parameter PARAM_MAX_UNSWITCH_INSNS (50).
+ - loop unswitching can only be applied a small number of times
+ to a given loop. The cap is PARAM_MAX_UNSWITCH_LEVEL (3).
+ - the front-end quickly generates useless or redundant checks
+ that can be entirely optimized away in the end. */
+ if (1 <= n_checks && n_checks <= 4)
+ for (i = 0;
+ vec_safe_iterate (gnu_loop_info->checks, i, &rci);
+ i++)
+ {
+ tree low_ok
+ = rci->low_bound
+ ? build_binary_op (GE_EXPR, boolean_type_node,
+ convert (rci->type, gnu_low),
+ rci->low_bound)
+ : boolean_true_node;
+
+ tree high_ok
+ = rci->high_bound
+ ? build_binary_op (LE_EXPR, boolean_type_node,
+ convert (rci->type, gnu_high),
+ rci->high_bound)
+ : boolean_true_node;
+
+ tree range_ok
+ = build_binary_op (TRUTH_ANDIF_EXPR, boolean_type_node,
+ low_ok, high_ok);
+
+ TREE_OPERAND (rci->invariant_cond, 0)
+ = build_unary_op (TRUTH_NOT_EXPR, boolean_type_node, range_ok);
+
+ add_stmt_with_node_force (rci->invariant_cond, gnat_node);
+ }
+
+ add_stmt (gnu_loop_stmt);
+ gnat_poplevel ();
+ gnu_loop_stmt = end_stmt_group ();
+ }
+
+ /* If we have an outer COND_EXPR, that's our result and this loop is its
+ "true" statement. Otherwise, the result is the LOOP_STMT. */
+ if (gnu_cond_expr)
+ {
+ COND_EXPR_THEN (gnu_cond_expr) = gnu_loop_stmt;
+ TREE_SIDE_EFFECTS (gnu_cond_expr) = 1;
+ gnu_result = gnu_cond_expr;
+ }
+ else
+ gnu_result = gnu_loop_stmt;
+
+ gnu_loop_stack->pop ();
+
+ return gnu_result;
+}
+
+/* Emit statements to establish __gnat_handle_vms_condition as a VMS condition
+ handler for the current function. */
+
+/* This is implemented by issuing a call to the appropriate VMS specific
+ builtin. To avoid having VMS specific sections in the global gigi decls
+ array, we maintain the decls of interest here. We can't declare them
+ inside the function because we must mark them never to be GC'd, which we
+ can only do at the global level. */
+
+static GTY(()) tree vms_builtin_establish_handler_decl = NULL_TREE;
+static GTY(()) tree gnat_vms_condition_handler_decl = NULL_TREE;
+
+static void
+establish_gnat_vms_condition_handler (void)
+{
+ tree establish_stmt;
+
+  /* Elaborate the required decls on the first call.  Check the decl for
+     the GNAT condition handler to decide, as this is one we create ourselves,
+     so we are sure that it will be non-null on subsequent calls.  The builtin
+     decl is merely looked up, so it remains null on targets where it is not
+     implemented yet. */
+ if (gnat_vms_condition_handler_decl == NULL_TREE)
+ {
+ vms_builtin_establish_handler_decl
+ = builtin_decl_for
+ (get_identifier ("__builtin_establish_vms_condition_handler"));
+
+ gnat_vms_condition_handler_decl
+ = create_subprog_decl (get_identifier ("__gnat_handle_vms_condition"),
+ NULL_TREE,
+ build_function_type_list (boolean_type_node,
+ ptr_void_type_node,
+ ptr_void_type_node,
+ NULL_TREE),
+ NULL_TREE, is_disabled, true, true, true, NULL,
+ Empty);
+
+ /* ??? DECL_CONTEXT shouldn't have been set because of DECL_EXTERNAL. */
+ DECL_CONTEXT (gnat_vms_condition_handler_decl) = NULL_TREE;
+ }
+
+ /* Do nothing if the establish builtin is not available, which might happen
+ on targets where the facility is not implemented. */
+ if (vms_builtin_establish_handler_decl == NULL_TREE)
+ return;
+
+ establish_stmt
+ = build_call_n_expr (vms_builtin_establish_handler_decl, 1,
+ build_unary_op
+ (ADDR_EXPR, NULL_TREE,
+ gnat_vms_condition_handler_decl));
+
+ add_stmt (establish_stmt);
+}
+
+/* This page implements a form of Named Return Value optimization modelled
+ on the C++ optimization of the same name. The main difference is that
+   we disregard any semantic considerations when applying it here, the
+ counterpart being that we don't try to apply it to semantically loaded
+ return types, i.e. types with the TYPE_BY_REFERENCE_P flag set.
+
+ We consider a function body of the following GENERIC form:
+
+ return_type R1;
+ [...]
+ RETURN_EXPR [<retval> = ...]
+ [...]
+ RETURN_EXPR [<retval> = R1]
+ [...]
+ return_type Ri;
+ [...]
+ RETURN_EXPR [<retval> = ...]
+ [...]
+ RETURN_EXPR [<retval> = Ri]
+ [...]
+
+ and we try to fulfill a simple criterion that would make it possible to
+ replace one or several Ri variables with the RESULT_DECL of the function.
+
+ The first observation is that RETURN_EXPRs that don't directly reference
+ any of the Ri variables on the RHS of their assignment are transparent wrt
+ the optimization. This is because the Ri variables aren't addressable so
+ any transformation applied to them doesn't affect the RHS; moreover, the
+ assignment writes the full <retval> object so existing values are entirely
+ discarded.
+
+ This property can be extended to some forms of RETURN_EXPRs that reference
+ the Ri variables, for example CONSTRUCTORs, but isn't true in the general
+ case, in particular when function calls are involved.
+
+ Therefore the algorithm is as follows:
+
+ 1. Collect the list of candidates for a Named Return Value (Ri variables
+ on the RHS of assignments of RETURN_EXPRs) as well as the list of the
+ other expressions on the RHS of such assignments.
+
+ 2. Prune the members of the first list (candidates) that are referenced
+ by a member of the second list (expressions).
+
+ 3. Extract a set of candidates with non-overlapping live ranges from the
+ first list. These are the Named Return Values.
+
+ 4. Adjust the relevant RETURN_EXPRs and replace the occurrences of the
+ Named Return Values in the function with the RESULT_DECL.
+
+ If the function returns an unconstrained type, things are a bit different
+ because the anonymous return object is allocated on the secondary stack
+ and RESULT_DECL is only a pointer to it. Each return object can be of a
+ different size and is allocated separately so we need not care about the
+ aforementioned overlapping issues. Therefore, we don't collect the other
+ expressions and skip step #2 in the algorithm. */
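+
+/* For instance (illustration, not from the source), a function body such as
+
+     function F return T is
+        R : T;
+     begin
+        ...
+        return R;
+     end F;
+
+   is a typical candidate: R is only ever copied into <retval>, so its
+   occurrences can be replaced with the RESULT_DECL and the copy elided.  */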
+
+struct nrv_data
+{
+ bitmap nrv;
+ tree result;
+ Node_Id gnat_ret;
+ struct pointer_set_t *visited;
+};
+
+/* Return true if T is a Named Return Value. */
+
+static inline bool
+is_nrv_p (bitmap nrv, tree t)
+{
+ return TREE_CODE (t) == VAR_DECL && bitmap_bit_p (nrv, DECL_UID (t));
+}
+
+/* Helper function for walk_tree, used by finalize_nrv below. */
+
+static tree
+prune_nrv_r (tree *tp, int *walk_subtrees, void *data)
+{
+ struct nrv_data *dp = (struct nrv_data *)data;
+ tree t = *tp;
+
+ /* No need to walk into types or decls. */
+ if (IS_TYPE_OR_DECL_P (t))
+ *walk_subtrees = 0;
+
+ if (is_nrv_p (dp->nrv, t))
+ bitmap_clear_bit (dp->nrv, DECL_UID (t));
+
+ return NULL_TREE;
+}
+
+/* Prune Named Return Values in BLOCK and return true if there is still a
+ Named Return Value in BLOCK or one of its sub-blocks. */
+
+static bool
+prune_nrv_in_block (bitmap nrv, tree block)
+{
+ bool has_nrv = false;
+ tree t;
+
+ /* First recurse on the sub-blocks. */
+ for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t))
+ has_nrv |= prune_nrv_in_block (nrv, t);
+
+ /* Then make sure to keep at most one NRV per block. */
+ for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
+ if (is_nrv_p (nrv, t))
+ {
+ if (has_nrv)
+ bitmap_clear_bit (nrv, DECL_UID (t));
+ else
+ has_nrv = true;
+ }
+
+ return has_nrv;
+}
+
+/* Helper function for walk_tree, used by finalize_nrv below. */
+
+static tree
+finalize_nrv_r (tree *tp, int *walk_subtrees, void *data)
+{
+ struct nrv_data *dp = (struct nrv_data *)data;
+ tree t = *tp;
+
+ /* No need to walk into types. */
+ if (TYPE_P (t))
+ *walk_subtrees = 0;
+
+ /* Change RETURN_EXPRs of NRVs to just refer to the RESULT_DECL; this is a
+ nop, but differs from using NULL_TREE in that it indicates that we care
+ about the value of the RESULT_DECL. */
+ else if (TREE_CODE (t) == RETURN_EXPR
+ && TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR)
+ {
+ tree ret_val = TREE_OPERAND (TREE_OPERAND (t, 0), 1), init_expr;
+
+ /* If this is the temporary created for a return value with variable
+ size in Call_to_gnu, we replace the RHS with the init expression. */
+ if (TREE_CODE (ret_val) == COMPOUND_EXPR
+ && TREE_CODE (TREE_OPERAND (ret_val, 0)) == INIT_EXPR
+ && TREE_OPERAND (TREE_OPERAND (ret_val, 0), 0)
+ == TREE_OPERAND (ret_val, 1))
+ {
+ init_expr = TREE_OPERAND (TREE_OPERAND (ret_val, 0), 1);
+ ret_val = TREE_OPERAND (ret_val, 1);
+ }
+ else
+ init_expr = NULL_TREE;
+
+ /* Strip useless conversions around the return value. */
+ if (gnat_useless_type_conversion (ret_val))
+ ret_val = TREE_OPERAND (ret_val, 0);
+
+ if (is_nrv_p (dp->nrv, ret_val))
+ {
+ if (init_expr)
+ TREE_OPERAND (TREE_OPERAND (t, 0), 1) = init_expr;
+ else
+ TREE_OPERAND (t, 0) = dp->result;
+ }
+ }
+
+ /* Replace the DECL_EXPR of NRVs with an initialization of the RESULT_DECL,
+ if needed. */
+ else if (TREE_CODE (t) == DECL_EXPR
+ && is_nrv_p (dp->nrv, DECL_EXPR_DECL (t)))
+ {
+ tree var = DECL_EXPR_DECL (t), init;
+
+ if (DECL_INITIAL (var))
+ {
+ init = build_binary_op (INIT_EXPR, NULL_TREE, dp->result,
+ DECL_INITIAL (var));
+ SET_EXPR_LOCATION (init, EXPR_LOCATION (t));
+ DECL_INITIAL (var) = NULL_TREE;
+ }
+ else
+ init = build_empty_stmt (EXPR_LOCATION (t));
+ *tp = init;
+
+ /* Identify the NRV to the RESULT_DECL for debugging purposes. */
+ SET_DECL_VALUE_EXPR (var, dp->result);
+ DECL_HAS_VALUE_EXPR_P (var) = 1;
+ /* ??? Kludge to avoid an assertion failure during inlining. */
+ DECL_SIZE (var) = bitsize_unit_node;
+ DECL_SIZE_UNIT (var) = size_one_node;
+ }
+
+ /* And replace all uses of NRVs with the RESULT_DECL. */
+ else if (is_nrv_p (dp->nrv, t))
+ *tp = convert (TREE_TYPE (t), dp->result);
+
+ /* Avoid walking into the same tree more than once. Unfortunately, we
+ can't just use walk_tree_without_duplicates because it would only
+ call us for the first occurrence of NRVs in the function body. */
+ if (pointer_set_insert (dp->visited, *tp))
+ *walk_subtrees = 0;
+
+ return NULL_TREE;
+}
+
+/* Likewise, but used when the function returns an unconstrained type. */
+
+static tree
+finalize_nrv_unc_r (tree *tp, int *walk_subtrees, void *data)
+{
+ struct nrv_data *dp = (struct nrv_data *)data;
+ tree t = *tp;
+
+ /* No need to walk into types. */
+ if (TYPE_P (t))
+ *walk_subtrees = 0;
+
+ /* We need to see the DECL_EXPR of NRVs before any other references so we
+ walk the body of BIND_EXPR before walking its variables. */
+ else if (TREE_CODE (t) == BIND_EXPR)
+ walk_tree (&BIND_EXPR_BODY (t), finalize_nrv_unc_r, data, NULL);
+
+ /* Change RETURN_EXPRs of NRVs to assign to the RESULT_DECL only the final
+ return value built by the allocator instead of the whole construct. */
+ else if (TREE_CODE (t) == RETURN_EXPR
+ && TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR)
+ {
+ tree ret_val = TREE_OPERAND (TREE_OPERAND (t, 0), 1);
+
+ /* This is the construct returned by the allocator. */
+ if (TREE_CODE (ret_val) == COMPOUND_EXPR
+ && TREE_CODE (TREE_OPERAND (ret_val, 0)) == INIT_EXPR)
+ {
+ if (TYPE_IS_FAT_POINTER_P (TREE_TYPE (ret_val)))
+ ret_val
+ = (*CONSTRUCTOR_ELTS (TREE_OPERAND (TREE_OPERAND (ret_val, 0),
+ 1)))[1].value;
+ else
+ ret_val = TREE_OPERAND (TREE_OPERAND (ret_val, 0), 1);
+ }
+
+ /* Strip useless conversions around the return value. */
+ if (gnat_useless_type_conversion (ret_val)
+ || TREE_CODE (ret_val) == VIEW_CONVERT_EXPR)
+ ret_val = TREE_OPERAND (ret_val, 0);
+
+ /* Strip unpadding around the return value. */
+ if (TREE_CODE (ret_val) == COMPONENT_REF
+ && TYPE_IS_PADDING_P (TREE_TYPE (TREE_OPERAND (ret_val, 0))))
+ ret_val = TREE_OPERAND (ret_val, 0);
+
+ /* Assign the new return value to the RESULT_DECL. */
+ if (is_nrv_p (dp->nrv, ret_val))
+ TREE_OPERAND (TREE_OPERAND (t, 0), 1)
+ = TREE_OPERAND (DECL_INITIAL (ret_val), 0);
+ }
+
+ /* Adjust the DECL_EXPR of NRVs to call the allocator and save the result
+ into a new variable. */
+ else if (TREE_CODE (t) == DECL_EXPR
+ && is_nrv_p (dp->nrv, DECL_EXPR_DECL (t)))
+ {
+ tree saved_current_function_decl = current_function_decl;
+ tree var = DECL_EXPR_DECL (t);
+ tree alloc, p_array, new_var, new_ret;
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 2);
+
+ /* Create an artificial context to build the allocation. */
+ current_function_decl = decl_function_context (var);
+ start_stmt_group ();
+ gnat_pushlevel ();
+
+ /* This will return a COMPOUND_EXPR with the allocation in the first
+ arm and the final return value in the second arm. */
+ alloc = build_allocator (TREE_TYPE (var), DECL_INITIAL (var),
+ TREE_TYPE (dp->result),
+ Procedure_To_Call (dp->gnat_ret),
+ Storage_Pool (dp->gnat_ret),
+ Empty, false);
+
+ /* The new variable is built as a reference to the allocated space. */
+ new_var
+ = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, DECL_NAME (var),
+ build_reference_type (TREE_TYPE (var)));
+ DECL_BY_REFERENCE (new_var) = 1;
+
+ if (TYPE_IS_FAT_POINTER_P (TREE_TYPE (alloc)))
+ {
+ /* The new initial value is a COMPOUND_EXPR with the allocation in
+ the first arm and the value of P_ARRAY in the second arm. */
+ DECL_INITIAL (new_var)
+ = build2 (COMPOUND_EXPR, TREE_TYPE (new_var),
+ TREE_OPERAND (alloc, 0),
+ (*CONSTRUCTOR_ELTS (TREE_OPERAND (alloc, 1)))[0].value);
+
+ /* Build a modified CONSTRUCTOR that references NEW_VAR. */
+ p_array = TYPE_FIELDS (TREE_TYPE (alloc));
+ CONSTRUCTOR_APPEND_ELT (v, p_array,
+ fold_convert (TREE_TYPE (p_array), new_var));
+ CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (p_array),
+ (*CONSTRUCTOR_ELTS (
+ TREE_OPERAND (alloc, 1)))[1].value);
+ new_ret = build_constructor (TREE_TYPE (alloc), v);
+ }
+ else
+ {
+ /* The new initial value is just the allocation. */
+ DECL_INITIAL (new_var) = alloc;
+ new_ret = fold_convert (TREE_TYPE (alloc), new_var);
+ }
+
+ gnat_pushdecl (new_var, Empty);
+
+ /* Destroy the artificial context and insert the new statements. */
+ gnat_zaplevel ();
+ *tp = end_stmt_group ();
+ current_function_decl = saved_current_function_decl;
+
+ /* Chain NEW_VAR immediately after VAR and ignore the latter. */
+ DECL_CHAIN (new_var) = DECL_CHAIN (var);
+ DECL_CHAIN (var) = new_var;
+ DECL_IGNORED_P (var) = 1;
+
+ /* Save the new return value and the dereference of NEW_VAR. */
+ DECL_INITIAL (var)
+ = build2 (COMPOUND_EXPR, TREE_TYPE (var), new_ret,
+ build1 (INDIRECT_REF, TREE_TYPE (var), new_var));
+ /* ??? Kludge to avoid messing up during inlining. */
+ DECL_CONTEXT (var) = NULL_TREE;
+ }
+
+ /* And replace all uses of NRVs with the dereference of NEW_VAR. */
+ else if (is_nrv_p (dp->nrv, t))
+ *tp = TREE_OPERAND (DECL_INITIAL (t), 1);
+
+ /* Avoid walking into the same tree more than once. Unfortunately, we
+ can't just use walk_tree_without_duplicates because it would only
+ call us for the first occurrence of NRVs in the function body. */
+ if (pointer_set_insert (dp->visited, *tp))
+ *walk_subtrees = 0;
+
+ return NULL_TREE;
+}
+
+/* Finalize the Named Return Value optimization for FNDECL. The NRV bitmap
+ contains the candidates for Named Return Value and OTHER is a list of
+ the other return values. GNAT_RET is a representative return node. */
+
+static void
+finalize_nrv (tree fndecl, bitmap nrv, vec<tree, va_gc> *other, Node_Id gnat_ret)
+{
+ struct cgraph_node *node;
+ struct nrv_data data;
+ walk_tree_fn func;
+ unsigned int i;
+ tree iter;
+
+ /* We shouldn't be applying the optimization to return types that we aren't
+ allowed to manipulate freely. */
+ gcc_assert (!TYPE_IS_BY_REFERENCE_P (TREE_TYPE (TREE_TYPE (fndecl))));
+
+ /* Prune the candidates that are referenced by other return values. */
+ data.nrv = nrv;
+ data.result = NULL_TREE;
+ data.visited = NULL;
+ for (i = 0; vec_safe_iterate (other, i, &iter); i++)
+ walk_tree_without_duplicates (&iter, prune_nrv_r, &data);
+ if (bitmap_empty_p (nrv))
+ return;
+
+ /* Prune also the candidates that are referenced by nested functions. */
+ node = cgraph_get_create_node (fndecl);
+ for (node = node->nested; node; node = node->next_nested)
+ walk_tree_without_duplicates (&DECL_SAVED_TREE (node->decl), prune_nrv_r,
+ &data);
+ if (bitmap_empty_p (nrv))
+ return;
+
+ /* Extract a set of NRVs with non-overlapping live ranges. */
+ if (!prune_nrv_in_block (nrv, DECL_INITIAL (fndecl)))
+ return;
+
+ /* Adjust the relevant RETURN_EXPRs and replace the occurrences of NRVs. */
+ data.nrv = nrv;
+ data.result = DECL_RESULT (fndecl);
+ data.gnat_ret = gnat_ret;
+ data.visited = pointer_set_create ();
+ if (TYPE_RETURN_UNCONSTRAINED_P (TREE_TYPE (fndecl)))
+ func = finalize_nrv_unc_r;
+ else
+ func = finalize_nrv_r;
+ walk_tree (&DECL_SAVED_TREE (fndecl), func, &data, NULL);
+ pointer_set_destroy (data.visited);
+}
+
+/* Return true if RET_VAL can be used as a Named Return Value for the
+ anonymous return object RET_OBJ. */
+
+static bool
+return_value_ok_for_nrv_p (tree ret_obj, tree ret_val)
+{
+ if (TREE_CODE (ret_val) != VAR_DECL)
+ return false;
+
+ if (TREE_THIS_VOLATILE (ret_val))
+ return false;
+
+ if (DECL_CONTEXT (ret_val) != current_function_decl)
+ return false;
+
+ if (TREE_STATIC (ret_val))
+ return false;
+
+ if (TREE_ADDRESSABLE (ret_val))
+ return false;
+
+ if (ret_obj && DECL_ALIGN (ret_val) > DECL_ALIGN (ret_obj))
+ return false;
+
+ return true;
+}
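+
+/* E.g. (illustration): a non-volatile local variable of the current function
+   whose address is never taken passes these tests, whereas a volatile,
+   statically allocated or address-taken object must keep its own storage
+   and is rejected.  */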
+
+/* Build a RETURN_EXPR. If RET_VAL is non-null, build a RETURN_EXPR around
+   the assignment of RET_VAL to RET_OBJ.  Otherwise, build a bare RETURN_EXPR
+   around RET_OBJ, which may be null in this case. */
+
+static tree
+build_return_expr (tree ret_obj, tree ret_val)
+{
+ tree result_expr;
+
+ if (ret_val)
+ {
+ /* The gimplifier explicitly enforces the following invariant:
+
+ RETURN_EXPR
+ |
+ MODIFY_EXPR
+ / \
+ / \
+ RET_OBJ ...
+
+ As a consequence, type consistency dictates that we use the type
+ of the RET_OBJ as the operation type. */
+ tree operation_type = TREE_TYPE (ret_obj);
+
+ /* Convert the right operand to the operation type. Note that it's the
+ same transformation as in the MODIFY_EXPR case of build_binary_op,
+ with the assumption that the type cannot involve a placeholder. */
+ if (operation_type != TREE_TYPE (ret_val))
+ ret_val = convert (operation_type, ret_val);
+
+ result_expr = build2 (MODIFY_EXPR, void_type_node, ret_obj, ret_val);
+
+ /* If the function returns an aggregate type, find out whether this is
+ a candidate for Named Return Value. If so, record it. Otherwise,
+ if this is an expression of some kind, record it elsewhere. */
+ if (optimize
+ && AGGREGATE_TYPE_P (operation_type)
+ && !TYPE_IS_FAT_POINTER_P (operation_type)
+ && TYPE_MODE (operation_type) == BLKmode
+ && aggregate_value_p (operation_type, current_function_decl))
+ {
+ /* Recognize the temporary created for a return value with variable
+ size in Call_to_gnu. We want to eliminate it if possible. */
+ if (TREE_CODE (ret_val) == COMPOUND_EXPR
+ && TREE_CODE (TREE_OPERAND (ret_val, 0)) == INIT_EXPR
+ && TREE_OPERAND (TREE_OPERAND (ret_val, 0), 0)
+ == TREE_OPERAND (ret_val, 1))
+ ret_val = TREE_OPERAND (ret_val, 1);
+
+ /* Strip useless conversions around the return value. */
+ if (gnat_useless_type_conversion (ret_val))
+ ret_val = TREE_OPERAND (ret_val, 0);
+
+ /* Now apply the test to the return value. */
+ if (return_value_ok_for_nrv_p (ret_obj, ret_val))
+ {
+ if (!f_named_ret_val)
+ f_named_ret_val = BITMAP_GGC_ALLOC ();
+ bitmap_set_bit (f_named_ret_val, DECL_UID (ret_val));
+ }
+
+ /* Note that we need not care about CONSTRUCTORs here, as they are
+ totally transparent given the read-compose-write semantics of
+ assignments from CONSTRUCTORs. */
+ else if (EXPR_P (ret_val))
+ vec_safe_push (f_other_ret_val, ret_val);
+ }
+ }
+ else
+ result_expr = ret_obj;
+
+ return build1 (RETURN_EXPR, void_type_node, result_expr);
+}
+
+/* Build a stub for the subprogram specified by the GCC tree GNU_SUBPROG
+ and the GNAT node GNAT_SUBPROG. */
+
+static void
+build_function_stub (tree gnu_subprog, Entity_Id gnat_subprog)
+{
+ tree gnu_subprog_type, gnu_subprog_addr, gnu_subprog_call;
+ tree gnu_subprog_param, gnu_stub_param, gnu_param;
+ tree gnu_stub_decl = DECL_FUNCTION_STUB (gnu_subprog);
+ vec<tree, va_gc> *gnu_param_vec = NULL;
+
+ gnu_subprog_type = TREE_TYPE (gnu_subprog);
+
+ /* Initialize the information structure for the function. */
+ allocate_struct_function (gnu_stub_decl, false);
+ set_cfun (NULL);
+
+ begin_subprog_body (gnu_stub_decl);
+
+ start_stmt_group ();
+ gnat_pushlevel ();
+
+ /* Loop over the parameters of the stub and translate any of them
+ passed by descriptor into a by reference one. */
+ for (gnu_stub_param = DECL_ARGUMENTS (gnu_stub_decl),
+ gnu_subprog_param = DECL_ARGUMENTS (gnu_subprog);
+ gnu_stub_param;
+ gnu_stub_param = DECL_CHAIN (gnu_stub_param),
+ gnu_subprog_param = DECL_CHAIN (gnu_subprog_param))
+ {
+ if (DECL_BY_DESCRIPTOR_P (gnu_stub_param))
+ {
+ gcc_assert (DECL_BY_REF_P (gnu_subprog_param));
+ gnu_param
+ = convert_vms_descriptor (TREE_TYPE (gnu_subprog_param),
+ gnu_stub_param,
+ DECL_PARM_ALT_TYPE (gnu_stub_param),
+ gnat_subprog);
+ }
+ else
+ gnu_param = gnu_stub_param;
+
+ vec_safe_push (gnu_param_vec, gnu_param);
+ }
+
+ /* Invoke the internal subprogram. */
+ gnu_subprog_addr = build1 (ADDR_EXPR, build_pointer_type (gnu_subprog_type),
+ gnu_subprog);
+ gnu_subprog_call = build_call_vec (TREE_TYPE (gnu_subprog_type),
+ gnu_subprog_addr, gnu_param_vec);
+
+ /* Propagate the return value, if any. */
+ if (VOID_TYPE_P (TREE_TYPE (gnu_subprog_type)))
+ add_stmt (gnu_subprog_call);
+ else
+ add_stmt (build_return_expr (DECL_RESULT (gnu_stub_decl),
+ gnu_subprog_call));
+
+ gnat_poplevel ();
+ end_subprog_body (end_stmt_group ());
+ rest_of_subprog_body_compilation (gnu_stub_decl);
+}
+
+/* Subroutine of gnat_to_gnu to process gnat_node, an N_Subprogram_Body. We
+ don't return anything. */
+
+static void
+Subprogram_Body_to_gnu (Node_Id gnat_node)
+{
+ /* Defining identifier of a parameter to the subprogram. */
+ Entity_Id gnat_param;
+ /* The defining identifier for the subprogram body. Note that if a
+ specification has appeared before for this body, then the identifier
+ occurring in that specification will also be a defining identifier and all
+ the calls to this subprogram will point to that specification. */
+ Entity_Id gnat_subprog_id
+ = (Present (Corresponding_Spec (gnat_node))
+ ? Corresponding_Spec (gnat_node) : Defining_Entity (gnat_node));
+ /* The FUNCTION_DECL node corresponding to the subprogram spec. */
+ tree gnu_subprog_decl;
+ /* Its RESULT_DECL node. */
+ tree gnu_result_decl;
+ /* Its FUNCTION_TYPE node. */
+ tree gnu_subprog_type;
+ /* The TYPE_CI_CO_LIST of its FUNCTION_TYPE node, if any. */
+ tree gnu_cico_list;
+ /* The entry in the CI_CO_LIST that represents a function return, if any. */
+ tree gnu_return_var_elmt = NULL_TREE;
+ tree gnu_result;
+ struct language_function *gnu_subprog_language;
+ vec<parm_attr, va_gc> *cache;
+
+  /* If this is a generic subprogram or if it has been eliminated,
+     ignore it.  */
+ if (Ekind (gnat_subprog_id) == E_Generic_Procedure
+ || Ekind (gnat_subprog_id) == E_Generic_Function
+ || Is_Eliminated (gnat_subprog_id))
+ return;
+
+ /* If this subprogram acts as its own spec, define it. Otherwise, just get
+ the already-elaborated tree node. However, if this subprogram had its
+ elaboration deferred, we will already have made a tree node for it. So
+ treat it as not being defined in that case. Such a subprogram cannot
+ have an address clause or a freeze node, so this test is safe, though it
+ does disable some otherwise-useful error checking. */
+ gnu_subprog_decl
+ = gnat_to_gnu_entity (gnat_subprog_id, NULL_TREE,
+ Acts_As_Spec (gnat_node)
+ && !present_gnu_tree (gnat_subprog_id));
+ gnu_result_decl = DECL_RESULT (gnu_subprog_decl);
+ gnu_subprog_type = TREE_TYPE (gnu_subprog_decl);
+ gnu_cico_list = TYPE_CI_CO_LIST (gnu_subprog_type);
+ if (gnu_cico_list)
+ gnu_return_var_elmt = value_member (void_type_node, gnu_cico_list);
+
+ /* If the function returns by invisible reference, make it explicit in the
+ function body. See gnat_to_gnu_entity, E_Subprogram_Type case.
+ Handle the explicit case here and the copy-in/copy-out case below. */
+ if (TREE_ADDRESSABLE (gnu_subprog_type) && !gnu_return_var_elmt)
+ {
+ TREE_TYPE (gnu_result_decl)
+ = build_reference_type (TREE_TYPE (gnu_result_decl));
+ relayout_decl (gnu_result_decl);
+ }
+
+ /* Set the line number in the decl to correspond to that of the body so that
+ the line number notes are written correctly. */
+ Sloc_to_locus (Sloc (gnat_node), &DECL_SOURCE_LOCATION (gnu_subprog_decl));
+
+ /* Initialize the information structure for the function. */
+ allocate_struct_function (gnu_subprog_decl, false);
+ gnu_subprog_language = ggc_alloc_cleared_language_function ();
+ DECL_STRUCT_FUNCTION (gnu_subprog_decl)->language = gnu_subprog_language;
+ set_cfun (NULL);
+
+ begin_subprog_body (gnu_subprog_decl);
+
+ /* If there are In Out or Out parameters, we need to ensure that the return
+ statement properly copies them out. We do this by making a new block and
+ converting any return into a goto to a label at the end of the block. */
+ if (gnu_cico_list)
+ {
+ tree gnu_return_var = NULL_TREE;
+
+ vec_safe_push (gnu_return_label_stack,
+ create_artificial_label (input_location));
+
+ start_stmt_group ();
+ gnat_pushlevel ();
+
+      /* If this is a function with In Out or Out parameters, we also need
+	 a variable in which to place the return value.  */
+ if (gnu_return_var_elmt)
+ {
+ tree gnu_return_type
+ = TREE_TYPE (TREE_PURPOSE (gnu_return_var_elmt));
+
+ /* If the function returns by invisible reference, make it
+ explicit in the function body. See gnat_to_gnu_entity,
+ E_Subprogram_Type case. */
+ if (TREE_ADDRESSABLE (gnu_subprog_type))
+ gnu_return_type = build_reference_type (gnu_return_type);
+
+ gnu_return_var
+ = create_var_decl (get_identifier ("RETVAL"), NULL_TREE,
+ gnu_return_type, NULL_TREE, false, false,
+ false, false, NULL, gnat_subprog_id);
+ TREE_VALUE (gnu_return_var_elmt) = gnu_return_var;
+ }
+
+ vec_safe_push (gnu_return_var_stack, gnu_return_var);
+
+      /* See whether there are parameters for which we don't have a GCC tree
+	 yet.  These must be Out parameters.  Make a VAR_DECL for each of
+	 them and put it into TYPE_CI_CO_LIST, which must contain an empty
+	 entry too.  We can match up the entries because TYPE_CI_CO_LIST is
+	 in the order of the parameters.  */
+ for (gnat_param = First_Formal_With_Extras (gnat_subprog_id);
+ Present (gnat_param);
+ gnat_param = Next_Formal_With_Extras (gnat_param))
+ if (!present_gnu_tree (gnat_param))
+ {
+ tree gnu_cico_entry = gnu_cico_list;
+ tree gnu_decl;
+
+ /* Skip any entries that have been already filled in; they must
+ correspond to In Out parameters. */
+ while (gnu_cico_entry && TREE_VALUE (gnu_cico_entry))
+ gnu_cico_entry = TREE_CHAIN (gnu_cico_entry);
+
+ /* Do any needed dereferences for by-ref objects. */
+ gnu_decl = gnat_to_gnu_entity (gnat_param, NULL_TREE, 1);
+ gcc_assert (DECL_P (gnu_decl));
+ if (DECL_BY_REF_P (gnu_decl))
+ gnu_decl = build_unary_op (INDIRECT_REF, NULL_TREE, gnu_decl);
+
+	      /* Do any needed conversions for padded types.  */
+ TREE_VALUE (gnu_cico_entry)
+ = convert (TREE_TYPE (TREE_PURPOSE (gnu_cico_entry)), gnu_decl);
+ }
+ }
+ else
+ vec_safe_push (gnu_return_label_stack, NULL_TREE);
+
+ /* Get a tree corresponding to the code for the subprogram. */
+ start_stmt_group ();
+ gnat_pushlevel ();
+
+ /* On VMS, establish our condition handler to possibly turn a condition into
+ the corresponding exception if the subprogram has a foreign convention or
+ is exported.
+
+ To ensure proper execution of local finalizations on condition instances,
+ we must turn a condition into the corresponding exception even if there
+ is no applicable Ada handler, and need at least one condition handler per
+ possible call chain involving GNAT code. OTOH, establishing the handler
+     has a cost, so we want to minimize the number of subprograms in which
+ this happens. The foreign or exported condition is expected to satisfy
+ all the constraints. */
+ if (TARGET_ABI_OPEN_VMS
+ && (Has_Foreign_Convention (gnat_subprog_id)
+ || Is_Exported (gnat_subprog_id)))
+ establish_gnat_vms_condition_handler ();
+
+ process_decls (Declarations (gnat_node), Empty, Empty, true, true);
+
+ /* Generate the code of the subprogram itself. A return statement will be
+ present and any Out parameters will be handled there. */
+ add_stmt (gnat_to_gnu (Handled_Statement_Sequence (gnat_node)));
+ gnat_poplevel ();
+ gnu_result = end_stmt_group ();
+
+ /* If we populated the parameter attributes cache, we need to make sure that
+ the cached expressions are evaluated on all the possible paths leading to
+ their uses. So we force their evaluation on entry of the function. */
+ cache = gnu_subprog_language->parm_attr_cache;
+ if (cache)
+ {
+ struct parm_attr_d *pa;
+ int i;
+
+ start_stmt_group ();
+
+ FOR_EACH_VEC_ELT (*cache, i, pa)
+ {
+ if (pa->first)
+ add_stmt_with_node_force (pa->first, gnat_node);
+ if (pa->last)
+ add_stmt_with_node_force (pa->last, gnat_node);
+ if (pa->length)
+ add_stmt_with_node_force (pa->length, gnat_node);
+ }
+
+ add_stmt (gnu_result);
+ gnu_result = end_stmt_group ();
+
+ gnu_subprog_language->parm_attr_cache = NULL;
+ }
+
+ /* If we are dealing with a return from an Ada procedure with parameters
+ passed by copy-in/copy-out, we need to return a record containing the
+ final values of these parameters. If the list contains only one entry,
+ return just that entry though.
+
+ For a full description of the copy-in/copy-out parameter mechanism, see
+ the part of the gnat_to_gnu_entity routine dealing with the translation
+ of subprograms.
+
+     We need to make a block that contains the definition of the return
+     label and the copying of the return value.  It first contains the
+     function body, then the label and the copy statement.  */
+ if (gnu_cico_list)
+ {
+ tree gnu_retval;
+
+ gnu_return_var_stack->pop ();
+
+ add_stmt (gnu_result);
+ add_stmt (build1 (LABEL_EXPR, void_type_node,
+ gnu_return_label_stack->last ()));
+
+ if (list_length (gnu_cico_list) == 1)
+ gnu_retval = TREE_VALUE (gnu_cico_list);
+ else
+ gnu_retval = build_constructor_from_list (TREE_TYPE (gnu_subprog_type),
+ gnu_cico_list);
+
+ add_stmt_with_node (build_return_expr (gnu_result_decl, gnu_retval),
+ End_Label (Handled_Statement_Sequence (gnat_node)));
+ gnat_poplevel ();
+ gnu_result = end_stmt_group ();
+ }
+
+ gnu_return_label_stack->pop ();
+
+ /* Attempt setting the end_locus of our GCC body tree, typically a
+ BIND_EXPR or STATEMENT_LIST, then the end_locus of our GCC subprogram
+ declaration tree. */
+ set_end_locus_from_node (gnu_result, gnat_node);
+ set_end_locus_from_node (gnu_subprog_decl, gnat_node);
+
+ /* On SEH targets, install an exception handler around the main entry
+ point to catch unhandled exceptions. */
+ if (DECL_NAME (gnu_subprog_decl) == main_identifier_node
+ && targetm_common.except_unwind_info (&global_options) == UI_SEH)
+ {
+ tree t;
+ tree etype;
+
+ t = build_call_expr (builtin_decl_explicit (BUILT_IN_EH_POINTER),
+ 1, integer_zero_node);
+ t = build_call_n_expr (unhandled_except_decl, 1, t);
+
+ etype = build_unary_op (ADDR_EXPR, NULL_TREE, unhandled_others_decl);
+ etype = tree_cons (NULL_TREE, etype, NULL_TREE);
+
+ t = build2 (CATCH_EXPR, void_type_node, etype, t);
+ gnu_result = build2 (TRY_CATCH_EXPR, TREE_TYPE (gnu_result),
+ gnu_result, t);
+ }
+
+ end_subprog_body (gnu_result);
+
+ /* Finally annotate the parameters and disconnect the trees for parameters
+ that we have turned into variables since they are now unusable. */
+ for (gnat_param = First_Formal_With_Extras (gnat_subprog_id);
+ Present (gnat_param);
+ gnat_param = Next_Formal_With_Extras (gnat_param))
+ {
+ tree gnu_param = get_gnu_tree (gnat_param);
+ bool is_var_decl = (TREE_CODE (gnu_param) == VAR_DECL);
+
+ annotate_object (gnat_param, TREE_TYPE (gnu_param), NULL_TREE,
+ DECL_BY_REF_P (gnu_param));
+
+ if (is_var_decl)
+ save_gnu_tree (gnat_param, NULL_TREE, false);
+ }
+
+ /* Disconnect the variable created for the return value. */
+ if (gnu_return_var_elmt)
+ TREE_VALUE (gnu_return_var_elmt) = void_type_node;
+
+ /* If the function returns an aggregate type and we have candidates for
+ a Named Return Value, finalize the optimization. */
+ if (optimize && gnu_subprog_language->named_ret_val)
+ {
+ finalize_nrv (gnu_subprog_decl,
+ gnu_subprog_language->named_ret_val,
+ gnu_subprog_language->other_ret_val,
+ gnu_subprog_language->gnat_ret);
+ gnu_subprog_language->named_ret_val = NULL;
+ gnu_subprog_language->other_ret_val = NULL;
+ }
+
+ rest_of_subprog_body_compilation (gnu_subprog_decl);
+
+ /* If there is a stub associated with the function, build it now. */
+ if (DECL_FUNCTION_STUB (gnu_subprog_decl))
+ build_function_stub (gnu_subprog_decl, gnat_subprog_id);
+}
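+
+/* To give a rough picture of the copy-in/copy-out handling above, an Ada
+   procedure such as
+
+     procedure P (X : in out Integer) is
+     begin
+        ...
+     end P;
+
+   is compiled more or less as a function returning the final value of X:
+
+     integer p (integer x)
+     {
+       ...
+       <return_label>: return x;
+     }
+
+   with a record wrapping the values when there are several In Out or Out
+   parameters, and with every return statement in the body turned into a
+   goto to the return label.  */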
+
+/* Return true if GNAT_NODE requires atomic synchronization. */
+
+static bool
+atomic_sync_required_p (Node_Id gnat_node)
+{
+ const Node_Id gnat_parent = Parent (gnat_node);
+ Node_Kind kind;
+ unsigned char attr_id;
+
+ /* First, scan the node to find the Atomic_Sync_Required flag. */
+ kind = Nkind (gnat_node);
+ if (kind == N_Type_Conversion || kind == N_Unchecked_Type_Conversion)
+ {
+ gnat_node = Expression (gnat_node);
+ kind = Nkind (gnat_node);
+ }
+
+ switch (kind)
+ {
+ case N_Expanded_Name:
+ case N_Explicit_Dereference:
+ case N_Identifier:
+ case N_Indexed_Component:
+ case N_Selected_Component:
+ if (!Atomic_Sync_Required (gnat_node))
+ return false;
+ break;
+
+ default:
+ return false;
+ }
+
+ /* Then, scan the parent to find out cases where the flag is irrelevant. */
+ kind = Nkind (gnat_parent);
+ switch (kind)
+ {
+ case N_Attribute_Reference:
+ attr_id = Get_Attribute_Id (Attribute_Name (gnat_parent));
+ /* Do not mess up machine code insertions. */
+ if (attr_id == Attr_Asm_Input || attr_id == Attr_Asm_Output)
+ return false;
+ break;
+
+ case N_Object_Renaming_Declaration:
+ /* Do not generate a function call as a renamed object. */
+ return false;
+
+ default:
+ break;
+ }
+
+ return true;
+}
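+
+/* For instance, given
+
+     X : Integer;
+     pragma Atomic (X);
+
+   a reference to X on which the front end has set Atomic_Sync_Required is
+   translated with build_atomic_load or build_atomic_store, whereas the
+   same reference used as an Asm_Input, an Asm_Output or a renamed object
+   is left alone by the parent checks above.  */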
+
+/* Create a temporary variable with PREFIX and TYPE, and return it. */
+
+static tree
+create_temporary (const char *prefix, tree type)
+{
+ tree gnu_temp = create_var_decl (create_tmp_var_name (prefix), NULL_TREE,
+ type, NULL_TREE, false, false, false, false,
+ NULL, Empty);
+ DECL_ARTIFICIAL (gnu_temp) = 1;
+ DECL_IGNORED_P (gnu_temp) = 1;
+
+ return gnu_temp;
+}
+
+/* Create a temporary variable with PREFIX and initialize it with GNU_INIT.
+ Put the initialization statement into GNU_INIT_STMT and annotate it with
+ the SLOC of GNAT_NODE. Return the temporary variable. */
+
+static tree
+create_init_temporary (const char *prefix, tree gnu_init, tree *gnu_init_stmt,
+ Node_Id gnat_node)
+{
+ tree gnu_temp = create_temporary (prefix, TREE_TYPE (gnu_init));
+
+ *gnu_init_stmt = build_binary_op (INIT_EXPR, NULL_TREE, gnu_temp, gnu_init);
+ set_expr_location_from_node (*gnu_init_stmt, gnat_node);
+
+ return gnu_temp;
+}
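+
+/* Typical use of the above, as done for misaligned actuals below:
+
+     tree gnu_stmt;
+     tree gnu_temp
+       = create_init_temporary ("A", gnu_actual, &gnu_stmt, gnat_actual);
+
+   after which the caller decides where GNU_STMT is emitted, for example
+   through append_to_statement_list or build_compound_expr.  */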
+
+/* Subroutine of gnat_to_gnu to translate gnat_node, either an N_Function_Call
+ or an N_Procedure_Call_Statement, to a GCC tree, which is returned.
+ GNU_RESULT_TYPE_P is a pointer to where we should place the result type.
+   If GNU_TARGET is non-null, this must be a function call on the RHS of an
+   N_Assignment_Statement and the result is to be placed into that object.
+ If, in addition, ATOMIC_SYNC is true, then the assignment to GNU_TARGET
+ requires atomic synchronization. */
+
+static tree
+Call_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p, tree gnu_target,
+ bool atomic_sync)
+{
+ const bool function_call = (Nkind (gnat_node) == N_Function_Call);
+ const bool returning_value = (function_call && !gnu_target);
+ /* The GCC node corresponding to the GNAT subprogram name. This can either
+ be a FUNCTION_DECL node if we are dealing with a standard subprogram call,
+ or an indirect reference expression (an INDIRECT_REF node) pointing to a
+ subprogram. */
+ tree gnu_subprog = gnat_to_gnu (Name (gnat_node));
+ /* The FUNCTION_TYPE node giving the GCC type of the subprogram. */
+ tree gnu_subprog_type = TREE_TYPE (gnu_subprog);
+ /* The return type of the FUNCTION_TYPE. */
+ tree gnu_result_type = TREE_TYPE (gnu_subprog_type);
+ tree gnu_subprog_addr = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_subprog);
+ vec<tree, va_gc> *gnu_actual_vec = NULL;
+ tree gnu_name_list = NULL_TREE;
+ tree gnu_stmt_list = NULL_TREE;
+ tree gnu_after_list = NULL_TREE;
+ tree gnu_retval = NULL_TREE;
+ tree gnu_call, gnu_result;
+ bool went_into_elab_proc = false;
+ bool pushed_binding_level = false;
+ Entity_Id gnat_formal;
+ Node_Id gnat_actual;
+
+ gcc_assert (TREE_CODE (gnu_subprog_type) == FUNCTION_TYPE);
+
+  /* If we are calling a stubbed function, raise Program_Error, but elaborate
+     all our arguments first.  */
+ if (TREE_CODE (gnu_subprog) == FUNCTION_DECL && DECL_STUBBED_P (gnu_subprog))
+ {
+ tree call_expr = build_call_raise (PE_Stubbed_Subprogram_Called,
+ gnat_node, N_Raise_Program_Error);
+
+ for (gnat_actual = First_Actual (gnat_node);
+ Present (gnat_actual);
+ gnat_actual = Next_Actual (gnat_actual))
+ add_stmt (gnat_to_gnu (gnat_actual));
+
+ if (returning_value)
+ {
+ *gnu_result_type_p = gnu_result_type;
+ return build1 (NULL_EXPR, gnu_result_type, call_expr);
+ }
+
+ return call_expr;
+ }
+
+ /* The only way we can be making a call via an access type is if Name is an
+ explicit dereference. In that case, get the list of formal args from the
+ type the access type is pointing to. Otherwise, get the formals from the
+ entity being called. */
+ if (Nkind (Name (gnat_node)) == N_Explicit_Dereference)
+ gnat_formal = First_Formal_With_Extras (Etype (Name (gnat_node)));
+ else if (Nkind (Name (gnat_node)) == N_Attribute_Reference)
+ /* Assume here that this must be 'Elab_Body or 'Elab_Spec. */
+ gnat_formal = Empty;
+ else
+ gnat_formal = First_Formal_With_Extras (Entity (Name (gnat_node)));
+
+ /* The lifetime of the temporaries created for the call ends right after the
+ return value is copied, so we can give them the scope of the elaboration
+ routine at top level. */
+ if (!current_function_decl)
+ {
+ current_function_decl = get_elaboration_procedure ();
+ went_into_elab_proc = true;
+ }
+
+ /* First, create the temporary for the return value when:
+
+ 1. There is no target and the function has copy-in/copy-out parameters,
+ because we need to preserve the return value before copying back the
+ parameters.
+
+ 2. There is no target and this is not an object declaration, and the
+ return type has variable size, because in these cases the gimplifier
+ cannot create the temporary.
+
+ 3. There is a target and it is a slice or an array with fixed size,
+ and the return type has variable size, because the gimplifier
+ doesn't handle these cases.
+
+ This must be done before we push a binding level around the call, since
+ we will pop it before copying the return value. */
+ if (function_call
+ && ((!gnu_target && TYPE_CI_CO_LIST (gnu_subprog_type))
+ || (!gnu_target
+ && Nkind (Parent (gnat_node)) != N_Object_Declaration
+ && TREE_CODE (TYPE_SIZE (gnu_result_type)) != INTEGER_CST)
+ || (gnu_target
+ && (TREE_CODE (gnu_target) == ARRAY_RANGE_REF
+ || (TREE_CODE (TREE_TYPE (gnu_target)) == ARRAY_TYPE
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (gnu_target)))
+ == INTEGER_CST))
+ && TREE_CODE (TYPE_SIZE (gnu_result_type)) != INTEGER_CST)))
+ gnu_retval = create_temporary ("R", gnu_result_type);
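+
+  /* Case 1 above arises, for instance, with the Ada 2012 call
+
+       R := F (A);   --  function F (X : in out Integer) return Integer;
+
+     where the call actually returns a record holding both the result and
+     the final value of X: the result must be saved into a temporary before
+     X is copied back into A.  */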
+
+  /* Create the vector of the actual parameters as GCC expects it, namely
+     a vec of expressions to be handed to build_call_vec.  But skip Out
+     parameters not passed by reference and that need not be copied in.  */
+ for (gnat_actual = First_Actual (gnat_node);
+ Present (gnat_actual);
+ gnat_formal = Next_Formal_With_Extras (gnat_formal),
+ gnat_actual = Next_Actual (gnat_actual))
+ {
+ tree gnu_formal = present_gnu_tree (gnat_formal)
+ ? get_gnu_tree (gnat_formal) : NULL_TREE;
+ tree gnu_formal_type = gnat_to_gnu_type (Etype (gnat_formal));
+ const bool is_true_formal_parm
+ = gnu_formal && TREE_CODE (gnu_formal) == PARM_DECL;
+ const bool is_by_ref_formal_parm
+ = is_true_formal_parm
+ && (DECL_BY_REF_P (gnu_formal)
+ || DECL_BY_COMPONENT_PTR_P (gnu_formal)
+ || DECL_BY_DESCRIPTOR_P (gnu_formal));
+ /* In the Out or In Out case, we must suppress conversions that yield
+ an lvalue but can nevertheless cause the creation of a temporary,
+ because we need the real object in this case, either to pass its
+ address if it's passed by reference or as target of the back copy
+ done after the call if it uses the copy-in/copy-out mechanism.
+ We do it in the In case too, except for an unchecked conversion
+ because it alone can cause the actual to be misaligned and the
+ addressability test is applied to the real object. */
+ const bool suppress_type_conversion
+ = ((Nkind (gnat_actual) == N_Unchecked_Type_Conversion
+ && Ekind (gnat_formal) != E_In_Parameter)
+ || (Nkind (gnat_actual) == N_Type_Conversion
+ && Is_Composite_Type (Underlying_Type (Etype (gnat_formal)))));
+ Node_Id gnat_name = suppress_type_conversion
+ ? Expression (gnat_actual) : gnat_actual;
+ tree gnu_name = gnat_to_gnu (gnat_name), gnu_name_type;
+ tree gnu_actual;
+
+ /* If it's possible we may need to use this expression twice, make sure
+ that any side-effects are handled via SAVE_EXPRs; likewise if we need
+ to force side-effects before the call.
+ ??? This is more conservative than we need since we don't need to do
+ this for pass-by-ref with no conversion. */
+ if (Ekind (gnat_formal) != E_In_Parameter)
+ gnu_name = gnat_stabilize_reference (gnu_name, true, NULL);
+
+ /* If we are passing a non-addressable parameter by reference, pass the
+ address of a copy. In the Out or In Out case, set up to copy back
+ out after the call. */
+ if (is_by_ref_formal_parm
+ && (gnu_name_type = gnat_to_gnu_type (Etype (gnat_name)))
+ && !addressable_p (gnu_name, gnu_name_type))
+ {
+ bool in_param = (Ekind (gnat_formal) == E_In_Parameter);
+ tree gnu_orig = gnu_name, gnu_temp, gnu_stmt;
+
+ /* Do not issue warnings for CONSTRUCTORs since this is not a copy
+ but sort of an instantiation for them. */
+ if (TREE_CODE (gnu_name) == CONSTRUCTOR)
+ ;
+
+ /* If the type is passed by reference, a copy is not allowed. */
+ else if (TYPE_IS_BY_REFERENCE_P (gnu_formal_type))
+ post_error ("misaligned actual cannot be passed by reference",
+ gnat_actual);
+
+ /* For users of Starlet we issue a warning because the interface
+ apparently assumes that by-ref parameters outlive the procedure
+ invocation. The code still will not work as intended, but we
+ cannot do much better since low-level parts of the back-end
+ would allocate temporaries at will because of the misalignment
+ if we did not do so here. */
+ else if (Is_Valued_Procedure (Entity (Name (gnat_node))))
+ {
+ post_error
+ ("?possible violation of implicit assumption", gnat_actual);
+ post_error_ne
+ ("?made by pragma Import_Valued_Procedure on &", gnat_actual,
+ Entity (Name (gnat_node)));
+ post_error_ne ("?because of misalignment of &", gnat_actual,
+ gnat_formal);
+ }
+
+ /* If the actual type of the object is already the nominal type,
+ we have nothing to do, except if the size is self-referential
+ in which case we'll remove the unpadding below. */
+ if (TREE_TYPE (gnu_name) == gnu_name_type
+ && !CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_name_type)))
+ ;
+
+ /* Otherwise remove the unpadding from all the objects. */
+ else if (TREE_CODE (gnu_name) == COMPONENT_REF
+ && TYPE_IS_PADDING_P
+ (TREE_TYPE (TREE_OPERAND (gnu_name, 0))))
+ gnu_orig = gnu_name = TREE_OPERAND (gnu_name, 0);
+
+ /* Otherwise convert to the nominal type of the object if needed.
+ There are several cases in which we need to make the temporary
+ using this type instead of the actual type of the object when
+ they are distinct, because the expectations of the callee would
+ otherwise not be met:
+ - if it's a justified modular type,
+ - if the actual type is a smaller form of it,
+ - if it's a smaller form of the actual type. */
+ else if ((TREE_CODE (gnu_name_type) == RECORD_TYPE
+ && (TYPE_JUSTIFIED_MODULAR_P (gnu_name_type)
+ || smaller_form_type_p (TREE_TYPE (gnu_name),
+ gnu_name_type)))
+ || (INTEGRAL_TYPE_P (gnu_name_type)
+ && smaller_form_type_p (gnu_name_type,
+ TREE_TYPE (gnu_name))))
+ gnu_name = convert (gnu_name_type, gnu_name);
+
+ /* If this is an In Out or Out parameter and we're returning a value,
+ we need to create a temporary for the return value because we must
+ preserve it before copying back at the very end. */
+ if (!in_param && returning_value && !gnu_retval)
+ gnu_retval = create_temporary ("R", gnu_result_type);
+
+ /* If we haven't pushed a binding level, push a new one. This will
+ narrow the lifetime of the temporary we are about to make as much
+ as possible. The drawback is that we'd need to create a temporary
+ for the return value, if any (see comment before the loop). So do
+ it only when this temporary was already created just above. */
+ if (!pushed_binding_level && !(in_param && returning_value))
+ {
+ start_stmt_group ();
+ gnat_pushlevel ();
+ pushed_binding_level = true;
+ }
+
+ /* Create an explicit temporary holding the copy. */
+ gnu_temp
+ = create_init_temporary ("A", gnu_name, &gnu_stmt, gnat_actual);
+
+ /* But initialize it on the fly like for an implicit temporary as
+ we aren't necessarily having a statement list. */
+ gnu_name = build_compound_expr (TREE_TYPE (gnu_name), gnu_stmt,
+ gnu_temp);
+
+ /* Set up to move the copy back to the original if needed. */
+ if (!in_param)
+ {
+ /* If the original is a COND_EXPR whose first arm isn't meant to
+ be further used, just deal with the second arm. This is very
+ likely the conditional expression built for a check. */
+ if (TREE_CODE (gnu_orig) == COND_EXPR
+ && TREE_CODE (TREE_OPERAND (gnu_orig, 1)) == COMPOUND_EXPR
+ && integer_zerop
+ (TREE_OPERAND (TREE_OPERAND (gnu_orig, 1), 1)))
+ gnu_orig = TREE_OPERAND (gnu_orig, 2);
+
+ gnu_stmt
+ = build_binary_op (MODIFY_EXPR, NULL_TREE, gnu_orig, gnu_temp);
+ set_expr_location_from_node (gnu_stmt, gnat_node);
+
+ append_to_statement_list (gnu_stmt, &gnu_after_list);
+ }
+ }
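+
+      /* The net effect for an Out or In Out actual above is thus roughly
+
+	   A = <actual>;            copy in, on the fly
+	   <call> (..., &A, ...);
+	   <actual> = A;            copy back, from GNU_AFTER_LIST
+
+	 with A the temporary created just above.  */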
+
+ /* Start from the real object and build the actual. */
+ gnu_actual = gnu_name;
+
+ /* If this is an atomic access of an In or In Out parameter for which
+ synchronization is required, build the atomic load. */
+ if (is_true_formal_parm
+ && !is_by_ref_formal_parm
+ && Ekind (gnat_formal) != E_Out_Parameter
+ && atomic_sync_required_p (gnat_actual))
+ gnu_actual = build_atomic_load (gnu_actual);
+
+ /* If this was a procedure call, we may not have removed any padding.
+ So do it here for the part we will use as an input, if any. */
+ if (Ekind (gnat_formal) != E_Out_Parameter
+ && TYPE_IS_PADDING_P (TREE_TYPE (gnu_actual)))
+ gnu_actual
+ = convert (get_unpadded_type (Etype (gnat_actual)), gnu_actual);
+
+ /* Put back the conversion we suppressed above in the computation of the
+ real object. And even if we didn't suppress any conversion there, we
+ may have suppressed a conversion to the Etype of the actual earlier,
+ since the parent is a procedure call, so put it back here. */
+ if (suppress_type_conversion
+ && Nkind (gnat_actual) == N_Unchecked_Type_Conversion)
+ gnu_actual
+ = unchecked_convert (gnat_to_gnu_type (Etype (gnat_actual)),
+ gnu_actual, No_Truncation (gnat_actual));
+ else
+ gnu_actual
+ = convert (gnat_to_gnu_type (Etype (gnat_actual)), gnu_actual);
+
+ /* Make sure that the actual is in range of the formal's type. */
+ if (Ekind (gnat_formal) != E_Out_Parameter
+ && Do_Range_Check (gnat_actual))
+ gnu_actual
+ = emit_range_check (gnu_actual, Etype (gnat_formal), gnat_actual);
+
+      /* Unless this is an In parameter, we must remove any justified modular
+	 CONSTRUCTOR built around GNU_NAME to get an lvalue.  */
+ if (Ekind (gnat_formal) != E_In_Parameter
+ && TREE_CODE (gnu_name) == CONSTRUCTOR
+ && TREE_CODE (TREE_TYPE (gnu_name)) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (TREE_TYPE (gnu_name)))
+ gnu_name
+ = convert (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_name))), gnu_name);
+
+ /* First see if the parameter is passed by reference. */
+ if (is_true_formal_parm && DECL_BY_REF_P (gnu_formal))
+ {
+ if (Ekind (gnat_formal) != E_In_Parameter)
+ {
+ /* In Out or Out parameters passed by reference don't use the
+ copy-in/copy-out mechanism so the address of the real object
+ must be passed to the function. */
+ gnu_actual = gnu_name;
+
+ /* If we have a padded type, be sure we've removed padding. */
+ if (TYPE_IS_PADDING_P (TREE_TYPE (gnu_actual)))
+ gnu_actual = convert (get_unpadded_type (Etype (gnat_actual)),
+ gnu_actual);
+
+ /* If we have the constructed subtype of an aliased object
+ with an unconstrained nominal subtype, the type of the
+ actual includes the template, although it is formally
+ constrained. So we need to convert it back to the real
+ constructed subtype to retrieve the constrained part
+		 and take its address.  */
+ if (TREE_CODE (TREE_TYPE (gnu_actual)) == RECORD_TYPE
+ && TYPE_CONTAINS_TEMPLATE_P (TREE_TYPE (gnu_actual))
+ && Is_Constr_Subt_For_UN_Aliased (Etype (gnat_actual))
+ && (Is_Array_Type (Etype (gnat_actual))
+ || (Is_Private_Type (Etype (gnat_actual))
+ && Is_Array_Type (Full_View (Etype (gnat_actual))))))
+ gnu_actual = convert (gnat_to_gnu_type (Etype (gnat_actual)),
+ gnu_actual);
+ }
+
+ /* There is no need to convert the actual to the formal's type before
+ taking its address. The only exception is for unconstrained array
+ types because of the way we build fat pointers. */
+ if (TREE_CODE (gnu_formal_type) == UNCONSTRAINED_ARRAY_TYPE)
+ {
+ /* Put back a view conversion for In Out or Out parameters. */
+ if (Ekind (gnat_formal) != E_In_Parameter)
+ gnu_actual = convert (gnat_to_gnu_type (Etype (gnat_actual)),
+ gnu_actual);
+ gnu_actual = convert (gnu_formal_type, gnu_actual);
+ }
+
+ /* The symmetry of the paths to the type of an entity is broken here
+ since arguments don't know that they will be passed by ref. */
+ gnu_formal_type = TREE_TYPE (gnu_formal);
+ gnu_actual = build_unary_op (ADDR_EXPR, gnu_formal_type, gnu_actual);
+ }
+
+ /* Then see if the parameter is an array passed to a foreign convention
+ subprogram. */
+ else if (is_true_formal_parm && DECL_BY_COMPONENT_PTR_P (gnu_formal))
+ {
+ gnu_formal_type = TREE_TYPE (gnu_formal);
+ gnu_actual = maybe_implicit_deref (gnu_actual);
+ gnu_actual = maybe_unconstrained_array (gnu_actual);
+
+ if (TYPE_IS_PADDING_P (gnu_formal_type))
+ {
+ gnu_formal_type = TREE_TYPE (TYPE_FIELDS (gnu_formal_type));
+ gnu_actual = convert (gnu_formal_type, gnu_actual);
+ }
+
+ /* Take the address of the object and convert to the proper pointer
+ type. We'd like to actually compute the address of the beginning
+ of the array using an ADDR_EXPR of an ARRAY_REF, but there's a
+ possibility that the ARRAY_REF might return a constant and we'd be
+ getting the wrong address. Neither approach is exactly correct,
+ but this is the most likely to work in all cases. */
+ gnu_actual = build_unary_op (ADDR_EXPR, gnu_formal_type, gnu_actual);
+ }
+
+ /* Then see if the parameter is passed by descriptor. */
+ else if (is_true_formal_parm && DECL_BY_DESCRIPTOR_P (gnu_formal))
+ {
+ gnu_actual = convert (gnu_formal_type, gnu_actual);
+
+ /* If this is 'Null_Parameter, pass a zero descriptor. */
+ if ((TREE_CODE (gnu_actual) == INDIRECT_REF
+ || TREE_CODE (gnu_actual) == UNCONSTRAINED_ARRAY_REF)
+ && TREE_PRIVATE (gnu_actual))
+ gnu_actual
+ = convert (DECL_ARG_TYPE (gnu_formal), integer_zero_node);
+ else
+ gnu_actual = build_unary_op (ADDR_EXPR, NULL_TREE,
+ fill_vms_descriptor
+ (TREE_TYPE (TREE_TYPE (gnu_formal)),
+ gnu_actual, gnat_actual));
+ }
+
+ /* Otherwise the parameter is passed by copy. */
+ else
+ {
+ tree gnu_size;
+
+ if (Ekind (gnat_formal) != E_In_Parameter)
+ gnu_name_list = tree_cons (NULL_TREE, gnu_name, gnu_name_list);
+
+ /* If we didn't create a PARM_DECL for the formal, this means that
+ it is an Out parameter not passed by reference and that need not
+ be copied in. In this case, the value of the actual need not be
+ read. However, we still need to make sure that its side-effects
+ are evaluated before the call, so we evaluate its address. */
+ if (!is_true_formal_parm)
+ {
+ if (TREE_SIDE_EFFECTS (gnu_name))
+ {
+ tree addr = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_name);
+ append_to_statement_list (addr, &gnu_stmt_list);
+ }
+ continue;
+ }
+
+ gnu_actual = convert (gnu_formal_type, gnu_actual);
+
+ /* If this is 'Null_Parameter, pass a zero even though we are
+ dereferencing it. */
+ if (TREE_CODE (gnu_actual) == INDIRECT_REF
+ && TREE_PRIVATE (gnu_actual)
+ && (gnu_size = TYPE_SIZE (TREE_TYPE (gnu_actual)))
+ && TREE_CODE (gnu_size) == INTEGER_CST
+ && compare_tree_int (gnu_size, BITS_PER_WORD) <= 0)
+ gnu_actual
+ = unchecked_convert (DECL_ARG_TYPE (gnu_formal),
+ convert (gnat_type_for_size
+ (TREE_INT_CST_LOW (gnu_size), 1),
+ integer_zero_node),
+ false);
+ else
+ gnu_actual = convert (DECL_ARG_TYPE (gnu_formal), gnu_actual);
+ }
+
+ vec_safe_push (gnu_actual_vec, gnu_actual);
+ }
+
+ gnu_call
+ = build_call_vec (gnu_result_type, gnu_subprog_addr, gnu_actual_vec);
+ set_expr_location_from_node (gnu_call, gnat_node);
+
+ /* If we have created a temporary for the return value, initialize it. */
+ if (gnu_retval)
+ {
+ tree gnu_stmt
+ = build_binary_op (INIT_EXPR, NULL_TREE, gnu_retval, gnu_call);
+ set_expr_location_from_node (gnu_stmt, gnat_node);
+ append_to_statement_list (gnu_stmt, &gnu_stmt_list);
+ gnu_call = gnu_retval;
+ }
+
+ /* If this is a subprogram with copy-in/copy-out parameters, we need to
+     unpack the value returned from the function into the In Out or Out
+ parameters. We deal with the function return (if this is an Ada
+ function) below. */
+ if (TYPE_CI_CO_LIST (gnu_subprog_type))
+ {
+ /* List of FIELD_DECLs associated with the PARM_DECLs of the copy-in/
+ copy-out parameters. */
+ tree gnu_cico_list = TYPE_CI_CO_LIST (gnu_subprog_type);
+ const int length = list_length (gnu_cico_list);
+
+      /* The call sequence must contain one and only one call, even if the
+	 function is pure.  Save the result into a temporary if needed.  */
+ if (length > 1)
+ {
+ if (!gnu_retval)
+ {
+ tree gnu_stmt;
+ /* If we haven't pushed a binding level, push a new one. This
+ will narrow the lifetime of the temporary we are about to
+ make as much as possible. */
+ if (!pushed_binding_level)
+ {
+ start_stmt_group ();
+ gnat_pushlevel ();
+ pushed_binding_level = true;
+ }
+ gnu_call
+ = create_init_temporary ("P", gnu_call, &gnu_stmt, gnat_node);
+ append_to_statement_list (gnu_stmt, &gnu_stmt_list);
+ }
+
+ gnu_name_list = nreverse (gnu_name_list);
+ }
+
+ /* The first entry is for the actual return value if this is a
+ function, so skip it. */
+ if (function_call)
+ gnu_cico_list = TREE_CHAIN (gnu_cico_list);
+
+ if (Nkind (Name (gnat_node)) == N_Explicit_Dereference)
+ gnat_formal = First_Formal_With_Extras (Etype (Name (gnat_node)));
+ else
+ gnat_formal = First_Formal_With_Extras (Entity (Name (gnat_node)));
+
+ for (gnat_actual = First_Actual (gnat_node);
+ Present (gnat_actual);
+ gnat_formal = Next_Formal_With_Extras (gnat_formal),
+ gnat_actual = Next_Actual (gnat_actual))
+ /* If we are dealing with a copy-in/copy-out parameter, we must
+ retrieve its value from the record returned in the call. */
+	if (!(present_gnu_tree (gnat_formal)
+	      && TREE_CODE (get_gnu_tree (gnat_formal)) == PARM_DECL
+	      && (DECL_BY_REF_P (get_gnu_tree (gnat_formal))
+		  || DECL_BY_COMPONENT_PTR_P (get_gnu_tree (gnat_formal))
+		  || DECL_BY_DESCRIPTOR_P (get_gnu_tree (gnat_formal))))
+	    && Ekind (gnat_formal) != E_In_Parameter)
+ {
+ /* Get the value to assign to this Out or In Out parameter. It is
+ either the result of the function if there is only a single such
+ parameter or the appropriate field from the record returned. */
+ tree gnu_result
+ = length == 1
+ ? gnu_call
+ : build_component_ref (gnu_call, NULL_TREE,
+ TREE_PURPOSE (gnu_cico_list), false);
+
+ /* If the actual is a conversion, get the inner expression, which
+ will be the real destination, and convert the result to the
+ type of the actual parameter. */
+ tree gnu_actual
+ = maybe_unconstrained_array (TREE_VALUE (gnu_name_list));
+
+ /* If the result is a padded type, remove the padding. */
+ if (TYPE_IS_PADDING_P (TREE_TYPE (gnu_result)))
+ gnu_result
+ = convert (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_result))),
+ gnu_result);
+
+ /* If the actual is a type conversion, the real target object is
+ denoted by the inner Expression and we need to convert the
+ result to the associated type.
+ We also need to convert our gnu assignment target to this type
+ if the corresponding GNU_NAME was constructed from the GNAT
+ conversion node and not from the inner Expression. */
+ if (Nkind (gnat_actual) == N_Type_Conversion)
+ {
+ gnu_result
+ = convert_with_check
+ (Etype (Expression (gnat_actual)), gnu_result,
+ Do_Overflow_Check (gnat_actual),
+ Do_Range_Check (Expression (gnat_actual)),
+ Float_Truncate (gnat_actual), gnat_actual);
+
+ if (!Is_Composite_Type (Underlying_Type (Etype (gnat_formal))))
+ gnu_actual = convert (TREE_TYPE (gnu_result), gnu_actual);
+ }
+
+ /* Unchecked conversions as actuals for Out parameters are not
+ allowed in user code because they are not variables, but do
+ occur in front-end expansions. The associated GNU_NAME is
+ always obtained from the inner expression in such cases. */
+ else if (Nkind (gnat_actual) == N_Unchecked_Type_Conversion)
+ gnu_result = unchecked_convert (TREE_TYPE (gnu_actual),
+ gnu_result,
+ No_Truncation (gnat_actual));
+ else
+ {
+ if (Do_Range_Check (gnat_actual))
+ gnu_result
+ = emit_range_check (gnu_result, Etype (gnat_actual),
+ gnat_actual);
+
+ if (!(!TREE_CONSTANT (TYPE_SIZE (TREE_TYPE (gnu_actual)))
+ && TREE_CONSTANT (TYPE_SIZE (TREE_TYPE (gnu_result)))))
+ gnu_result = convert (TREE_TYPE (gnu_actual), gnu_result);
+ }
+
+ if (atomic_sync_required_p (gnat_actual))
+ gnu_result = build_atomic_store (gnu_actual, gnu_result);
+ else
+ gnu_result = build_binary_op (MODIFY_EXPR, NULL_TREE,
+ gnu_actual, gnu_result);
+ set_expr_location_from_node (gnu_result, gnat_node);
+ append_to_statement_list (gnu_result, &gnu_stmt_list);
+ gnu_cico_list = TREE_CHAIN (gnu_cico_list);
+ gnu_name_list = TREE_CHAIN (gnu_name_list);
+ }
+ }
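+
+  /* In other words, for  procedure P (X : in out T; Y : out U)  using the
+     copy-in/copy-out mechanism, the call is expanded roughly into
+
+       tmp = p (x, y);
+       x = tmp.x;
+       y = tmp.y;
+
+     with one field per In Out or Out parameter, and with a direct
+     assignment instead of the record when there is only one such
+     parameter.  */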
+
+ /* If this is a function call, the result is the call expression unless a
+ target is specified, in which case we copy the result into the target
+ and return the assignment statement. */
+ if (function_call)
+ {
+ /* If this is a function with copy-in/copy-out parameters, extract the
+ return value from it and update the return type. */
+ if (TYPE_CI_CO_LIST (gnu_subprog_type))
+ {
+ tree gnu_elmt = TYPE_CI_CO_LIST (gnu_subprog_type);
+ gnu_call = build_component_ref (gnu_call, NULL_TREE,
+ TREE_PURPOSE (gnu_elmt), false);
+ gnu_result_type = TREE_TYPE (gnu_call);
+ }
+
+ /* If the function returns an unconstrained array or by direct reference,
+ we have to dereference the pointer. */
+ if (TYPE_RETURN_UNCONSTRAINED_P (gnu_subprog_type)
+ || TYPE_RETURN_BY_DIRECT_REF_P (gnu_subprog_type))
+ gnu_call = build_unary_op (INDIRECT_REF, NULL_TREE, gnu_call);
+
+ if (gnu_target)
+ {
+ Node_Id gnat_parent = Parent (gnat_node);
+ enum tree_code op_code;
+
+ /* If range check is needed, emit code to generate it. */
+ if (Do_Range_Check (gnat_node))
+ gnu_call
+ = emit_range_check (gnu_call, Etype (Name (gnat_parent)),
+ gnat_parent);
+
+ /* ??? If the return type has variable size, then force the return
+ slot optimization as we would not be able to create a temporary.
+ Likewise if it was unconstrained as we would copy too much data.
+ That's what has been done historically. */
+ if (TREE_CODE (TYPE_SIZE (gnu_result_type)) != INTEGER_CST
+ || (TYPE_IS_PADDING_P (gnu_result_type)
+ && CONTAINS_PLACEHOLDER_P
+ (TYPE_SIZE (TREE_TYPE (TYPE_FIELDS (gnu_result_type))))))
+ op_code = INIT_EXPR;
+ else
+ op_code = MODIFY_EXPR;
+
+ if (atomic_sync)
+ gnu_call = build_atomic_store (gnu_target, gnu_call);
+ else
+ gnu_call
+ = build_binary_op (op_code, NULL_TREE, gnu_target, gnu_call);
+ set_expr_location_from_node (gnu_call, gnat_parent);
+ append_to_statement_list (gnu_call, &gnu_stmt_list);
+ }
+ else
+ *gnu_result_type_p = get_unpadded_type (Etype (gnat_node));
+ }
+
+ /* Otherwise, if this is a procedure call statement without copy-in/copy-out
+ parameters, the result is just the call statement. */
+ else if (!TYPE_CI_CO_LIST (gnu_subprog_type))
+ append_to_statement_list (gnu_call, &gnu_stmt_list);
+
+ /* Finally, add the copy back statements, if any. */
+ append_to_statement_list (gnu_after_list, &gnu_stmt_list);
+
+ if (went_into_elab_proc)
+ current_function_decl = NULL_TREE;
+
+ /* If we have pushed a binding level, pop it and finish up the enclosing
+ statement group. */
+ if (pushed_binding_level)
+ {
+ add_stmt (gnu_stmt_list);
+ gnat_poplevel ();
+ gnu_result = end_stmt_group ();
+ }
+
+ /* Otherwise, retrieve the statement list, if any. */
+ else if (gnu_stmt_list)
+ gnu_result = gnu_stmt_list;
+
+ /* Otherwise, just return the call expression. */
+ else
+ return gnu_call;
+
+ /* If we nevertheless need a value, make a COMPOUND_EXPR to return it.
+ But first simplify if we have only one statement in the list. */
+ if (returning_value)
+ {
+ tree first = expr_first (gnu_result), last = expr_last (gnu_result);
+ if (first == last)
+ gnu_result = first;
+ gnu_result
+ = build_compound_expr (TREE_TYPE (gnu_call), gnu_result, gnu_call);
+ }
+
+ return gnu_result;
+}
+
+/* Subroutine of gnat_to_gnu to translate gnat_node, an
+ N_Handled_Sequence_Of_Statements, to a GCC tree, which is returned. */
+
+static tree
+Handled_Sequence_Of_Statements_to_gnu (Node_Id gnat_node)
+{
+ tree gnu_jmpsave_decl = NULL_TREE;
+ tree gnu_jmpbuf_decl = NULL_TREE;
+ /* If just annotating, ignore all EH and cleanups. */
+ bool gcc_zcx = (!type_annotate_only
+ && Present (Exception_Handlers (gnat_node))
+ && Exception_Mechanism == Back_End_Exceptions);
+ bool setjmp_longjmp
+ = (!type_annotate_only && Present (Exception_Handlers (gnat_node))
+ && Exception_Mechanism == Setjmp_Longjmp);
+ bool at_end = !type_annotate_only && Present (At_End_Proc (gnat_node));
+ bool binding_for_block = (at_end || gcc_zcx || setjmp_longjmp);
+ tree gnu_inner_block; /* The statement(s) for the block itself. */
+ tree gnu_result;
+ tree gnu_expr;
+ Node_Id gnat_temp;
+ /* Node providing the sloc for the cleanup actions. */
+  Node_Id gnat_cleanup_loc_node
+    = Present (End_Label (gnat_node)) ? End_Label (gnat_node) : gnat_node;
+
+ /* The GCC exception handling mechanism can handle both ZCX and SJLJ schemes
+ and we have our own SJLJ mechanism. To call the GCC mechanism, we call
+ add_cleanup, and when we leave the binding, end_stmt_group will create
+ the TRY_FINALLY_EXPR.
+
+ ??? The region level calls down there have been specifically put in place
+ for a ZCX context and currently the order in which things are emitted
+ (region/handlers) is different from the SJLJ case. Instead of putting
+ other calls with different conditions at other places for the SJLJ case,
+ it seems cleaner to reorder things for the SJLJ case and generalize the
+ condition to make it not ZCX specific.
+
+ If there are any exceptions or cleanup processing involved, we need an
+ outer statement group (for Setjmp_Longjmp) and binding level. */
+ if (binding_for_block)
+ {
+ start_stmt_group ();
+ gnat_pushlevel ();
+ }
+
+  /* If using setjmp_longjmp, make the variables for the setjmp buffer and the
+     save area for the address of the previous buffer.  Do this first since we
+     need the setjmp buffer known for any decls in this block.  */
+ if (setjmp_longjmp)
+ {
+ gnu_jmpsave_decl
+ = create_var_decl (get_identifier ("JMPBUF_SAVE"), NULL_TREE,
+ jmpbuf_ptr_type,
+ build_call_n_expr (get_jmpbuf_decl, 0),
+ false, false, false, false, NULL, gnat_node);
+ DECL_ARTIFICIAL (gnu_jmpsave_decl) = 1;
+
+      /* The __builtin_setjmp receivers will immediately reinstall it.  Now,
+	 because of the unstructured form of EH used by setjmp_longjmp, there
+	 might be forward edges going to __builtin_setjmp receivers on which
+	 it is uninitialized, although they will never be actually taken.  */
+ TREE_NO_WARNING (gnu_jmpsave_decl) = 1;
+ gnu_jmpbuf_decl
+ = create_var_decl (get_identifier ("JMP_BUF"), NULL_TREE,
+ jmpbuf_type,
+ NULL_TREE,
+ false, false, false, false, NULL, gnat_node);
+ DECL_ARTIFICIAL (gnu_jmpbuf_decl) = 1;
+
+ set_block_jmpbuf_decl (gnu_jmpbuf_decl);
+
+ /* When we exit this block, restore the saved value. */
+ add_cleanup (build_call_n_expr (set_jmpbuf_decl, 1, gnu_jmpsave_decl),
+ gnat_cleanup_loc_node);
+ }
+
+ /* If we are to call a function when exiting this block, add a cleanup
+ to the binding level we made above. Note that add_cleanup is FIFO
+ so we must register this cleanup after the EH cleanup just above. */
+ if (at_end)
+ add_cleanup (build_call_n_expr (gnat_to_gnu (At_End_Proc (gnat_node)), 0),
+ gnat_cleanup_loc_node);
+
+ /* Now build the tree for the declarations and statements inside this block.
+ If this is SJLJ, set our jmp_buf as the current buffer. */
+ start_stmt_group ();
+
+ if (setjmp_longjmp)
+ add_stmt (build_call_n_expr (set_jmpbuf_decl, 1,
+ build_unary_op (ADDR_EXPR, NULL_TREE,
+ gnu_jmpbuf_decl)));
+
+ if (Present (First_Real_Statement (gnat_node)))
+ process_decls (Statements (gnat_node), Empty,
+ First_Real_Statement (gnat_node), true, true);
+
+ /* Generate code for each statement in the block. */
+ for (gnat_temp = (Present (First_Real_Statement (gnat_node))
+ ? First_Real_Statement (gnat_node)
+ : First (Statements (gnat_node)));
+ Present (gnat_temp); gnat_temp = Next (gnat_temp))
+ add_stmt (gnat_to_gnu (gnat_temp));
+ gnu_inner_block = end_stmt_group ();
+
+ /* Now generate code for the two exception models, if either is relevant for
+ this block. */
+ if (setjmp_longjmp)
+ {
+      tree *gnu_else_ptr = NULL;
+ tree gnu_handler;
+
+ /* Make a binding level for the exception handling declarations and code
+ and set up gnu_except_ptr_stack for the handlers to use. */
+ start_stmt_group ();
+ gnat_pushlevel ();
+
+ vec_safe_push (gnu_except_ptr_stack,
+ create_var_decl (get_identifier ("EXCEPT_PTR"), NULL_TREE,
+ build_pointer_type (except_type_node),
+ build_call_n_expr (get_excptr_decl, 0),
+ false, false, false, false,
+ NULL, gnat_node));
+
+ /* Generate code for each handler. The N_Exception_Handler case does the
+ real work and returns a COND_EXPR for each handler, which we chain
+ together here. */
+ for (gnat_temp = First_Non_Pragma (Exception_Handlers (gnat_node));
+ Present (gnat_temp); gnat_temp = Next_Non_Pragma (gnat_temp))
+ {
+ gnu_expr = gnat_to_gnu (gnat_temp);
+
+ /* If this is the first one, set it as the outer one. Otherwise,
+ point the "else" part of the previous handler to us. Then point
+ to our "else" part. */
+ if (!gnu_else_ptr)
+ add_stmt (gnu_expr);
+ else
+ *gnu_else_ptr = gnu_expr;
+
+ gnu_else_ptr = &COND_EXPR_ELSE (gnu_expr);
+ }
+
+ /* If none of the exception handlers did anything, re-raise but do not
+ defer abortion. */
+ gnu_expr = build_call_n_expr (raise_nodefer_decl, 1,
+ gnu_except_ptr_stack->last ());
+ set_expr_location_from_node
+ (gnu_expr,
+ Present (End_Label (gnat_node)) ? End_Label (gnat_node) : gnat_node);
+
+ if (gnu_else_ptr)
+ *gnu_else_ptr = gnu_expr;
+ else
+ add_stmt (gnu_expr);
+
+ /* End the binding level dedicated to the exception handlers and get the
+ whole statement group. */
+ gnu_except_ptr_stack->pop ();
+ gnat_poplevel ();
+ gnu_handler = end_stmt_group ();
+
+ /* If the setjmp returns 1, we restore our incoming longjmp value and
+ then check the handlers. */
+ start_stmt_group ();
+ add_stmt_with_node (build_call_n_expr (set_jmpbuf_decl, 1,
+ gnu_jmpsave_decl),
+ gnat_node);
+ add_stmt (gnu_handler);
+ gnu_handler = end_stmt_group ();
+
+ /* This block is now "if (setjmp) ... <handlers> else <block>". */
+ gnu_result = build3 (COND_EXPR, void_type_node,
+ (build_call_n_expr
+ (setjmp_decl, 1,
+ build_unary_op (ADDR_EXPR, NULL_TREE,
+ gnu_jmpbuf_decl))),
+ gnu_handler, gnu_inner_block);
+ }
+ else if (gcc_zcx)
+ {
+ tree gnu_handlers;
+ location_t locus;
+
+ /* First make a block containing the handlers. */
+ start_stmt_group ();
+ for (gnat_temp = First_Non_Pragma (Exception_Handlers (gnat_node));
+ Present (gnat_temp);
+ gnat_temp = Next_Non_Pragma (gnat_temp))
+ add_stmt (gnat_to_gnu (gnat_temp));
+ gnu_handlers = end_stmt_group ();
+
+ /* Now make the TRY_CATCH_EXPR for the block. */
+ gnu_result = build2 (TRY_CATCH_EXPR, void_type_node,
+ gnu_inner_block, gnu_handlers);
+ /* Set a location. We need to find a unique location for the dispatching
+ code, otherwise we can get coverage or debugging issues. Try with
+ the location of the end label. */
+ if (Present (End_Label (gnat_node))
+ && Sloc_to_locus (Sloc (End_Label (gnat_node)), &locus))
+ SET_EXPR_LOCATION (gnu_result, locus);
+ else
+ /* Clear column information so that the exception handler of an
+ implicit transient block does not incorrectly inherit the slocs
+ of a decision, which would otherwise confuse control flow based
+ coverage analysis tools. */
+ set_expr_location_from_node1 (gnu_result, gnat_node, true);
+ }
+ else
+ gnu_result = gnu_inner_block;
+
+ /* Now close our outer block, if we had to make one. */
+ if (binding_for_block)
+ {
+ add_stmt (gnu_result);
+ gnat_poplevel ();
+ gnu_result = end_stmt_group ();
+ }
+
+ return gnu_result;
+}
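+
+/* For the Setjmp_Longjmp case, the tree built above therefore has roughly
+   the following shape, with <get_jmpbuf>, <set_jmpbuf> and <setjmp>
+   standing for the runtime routines behind the corresponding _decl nodes:
+
+     JMPBUF_SAVE = <get_jmpbuf> ();
+     if (<setjmp> (&JMP_BUF))
+       {
+         <set_jmpbuf> (JMPBUF_SAVE);
+         <handlers chained on EXCEPT_PTR>
+       }
+     else
+       {
+         <set_jmpbuf> (&JMP_BUF);
+         <statements>
+       }
+
+   followed on every exit path by the cleanup <set_jmpbuf> (JMPBUF_SAVE).  */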
+
+/* Subroutine of gnat_to_gnu to translate gnat_node, an N_Exception_Handler,
+ to a GCC tree, which is returned. This is the variant for Setjmp_Longjmp
+ exception handling. */
+
+static tree
+Exception_Handler_to_gnu_sjlj (Node_Id gnat_node)
+{
+  /* Unless this is "Others" or the special "Non-Ada" exception for Ada, make
+     an "if" statement to select the proper exceptions.  For "Others", exclude
+     exceptions where Not_Handled_By_Others is nonzero unless the All_Others
+     flag is set.  For "Non-Ada", accept an exception if "Lang" is 'V'.  */
+ tree gnu_choice = boolean_false_node;
+ tree gnu_body = build_stmt_group (Statements (gnat_node), false);
+ Node_Id gnat_temp;
+
+ for (gnat_temp = First (Exception_Choices (gnat_node));
+ gnat_temp; gnat_temp = Next (gnat_temp))
+ {
+ tree this_choice;
+
+ if (Nkind (gnat_temp) == N_Others_Choice)
+ {
+ if (All_Others (gnat_temp))
+ this_choice = boolean_true_node;
+ else
+ this_choice
+ = build_binary_op
+ (EQ_EXPR, boolean_type_node,
+ convert
+ (integer_type_node,
+ build_component_ref
+ (build_unary_op
+ (INDIRECT_REF, NULL_TREE,
+ gnu_except_ptr_stack->last ()),
+ get_identifier ("not_handled_by_others"), NULL_TREE,
+ false)),
+ integer_zero_node);
+ }
+
+ else if (Nkind (gnat_temp) == N_Identifier
+ || Nkind (gnat_temp) == N_Expanded_Name)
+ {
+ Entity_Id gnat_ex_id = Entity (gnat_temp);
+ tree gnu_expr;
+
+ /* Exception may be a renaming. Recover original exception which is
+ the one elaborated and registered. */
+ if (Present (Renamed_Object (gnat_ex_id)))
+ gnat_ex_id = Renamed_Object (gnat_ex_id);
+
+ gnu_expr = gnat_to_gnu_entity (gnat_ex_id, NULL_TREE, 0);
+
+ this_choice
+ = build_binary_op
+ (EQ_EXPR, boolean_type_node,
+ gnu_except_ptr_stack->last (),
+ convert (TREE_TYPE (gnu_except_ptr_stack->last ()),
+ build_unary_op (ADDR_EXPR, NULL_TREE, gnu_expr)));
+
+ /* If this is the distinguished exception "Non_Ada_Error" (and we are
+	     in VMS mode), also allow a non-Ada exception (a VMS condition) to
+ match. */
+ if (Is_Non_Ada_Error (Entity (gnat_temp)))
+ {
+ tree gnu_comp
+ = build_component_ref
+ (build_unary_op (INDIRECT_REF, NULL_TREE,
+ gnu_except_ptr_stack->last ()),
+ get_identifier ("lang"), NULL_TREE, false);
+
+ this_choice
+ = build_binary_op
+ (TRUTH_ORIF_EXPR, boolean_type_node,
+ build_binary_op (EQ_EXPR, boolean_type_node, gnu_comp,
+ build_int_cst (TREE_TYPE (gnu_comp), 'V')),
+ this_choice);
+ }
+ }
+ else
+ gcc_unreachable ();
+
+ gnu_choice = build_binary_op (TRUTH_ORIF_EXPR, boolean_type_node,
+ gnu_choice, this_choice);
+ }
+
+ return build3 (COND_EXPR, void_type_node, gnu_choice, gnu_body, NULL_TREE);
+}
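+
+/* So a handler part such as
+
+     exception
+        when Constraint_Error => S1;
+        when others           => S2;
+
+   yields two COND_EXPRs that the caller chains roughly into
+
+     if (EXCEPT_PTR == &constraint_error)
+       S1;
+     else if (!EXCEPT_PTR->not_handled_by_others)
+       S2;
+     else
+       <re-raise without deferring abort>;  */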
+
+/* Subroutine of gnat_to_gnu to translate gnat_node, an N_Exception_Handler,
+ to a GCC tree, which is returned. This is the variant for ZCX. */
+
+static tree
+Exception_Handler_to_gnu_zcx (Node_Id gnat_node)
+{
+ tree gnu_etypes_list = NULL_TREE;
+ tree gnu_current_exc_ptr, prev_gnu_incoming_exc_ptr;
+ Node_Id gnat_temp;
+
+ /* We build a TREE_LIST of nodes representing what exception types this
+ handler can catch, with special cases for others and all others cases.
+
+ Each exception type is actually identified by a pointer to the exception
+ id, or to a dummy object for "others" and "all others". */
+ for (gnat_temp = First (Exception_Choices (gnat_node));
+ gnat_temp; gnat_temp = Next (gnat_temp))
+ {
+ tree gnu_expr, gnu_etype;
+
+ if (Nkind (gnat_temp) == N_Others_Choice)
+ {
+ gnu_expr = All_Others (gnat_temp) ? all_others_decl : others_decl;
+ gnu_etype = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_expr);
+ }
+ else if (Nkind (gnat_temp) == N_Identifier
+ || Nkind (gnat_temp) == N_Expanded_Name)
+ {
+ Entity_Id gnat_ex_id = Entity (gnat_temp);
+
+ /* Exception may be a renaming. Recover original exception which is
+ the one elaborated and registered. */
+ if (Present (Renamed_Object (gnat_ex_id)))
+ gnat_ex_id = Renamed_Object (gnat_ex_id);
+
+ gnu_expr = gnat_to_gnu_entity (gnat_ex_id, NULL_TREE, 0);
+ gnu_etype = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_expr);
+
+ /* The Non_Ada_Error case for VMS exceptions is handled
+ by the personality routine. */
+ }
+ else
+ gcc_unreachable ();
+
+ /* The GCC interface expects NULL to be passed for catch all handlers, so
+ it would be quite tempting to set gnu_etypes_list to NULL if gnu_etype
+ is integer_zero_node. It would not work, however, because GCC's
+ notion of "catch all" is stronger than our notion of "others". Until
+ we correctly use the cleanup interface as well, doing that would
+ prevent the "all others" handlers from being seen, because nothing
+ can be caught beyond a catch all from GCC's point of view. */
+ gnu_etypes_list = tree_cons (NULL_TREE, gnu_etype, gnu_etypes_list);
+ }
+
+ start_stmt_group ();
+ gnat_pushlevel ();
+
+ /* Expand a call to the begin_handler hook at the beginning of the handler,
+ and arrange for a call to the end_handler hook to occur on every possible
+ exit path.
+
+ The hooks expect a pointer to the low level occurrence. This is required
+ for our stack management scheme because a raise inside the handler pushes
+ a new occurrence on top of the stack, which means that this top does not
+ necessarily match the occurrence this handler was dealing with.
+
+ __builtin_eh_pointer references the exception occurrence being
+ propagated. Upon handler entry, this is the exception for which the
+ handler is triggered. This might not be the case upon handler exit,
+ however, as we might have a new occurrence propagated by the handler's
+ body, and the end_handler hook called as a cleanup in this context.
+
+ We use a local variable to retrieve the incoming value at handler entry
+ time, and reuse it to feed the end_handler hook's argument at exit. */
+
+ gnu_current_exc_ptr
+ = build_call_expr (builtin_decl_explicit (BUILT_IN_EH_POINTER),
+ 1, integer_zero_node);
+ prev_gnu_incoming_exc_ptr = gnu_incoming_exc_ptr;
+ gnu_incoming_exc_ptr = create_var_decl (get_identifier ("EXPTR"), NULL_TREE,
+ ptr_type_node, gnu_current_exc_ptr,
+ false, false, false, false,
+ NULL, gnat_node);
+
+ add_stmt_with_node (build_call_n_expr (begin_handler_decl, 1,
+ gnu_incoming_exc_ptr),
+ gnat_node);
+
+ /* Declare and initialize the choice parameter, if present. */
+ if (Present (Choice_Parameter (gnat_node)))
+ {
+ tree gnu_param
+ = gnat_to_gnu_entity (Choice_Parameter (gnat_node), NULL_TREE, 1);
+
+ add_stmt (build_call_n_expr
+ (set_exception_parameter_decl, 2,
+ build_unary_op (ADDR_EXPR, NULL_TREE, gnu_param),
+ gnu_incoming_exc_ptr));
+ }
+
+ /* We don't have an End_Label at hand to set the location of the cleanup
+ actions, so we use that of the exception handler itself instead. */
+ add_cleanup (build_call_n_expr (end_handler_decl, 1, gnu_incoming_exc_ptr),
+ gnat_node);
+ add_stmt_list (Statements (gnat_node));
+ gnat_poplevel ();
+
+ gnu_incoming_exc_ptr = prev_gnu_incoming_exc_ptr;
+
+ return
+ build2 (CATCH_EXPR, void_type_node, gnu_etypes_list, end_stmt_group ());
+}
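+
+/* The CATCH_EXPR built above therefore behaves roughly like
+
+     catch (<list of exception id addresses>)
+       {
+         EXPTR = __builtin_eh_pointer (0);
+         <begin_handler> (EXPTR);
+         try { <choice parameter setup, statements> }
+         finally { <end_handler> (EXPTR); }
+       }
+
+   where the try/finally part comes from the cleanup registered just
+   above.  */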
+
+/* Subroutine of gnat_to_gnu to generate code for an N_Compilation_Unit.  */
+
+static void
+Compilation_Unit_to_gnu (Node_Id gnat_node)
+{
+ const Node_Id gnat_unit = Unit (gnat_node);
+ const bool body_p = (Nkind (gnat_unit) == N_Package_Body
+ || Nkind (gnat_unit) == N_Subprogram_Body);
+ const Entity_Id gnat_unit_entity = Defining_Entity (gnat_unit);
+ Node_Id gnat_pragma;
+ /* Make the decl for the elaboration procedure. */
+ tree gnu_elab_proc_decl
+ = create_subprog_decl
+ (create_concat_name (gnat_unit_entity, body_p ? "elabb" : "elabs"),
+ NULL_TREE, void_ftype, NULL_TREE, is_disabled, true, false, true, NULL,
+ gnat_unit);
+ struct elab_info *info;
+
+ vec_safe_push (gnu_elab_proc_stack, gnu_elab_proc_decl);
+ DECL_ELABORATION_PROC_P (gnu_elab_proc_decl) = 1;
+
+ /* Initialize the information structure for the function. */
+ allocate_struct_function (gnu_elab_proc_decl, false);
+ set_cfun (NULL);
+
+ current_function_decl = NULL_TREE;
+
+ start_stmt_group ();
+ gnat_pushlevel ();
+
+ /* For a body, first process the spec if there is one. */
+ if (Nkind (gnat_unit) == N_Package_Body
+ || (Nkind (gnat_unit) == N_Subprogram_Body && !Acts_As_Spec (gnat_node)))
+ add_stmt (gnat_to_gnu (Library_Unit (gnat_node)));
+
+ if (type_annotate_only && gnat_node == Cunit (Main_Unit))
+ {
+ elaborate_all_entities (gnat_node);
+
+ if (Nkind (gnat_unit) == N_Subprogram_Declaration
+ || Nkind (gnat_unit) == N_Generic_Package_Declaration
+ || Nkind (gnat_unit) == N_Generic_Subprogram_Declaration)
+ return;
+ }
+
+ /* Then process any pragmas and declarations preceding the unit. */
+ for (gnat_pragma = First (Context_Items (gnat_node));
+ Present (gnat_pragma);
+ gnat_pragma = Next (gnat_pragma))
+ if (Nkind (gnat_pragma) == N_Pragma)
+ add_stmt (gnat_to_gnu (gnat_pragma));
+ process_decls (Declarations (Aux_Decls_Node (gnat_node)), Empty, Empty,
+ true, true);
+
+ /* Process the unit itself. */
+ add_stmt (gnat_to_gnu (gnat_unit));
+
+ /* If we can inline, generate code for all the inlined subprograms. */
+ if (optimize)
+ {
+ Entity_Id gnat_entity;
+
+ for (gnat_entity = First_Inlined_Subprogram (gnat_node);
+ Present (gnat_entity);
+ gnat_entity = Next_Inlined_Subprogram (gnat_entity))
+ {
+ Node_Id gnat_body = Parent (Declaration_Node (gnat_entity));
+
+ if (Nkind (gnat_body) != N_Subprogram_Body)
+ {
+ /* ??? This really should always be present. */
+ if (No (Corresponding_Body (gnat_body)))
+ continue;
+ gnat_body
+ = Parent (Declaration_Node (Corresponding_Body (gnat_body)));
+ }
+
+ if (Present (gnat_body))
+ {
+ /* Define the entity first so we set DECL_EXTERNAL. */
+ gnat_to_gnu_entity (gnat_entity, NULL_TREE, 0);
+ add_stmt (gnat_to_gnu (gnat_body));
+ }
+ }
+ }
+
+ /* Process any pragmas and actions following the unit. */
+ add_stmt_list (Pragmas_After (Aux_Decls_Node (gnat_node)));
+ add_stmt_list (Actions (Aux_Decls_Node (gnat_node)));
+ finalize_from_limited_with ();
+
+ /* Save away what we've made so far and record this potential elaboration
+ procedure. */
+ info = ggc_alloc_elab_info ();
+ set_current_block_context (gnu_elab_proc_decl);
+ gnat_poplevel ();
+ DECL_SAVED_TREE (gnu_elab_proc_decl) = end_stmt_group ();
+
+ set_end_locus_from_node (gnu_elab_proc_decl, gnat_unit);
+
+ info->next = elab_info_list;
+ info->elab_proc = gnu_elab_proc_decl;
+ info->gnat_node = gnat_node;
+ elab_info_list = info;
+
+ /* Pop the elaboration procedure now that we are done with this unit. */
+ gnu_elab_proc_stack->pop ();
+
+ /* Invalidate the global renaming pointers. This is necessary because
+ stabilization of the renamed entities may create SAVE_EXPRs which
+ have been tied to a specific elaboration routine just above. */
+ invalidate_global_renaming_pointers ();
+}
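+
+/* For a library unit P, the above typically produces procedures whose
+ external names are along the lines of p___elabs (for the spec) and
+ p___elabb (for the body); the binder-generated code then calls them in
+ elaboration order. The exact names come from create_concat_name. */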
+
+/* Subroutine of gnat_to_gnu to translate GNAT_NODE, an N_Raise_xxx_Error,
+ to a GCC tree, which is returned. GNU_RESULT_TYPE_P is a pointer to where
+ we should place the result type. */
+
+static tree
+Raise_Error_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p)
+{
+ const Node_Kind kind = Nkind (gnat_node);
+ const int reason = UI_To_Int (Reason (gnat_node));
+ const Node_Id gnat_cond = Condition (gnat_node);
+ const bool with_extra_info
+ = Exception_Extra_Info
+ && !No_Exception_Handlers_Set ()
+ && !get_exception_label (kind);
+ tree gnu_result = NULL_TREE, gnu_cond = NULL_TREE;
+
+ *gnu_result_type_p = get_unpadded_type (Etype (gnat_node));
+
+ switch (reason)
+ {
+ case CE_Access_Check_Failed:
+ if (with_extra_info)
+ gnu_result = build_call_raise_column (reason, gnat_node);
+ break;
+
+ case CE_Index_Check_Failed:
+ case CE_Range_Check_Failed:
+ case CE_Invalid_Data:
+ if (Present (gnat_cond) && Nkind (gnat_cond) == N_Op_Not)
+ {
+ Node_Id gnat_range, gnat_index, gnat_type;
+ tree gnu_index, gnu_low_bound, gnu_high_bound;
+ struct range_check_info_d *rci;
+
+ switch (Nkind (Right_Opnd (gnat_cond)))
+ {
+ case N_In:
+ gnat_range = Right_Opnd (Right_Opnd (gnat_cond));
+ gcc_assert (Nkind (gnat_range) == N_Range);
+ gnu_low_bound = gnat_to_gnu (Low_Bound (gnat_range));
+ gnu_high_bound = gnat_to_gnu (High_Bound (gnat_range));
+ break;
+
+ case N_Op_Ge:
+ gnu_low_bound = gnat_to_gnu (Right_Opnd (Right_Opnd (gnat_cond)));
+ gnu_high_bound = NULL_TREE;
+ break;
+
+ case N_Op_Le:
+ gnu_low_bound = NULL_TREE;
+ gnu_high_bound = gnat_to_gnu (Right_Opnd (Right_Opnd (gnat_cond)));
+ break;
+
+ default:
+ goto common;
+ }
+
+ gnat_index = Left_Opnd (Right_Opnd (gnat_cond));
+ gnat_type = Etype (gnat_index);
+ gnu_index = gnat_to_gnu (gnat_index);
+
+ if (with_extra_info
+ && gnu_low_bound
+ && gnu_high_bound
+ && Known_Esize (gnat_type)
+ && UI_To_Int (Esize (gnat_type)) <= 32)
+ gnu_result
+ = build_call_raise_range (reason, gnat_node, gnu_index,
+ gnu_low_bound, gnu_high_bound);
+
+ /* If loop unswitching is enabled, we try to compute invariant
+ conditions for checks applied to iteration variables, i.e.
+ conditions that are both independent of the variable and
+ necessary in order for the check to fail in the course of
+ some iteration, and prepend them to the original condition
+ of the checks. This will make it possible later for the
+ loop unswitching pass to replace the loop with two loops,
+ one of which has the checks eliminated and the other has
+ the original checks reinstated, and a run time selection.
+ The former loop will be suitable for vectorization. */
+ if (flag_unswitch_loops
+ && (!gnu_low_bound
+ || (gnu_low_bound = gnat_invariant_expr (gnu_low_bound)))
+ && (!gnu_high_bound
+ || (gnu_high_bound = gnat_invariant_expr (gnu_high_bound)))
+ && (rci = push_range_check_info (gnu_index)))
+ {
+ rci->low_bound = gnu_low_bound;
+ rci->high_bound = gnu_high_bound;
+ rci->type = get_unpadded_type (gnat_type);
+ rci->invariant_cond = build1 (SAVE_EXPR, boolean_type_node,
+ boolean_true_node);
+ gnu_cond = build_binary_op (TRUTH_ANDIF_EXPR,
+ boolean_type_node,
+ rci->invariant_cond,
+ gnat_to_gnu (gnat_cond));
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+common:
+ if (!gnu_result)
+ gnu_result = build_call_raise (reason, gnat_node, kind);
+ set_expr_location_from_node (gnu_result, gnat_node);
+
+ /* If the type is VOID, this is a statement, so we need to generate the code
+ for the call. Handle a condition, if there is one. */
+ if (VOID_TYPE_P (*gnu_result_type_p))
+ {
+ if (Present (gnat_cond))
+ {
+ if (!gnu_cond)
+ gnu_cond = gnat_to_gnu (gnat_cond);
+ gnu_result = build3 (COND_EXPR, void_type_node, gnu_cond, gnu_result,
+ alloc_stmt_list ());
+ }
+ }
+ else
+ gnu_result = build1 (NULL_EXPR, *gnu_result_type_p, gnu_result);
+
+ return gnu_result;
+}
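+
+/* For instance, a range check that the front-end roughly prints as
+ [constraint_error when X > 10 "range check failed"] reaches this
+ function with a Condition, so the code above wraps the raise call in a
+ COND_EXPR that fires only when X > 10 and does nothing otherwise. */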
+
+/* Return true if GNAT_NODE is on the LHS of an assignment or an actual
+ parameter of a call. */
+
+static bool
+lhs_or_actual_p (Node_Id gnat_node)
+{
+ Node_Id gnat_parent = Parent (gnat_node);
+ Node_Kind kind = Nkind (gnat_parent);
+
+ if (kind == N_Assignment_Statement && Name (gnat_parent) == gnat_node)
+ return true;
+
+ if ((kind == N_Procedure_Call_Statement || kind == N_Function_Call)
+ && Name (gnat_parent) != gnat_node)
+ return true;
+
+ if (kind == N_Parameter_Association)
+ return true;
+
+ return false;
+}
+
+/* Return true if either GNAT_NODE or a view of GNAT_NODE is on the LHS
+ of an assignment or an actual parameter of a call. */
+
+static bool
+present_in_lhs_or_actual_p (Node_Id gnat_node)
+{
+ Node_Kind kind;
+
+ if (lhs_or_actual_p (gnat_node))
+ return true;
+
+ kind = Nkind (Parent (gnat_node));
+
+ if ((kind == N_Type_Conversion || kind == N_Unchecked_Type_Conversion)
+ && lhs_or_actual_p (Parent (gnat_node)))
+ return true;
+
+ return false;
+}
+
+/* Return true if GNAT_NODE, an unchecked type conversion, is a no-op as far
+ as gigi is concerned. This is used to avoid conversions on the LHS. */
+
+static bool
+unchecked_conversion_nop (Node_Id gnat_node)
+{
+ Entity_Id from_type, to_type;
+
+ /* The conversion must be on the LHS of an assignment or an actual parameter
+ of a call. Otherwise, even if the conversion was essentially a no-op, it
+ could de facto ensure type consistency and this should be preserved. */
+ if (!lhs_or_actual_p (gnat_node))
+ return false;
+
+ from_type = Etype (Expression (gnat_node));
+
+ /* We're interested in artificial conversions generated by the front-end
+ to make private types explicit, e.g. in Expand_Assign_Array. */
+ if (!Is_Private_Type (from_type))
+ return false;
+
+ from_type = Underlying_Type (from_type);
+ to_type = Etype (gnat_node);
+
+ /* The direct conversion to the underlying type is a no-op. */
+ if (to_type == from_type)
+ return true;
+
+ /* For an array subtype, the conversion to the PAT is a no-op. */
+ if (Ekind (from_type) == E_Array_Subtype
+ && to_type == Packed_Array_Type (from_type))
+ return true;
+
+ /* For a record subtype, the conversion to the type is a no-op. */
+ if (Ekind (from_type) == E_Record_Subtype
+ && to_type == Etype (from_type))
+ return true;
+
+ return false;
+}
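+
+/* As an example, Expand_Assign_Array may rewrite an assignment between
+ objects of a private type whose full view is an array type into an
+ assignment between unchecked conversions to that full view; the
+ conversion wrapping the name on the LHS is exactly the kind of no-op
+ recognized above. */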
+
+/* This function is the driver of the GNAT to GCC tree transformation process.
+ It is the entry point of the tree transformer. GNAT_NODE is the root of
+ some GNAT tree. Return the root of the corresponding GCC tree. If this
+ is an expression, return the GCC equivalent of the expression. If this
+ is a statement, return the statement or add it to the current statement
+ group, in which case anything returned is to be interpreted as occurring
+ after anything added. */
+
+tree
+gnat_to_gnu (Node_Id gnat_node)
+{
+ const Node_Kind kind = Nkind (gnat_node);
+ bool went_into_elab_proc = false;
+ tree gnu_result = error_mark_node; /* Default to no value. */
+ tree gnu_result_type = void_type_node;
+ tree gnu_expr, gnu_lhs, gnu_rhs;
+ Node_Id gnat_temp;
+
+ /* Save node number for error message and set location information. */
+ error_gnat_node = gnat_node;
+ Sloc_to_locus (Sloc (gnat_node), &input_location);
+
+ /* If this node is a statement and we are only annotating types, return an
+ empty statement list. */
+ if (type_annotate_only && IN (kind, N_Statement_Other_Than_Procedure_Call))
+ return alloc_stmt_list ();
+
+ /* If this node is a non-static subexpression and we are only annotating
+ types, make this into a NULL_EXPR. */
+ if (type_annotate_only
+ && IN (kind, N_Subexpr)
+ && kind != N_Identifier
+ && !Compile_Time_Known_Value (gnat_node))
+ return build1 (NULL_EXPR, get_unpadded_type (Etype (gnat_node)),
+ build_call_raise (CE_Range_Check_Failed, gnat_node,
+ N_Raise_Constraint_Error));
+
+ if ((IN (kind, N_Statement_Other_Than_Procedure_Call)
+ && kind != N_Null_Statement)
+ || kind == N_Procedure_Call_Statement
+ || kind == N_Label
+ || kind == N_Implicit_Label_Declaration
+ || kind == N_Handled_Sequence_Of_Statements
+ || (IN (kind, N_Raise_xxx_Error) && Ekind (Etype (gnat_node)) == E_Void))
+ {
+ tree current_elab_proc = get_elaboration_procedure ();
+
+ /* If this is a statement and we are at top level, it must be part of
+ the elaboration procedure, so mark us as being in that procedure. */
+ if (!current_function_decl)
+ {
+ current_function_decl = current_elab_proc;
+ went_into_elab_proc = true;
+ }
+
+ /* If we are in the elaboration procedure, check if we are violating a
+ No_Elaboration_Code restriction by having a statement there. Don't
+ check for a possible No_Elaboration_Code restriction violation on
+ N_Handled_Sequence_Of_Statements, as we want to signal an error on
+ every nested real statement instead. This also avoids triggering
+ spurious errors on dummy (empty) sequences created by the front-end
+ for package bodies in some cases. */
+ if (current_function_decl == current_elab_proc
+ && kind != N_Handled_Sequence_Of_Statements)
+ Check_Elaboration_Code_Allowed (gnat_node);
+ }
+
+ switch (kind)
+ {
+ /********************************/
+ /* Chapter 2: Lexical Elements */
+ /********************************/
+
+ case N_Identifier:
+ case N_Expanded_Name:
+ case N_Operator_Symbol:
+ case N_Defining_Identifier:
+ gnu_result = Identifier_to_gnu (gnat_node, &gnu_result_type);
+
+ /* If this is an atomic access on the RHS for which synchronization is
+ required, build the atomic load. */
+ if (atomic_sync_required_p (gnat_node)
+ && !present_in_lhs_or_actual_p (gnat_node))
+ gnu_result = build_atomic_load (gnu_result);
+ break;
+
+ case N_Integer_Literal:
+ {
+ tree gnu_type;
+
+ /* Get the type of the result, looking inside any padding and
+ justified modular types. Then get the value in that type. */
+ gnu_type = gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ if (TREE_CODE (gnu_type) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (gnu_type))
+ gnu_type = TREE_TYPE (TYPE_FIELDS (gnu_type));
+
+ gnu_result = UI_To_gnu (Intval (gnat_node), gnu_type);
+
+ /* If the result overflows (meaning it doesn't fit in its base type),
+ abort. We would like to check that the value is within the range
+ of the subtype, but that causes problems with subtypes whose usage
+ will raise Constraint_Error and with biased representation, so
+ we don't. */
+ gcc_assert (!TREE_OVERFLOW (gnu_result));
+ }
+ break;
+
+ case N_Character_Literal:
+ /* If an Entity is present, it means that this was one of the
+ literals in a user-defined character type, so just return the
+ value stored in the CONST_DECL. Otherwise, use the character
+ code; the base type should then be an INTEGER_TYPE, but we
+ won't bother checking for that. */
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ if (Present (Entity (gnat_node)))
+ gnu_result = DECL_INITIAL (get_gnu_tree (Entity (gnat_node)));
+ else
+ gnu_result
+ = build_int_cst_type
+ (gnu_result_type, UI_To_CC (Char_Literal_Value (gnat_node)));
+ break;
+
+ case N_Real_Literal:
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ /* If this is of a fixed-point type, the value we want is the
+ value of the corresponding integer. */
+ if (IN (Ekind (Underlying_Type (Etype (gnat_node))), Fixed_Point_Kind))
+ {
+ gnu_result = UI_To_gnu (Corresponding_Integer_Value (gnat_node),
+ gnu_result_type);
+ gcc_assert (!TREE_OVERFLOW (gnu_result));
+ }
+
+ /* Convert the Ureal to a VAX float (represented on a signed type). */
+ else if (Vax_Float (Underlying_Type (Etype (gnat_node))))
+ {
+ gnu_result = UI_To_gnu (Get_Vax_Real_Literal_As_Signed (gnat_node),
+ gnu_result_type);
+ }
+
+ else
+ {
+ Ureal ur_realval = Realval (gnat_node);
+
+ /* First convert the real value to a machine number if it isn't
+ already. That forces BASE to 2 for non-zero values and simplifies
+ the rest of our logic. */
+
+ if (!Is_Machine_Number (gnat_node))
+ ur_realval
+ = Machine (Base_Type (Underlying_Type (Etype (gnat_node))),
+ ur_realval, Round_Even, gnat_node);
+
+ if (UR_Is_Zero (ur_realval))
+ gnu_result = convert (gnu_result_type, integer_zero_node);
+ else
+ {
+ REAL_VALUE_TYPE tmp;
+
+ gnu_result
+ = UI_To_gnu (Numerator (ur_realval), gnu_result_type);
+
+ /* The base must be 2 as Machine guarantees this, so we scale
+ the value, which we know can fit in the mantissa of the type
+ (hence the use of that type above). */
+
+ gcc_assert (Rbase (ur_realval) == 2);
+ real_ldexp (&tmp, &TREE_REAL_CST (gnu_result),
+ - UI_To_Int (Denominator (ur_realval)));
+ gnu_result = build_real (gnu_result_type, tmp);
+ }
+
+ /* Now see if we need to negate the result. Do it this way to
+ properly handle -0. */
+ if (UR_Is_Negative (Realval (gnat_node)))
+ gnu_result
+ = build_unary_op (NEGATE_EXPR, get_base_type (gnu_result_type),
+ gnu_result);
+ }
+
+ break;
+
+ case N_String_Literal:
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ if (TYPE_PRECISION (TREE_TYPE (gnu_result_type)) == HOST_BITS_PER_CHAR)
+ {
+ String_Id gnat_string = Strval (gnat_node);
+ int length = String_Length (gnat_string);
+ int i;
+ char *string;
+ if (length >= ALLOCA_THRESHOLD)
+ string = XNEWVEC (char, length + 1);
+ else
+ string = (char *) alloca (length + 1);
+
+ /* Build the string with the characters in the literal. Note
+ that Ada strings are 1-origin. */
+ for (i = 0; i < length; i++)
+ string[i] = Get_String_Char (gnat_string, i + 1);
+
+ /* Put a null at the end of the string in case it's in a context
+ where GCC will want to treat it as a C string. */
+ string[i] = 0;
+
+ gnu_result = build_string (length, string);
+
+ /* Strings in GCC don't normally have types, but we want
+ this to not be converted to the array type. */
+ TREE_TYPE (gnu_result) = gnu_result_type;
+
+ if (length >= ALLOCA_THRESHOLD)
+ free (string);
+ }
+ else
+ {
+ /* Build a list consisting of each character, then make
+ the aggregate. */
+ String_Id gnat_string = Strval (gnat_node);
+ int length = String_Length (gnat_string);
+ int i;
+ tree gnu_idx = TYPE_MIN_VALUE (TYPE_DOMAIN (gnu_result_type));
+ vec<constructor_elt, va_gc> *gnu_vec;
+ vec_alloc (gnu_vec, length);
+
+ for (i = 0; i < length; i++)
+ {
+ tree t = build_int_cst (TREE_TYPE (gnu_result_type),
+ Get_String_Char (gnat_string, i + 1));
+
+ CONSTRUCTOR_APPEND_ELT (gnu_vec, gnu_idx, t);
+ gnu_idx = int_const_binop (PLUS_EXPR, gnu_idx, integer_one_node);
+ }
+
+ gnu_result = gnat_build_constructor (gnu_result_type, gnu_vec);
+ }
+ break;
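+
+ /* So a literal of a string type whose component fits in a byte, say
+ "AB", becomes a STRING_CST directly, whereas e.g. a Wide_String
+ literal is built as a positional aggregate with one element per
+ character. */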
+
+ case N_Pragma:
+ gnu_result = Pragma_to_gnu (gnat_node);
+ break;
+
+ /**************************************/
+ /* Chapter 3: Declarations and Types */
+ /**************************************/
+
+ case N_Subtype_Declaration:
+ case N_Full_Type_Declaration:
+ case N_Incomplete_Type_Declaration:
+ case N_Private_Type_Declaration:
+ case N_Private_Extension_Declaration:
+ case N_Task_Type_Declaration:
+ process_type (Defining_Entity (gnat_node));
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Object_Declaration:
+ case N_Exception_Declaration:
+ gnat_temp = Defining_Entity (gnat_node);
+ gnu_result = alloc_stmt_list ();
+
+ /* If we are just annotating types and this object has an unconstrained
+ or task type, don't elaborate it. */
+ if (type_annotate_only
+ && (((Is_Array_Type (Etype (gnat_temp))
+ || Is_Record_Type (Etype (gnat_temp)))
+ && !Is_Constrained (Etype (gnat_temp)))
+ || Is_Concurrent_Type (Etype (gnat_temp))))
+ break;
+
+ if (Present (Expression (gnat_node))
+ && !(kind == N_Object_Declaration && No_Initialization (gnat_node))
+ && (!type_annotate_only
+ || Compile_Time_Known_Value (Expression (gnat_node))))
+ {
+ gnu_expr = gnat_to_gnu (Expression (gnat_node));
+ if (Do_Range_Check (Expression (gnat_node)))
+ gnu_expr
+ = emit_range_check (gnu_expr, Etype (gnat_temp), gnat_node);
+
+ /* If this object has its elaboration delayed, we must force
+ evaluation of GNU_EXPR right now and save it for when the object
+ is frozen. */
+ if (Present (Freeze_Node (gnat_temp)))
+ {
+ if (TREE_CONSTANT (gnu_expr))
+ ;
+ else if (global_bindings_p ())
+ gnu_expr
+ = create_var_decl (create_concat_name (gnat_temp, "init"),
+ NULL_TREE, TREE_TYPE (gnu_expr), gnu_expr,
+ false, false, false, false,
+ NULL, gnat_temp);
+ else
+ gnu_expr = gnat_save_expr (gnu_expr);
+
+ save_gnu_tree (gnat_node, gnu_expr, true);
+ }
+ }
+ else
+ gnu_expr = NULL_TREE;
+
+ if (type_annotate_only && gnu_expr && TREE_CODE (gnu_expr) == ERROR_MARK)
+ gnu_expr = NULL_TREE;
+
+ /* If this is a deferred constant with an address clause, we ignore the
+ full view since the clause is on the partial view and we cannot have
+ two different GCC trees for the object. The only part of the full view
+ we will use is the initializer, and it will be fetched directly. */
+ if (Ekind (gnat_temp) == E_Constant
+ && Present (Address_Clause (gnat_temp))
+ && Present (Full_View (gnat_temp)))
+ save_gnu_tree (Full_View (gnat_temp), error_mark_node, true);
+
+ if (No (Freeze_Node (gnat_temp)))
+ gnat_to_gnu_entity (gnat_temp, gnu_expr, 1);
+ break;
+
+ case N_Object_Renaming_Declaration:
+ gnat_temp = Defining_Entity (gnat_node);
+
+ /* Don't do anything if this renaming is handled by the front end, or if
+ we are just annotating types and this object has a composite or task
+ type; in either case there is nothing to elaborate. We return the
+ result in case it has any SAVE_EXPRs that need to be evaluated here. */
+ if (!Is_Renaming_Of_Object (gnat_temp)
+ && ! (type_annotate_only
+ && (Is_Array_Type (Etype (gnat_temp))
+ || Is_Record_Type (Etype (gnat_temp))
+ || Is_Concurrent_Type (Etype (gnat_temp)))))
+ gnu_result
+ = gnat_to_gnu_entity (gnat_temp,
+ gnat_to_gnu (Renamed_Object (gnat_temp)), 1);
+ else
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Implicit_Label_Declaration:
+ gnat_to_gnu_entity (Defining_Entity (gnat_node), NULL_TREE, 1);
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Exception_Renaming_Declaration:
+ case N_Number_Declaration:
+ case N_Package_Renaming_Declaration:
+ case N_Subprogram_Renaming_Declaration:
+ /* These are fully handled in the front end. */
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ /*************************************/
+ /* Chapter 4: Names and Expressions */
+ /*************************************/
+
+ case N_Explicit_Dereference:
+ gnu_result = gnat_to_gnu (Prefix (gnat_node));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ gnu_result = build_unary_op (INDIRECT_REF, NULL_TREE, gnu_result);
+
+ /* If this is an atomic access on the RHS for which synchronization is
+ required, build the atomic load. */
+ if (atomic_sync_required_p (gnat_node)
+ && !present_in_lhs_or_actual_p (gnat_node))
+ gnu_result = build_atomic_load (gnu_result);
+ break;
+
+ case N_Indexed_Component:
+ {
+ tree gnu_array_object = gnat_to_gnu (Prefix (gnat_node));
+ tree gnu_type;
+ int ndim;
+ int i;
+ Node_Id *gnat_expr_array;
+
+ gnu_array_object = maybe_implicit_deref (gnu_array_object);
+
+ /* Convert vector inputs to their representative array type, to fit
+ what the code below expects. */
+ if (VECTOR_TYPE_P (TREE_TYPE (gnu_array_object)))
+ {
+ if (present_in_lhs_or_actual_p (gnat_node))
+ gnat_mark_addressable (gnu_array_object);
+ gnu_array_object = maybe_vector_array (gnu_array_object);
+ }
+
+ gnu_array_object = maybe_unconstrained_array (gnu_array_object);
+
+ /* If we got a padded type, remove it too. */
+ if (TYPE_IS_PADDING_P (TREE_TYPE (gnu_array_object)))
+ gnu_array_object
+ = convert (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_array_object))),
+ gnu_array_object);
+
+ gnu_result = gnu_array_object;
+
+ /* The failure of this assertion will very likely come from a missing
+ expansion for a packed array access. */
+ gcc_assert (TREE_CODE (TREE_TYPE (gnu_array_object)) == ARRAY_TYPE);
+
+ /* First compute the number of dimensions of the array, then
+ fill the expression array, the order depending on whether
+ this is a Convention_Fortran array or not. */
+ for (ndim = 1, gnu_type = TREE_TYPE (gnu_array_object);
+ TREE_CODE (TREE_TYPE (gnu_type)) == ARRAY_TYPE
+ && TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_type));
+ ndim++, gnu_type = TREE_TYPE (gnu_type))
+ ;
+
+ gnat_expr_array = XALLOCAVEC (Node_Id, ndim);
+
+ if (TYPE_CONVENTION_FORTRAN_P (TREE_TYPE (gnu_array_object)))
+ for (i = ndim - 1, gnat_temp = First (Expressions (gnat_node));
+ i >= 0;
+ i--, gnat_temp = Next (gnat_temp))
+ gnat_expr_array[i] = gnat_temp;
+ else
+ for (i = 0, gnat_temp = First (Expressions (gnat_node));
+ i < ndim;
+ i++, gnat_temp = Next (gnat_temp))
+ gnat_expr_array[i] = gnat_temp;
+
+ for (i = 0, gnu_type = TREE_TYPE (gnu_array_object);
+ i < ndim; i++, gnu_type = TREE_TYPE (gnu_type))
+ {
+ gcc_assert (TREE_CODE (gnu_type) == ARRAY_TYPE);
+ gnat_temp = gnat_expr_array[i];
+ gnu_expr = gnat_to_gnu (gnat_temp);
+
+ if (Do_Range_Check (gnat_temp))
+ gnu_expr
+ = emit_index_check
+ (gnu_array_object, gnu_expr,
+ TYPE_MIN_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type))),
+ TYPE_MAX_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type))),
+ gnat_temp);
+
+ gnu_result = build_binary_op (ARRAY_REF, NULL_TREE,
+ gnu_result, gnu_expr);
+ }
+
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ /* If this is an atomic access on the RHS for which synchronization is
+ required, build the atomic load. */
+ if (atomic_sync_required_p (gnat_node)
+ && !present_in_lhs_or_actual_p (gnat_node))
+ gnu_result = build_atomic_load (gnu_result);
+ }
+ break;
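+
+ /* Note the index order: for a Convention_Fortran (column-major) array,
+ the Ada indices are consumed in reverse, so that A (I, J) ends up as
+ the reference A[J][I] on the row-major nested-array representation
+ used by gigi. */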
+
+ case N_Slice:
+ {
+ Node_Id gnat_range_node = Discrete_Range (gnat_node);
+ tree gnu_type;
+
+ gnu_result = gnat_to_gnu (Prefix (gnat_node));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ /* Do any implicit dereferences of the prefix and do any needed
+ range check. */
+ gnu_result = maybe_implicit_deref (gnu_result);
+ gnu_result = maybe_unconstrained_array (gnu_result);
+ gnu_type = TREE_TYPE (gnu_result);
+ if (Do_Range_Check (gnat_range_node))
+ {
+ /* Get the bounds of the slice. */
+ tree gnu_index_type
+ = TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_result_type));
+ tree gnu_min_expr = TYPE_MIN_VALUE (gnu_index_type);
+ tree gnu_max_expr = TYPE_MAX_VALUE (gnu_index_type);
+ /* Get the permitted bounds. */
+ tree gnu_base_index_type
+ = TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type));
+ tree gnu_base_min_expr = SUBSTITUTE_PLACEHOLDER_IN_EXPR
+ (TYPE_MIN_VALUE (gnu_base_index_type), gnu_result);
+ tree gnu_base_max_expr = SUBSTITUTE_PLACEHOLDER_IN_EXPR
+ (TYPE_MAX_VALUE (gnu_base_index_type), gnu_result);
+ tree gnu_expr_l, gnu_expr_h, gnu_expr_type;
+
+ gnu_min_expr = gnat_protect_expr (gnu_min_expr);
+ gnu_max_expr = gnat_protect_expr (gnu_max_expr);
+
+ /* Derive a good type to convert everything to. */
+ gnu_expr_type = get_base_type (gnu_index_type);
+
+ /* Test whether the minimum slice value is too small. */
+ gnu_expr_l = build_binary_op (LT_EXPR, boolean_type_node,
+ convert (gnu_expr_type,
+ gnu_min_expr),
+ convert (gnu_expr_type,
+ gnu_base_min_expr));
+
+ /* Test whether the maximum slice value is too large. */
+ gnu_expr_h = build_binary_op (GT_EXPR, boolean_type_node,
+ convert (gnu_expr_type,
+ gnu_max_expr),
+ convert (gnu_expr_type,
+ gnu_base_max_expr));
+
+ /* Build a slice index check that returns the low bound,
+ assuming the slice is not empty. */
+ gnu_expr = emit_check
+ (build_binary_op (TRUTH_ORIF_EXPR, boolean_type_node,
+ gnu_expr_l, gnu_expr_h),
+ gnu_min_expr, CE_Index_Check_Failed, gnat_node);
+
+ /* Build a conditional expression that does the index checks and
+ returns the low bound if the slice is not empty (max >= min),
+ and returns the naked low bound otherwise (max < min), unless
+ it is non-constant and the high bound is; this prevents VRP
+ from inferring bogus ranges on the unlikely path. */
+ gnu_expr = fold_build3 (COND_EXPR, gnu_expr_type,
+ build_binary_op (GE_EXPR, gnu_expr_type,
+ convert (gnu_expr_type,
+ gnu_max_expr),
+ convert (gnu_expr_type,
+ gnu_min_expr)),
+ gnu_expr,
+ TREE_CODE (gnu_min_expr) != INTEGER_CST
+ && TREE_CODE (gnu_max_expr) == INTEGER_CST
+ ? gnu_max_expr : gnu_min_expr);
+ }
+ else
+ /* Simply return the naked low bound. */
+ gnu_expr = TYPE_MIN_VALUE (TYPE_DOMAIN (gnu_result_type));
+
+ /* If this is a slice with non-constant size of an array with constant
+ size, set the maximum size for the allocation of temporaries. */
+ if (!TREE_CONSTANT (TYPE_SIZE_UNIT (gnu_result_type))
+ && TREE_CONSTANT (TYPE_SIZE_UNIT (gnu_type)))
+ TYPE_ARRAY_MAX_SIZE (gnu_result_type) = TYPE_SIZE_UNIT (gnu_type);
+
+ gnu_result = build_binary_op (ARRAY_RANGE_REF, gnu_result_type,
+ gnu_result, gnu_expr);
+ }
+ break;
+
+ case N_Selected_Component:
+ {
+ tree gnu_prefix = gnat_to_gnu (Prefix (gnat_node));
+ Entity_Id gnat_field = Entity (Selector_Name (gnat_node));
+ Entity_Id gnat_pref_type = Etype (Prefix (gnat_node));
+ tree gnu_field;
+
+ while (IN (Ekind (gnat_pref_type), Incomplete_Or_Private_Kind)
+ || IN (Ekind (gnat_pref_type), Access_Kind))
+ {
+ if (IN (Ekind (gnat_pref_type), Incomplete_Or_Private_Kind))
+ gnat_pref_type = Underlying_Type (gnat_pref_type);
+ else if (IN (Ekind (gnat_pref_type), Access_Kind))
+ gnat_pref_type = Designated_Type (gnat_pref_type);
+ }
+
+ gnu_prefix = maybe_implicit_deref (gnu_prefix);
+
+ /* For discriminant references in tagged types always substitute the
+ corresponding discriminant as the actual selected component. */
+ if (Is_Tagged_Type (gnat_pref_type))
+ while (Present (Corresponding_Discriminant (gnat_field)))
+ gnat_field = Corresponding_Discriminant (gnat_field);
+
+ /* For discriminant references of untagged types always substitute the
+ corresponding stored discriminant. */
+ else if (Present (Corresponding_Discriminant (gnat_field)))
+ gnat_field = Original_Record_Component (gnat_field);
+
+ /* Handle extracting the real or imaginary part of a complex.
+ The real part is the first field and the imaginary the last. */
+ if (TREE_CODE (TREE_TYPE (gnu_prefix)) == COMPLEX_TYPE)
+ gnu_result = build_unary_op (Present (Next_Entity (gnat_field))
+ ? REALPART_EXPR : IMAGPART_EXPR,
+ NULL_TREE, gnu_prefix);
+ else
+ {
+ gnu_field = gnat_to_gnu_field_decl (gnat_field);
+
+ /* If there are discriminants, the prefix might be evaluated more
+ than once, which is a problem if it has side-effects. */
+ if (Has_Discriminants (Is_Access_Type (Etype (Prefix (gnat_node)))
+ ? Designated_Type (Etype
+ (Prefix (gnat_node)))
+ : Etype (Prefix (gnat_node))))
+ gnu_prefix = gnat_stabilize_reference (gnu_prefix, false, NULL);
+
+ gnu_result
+ = build_component_ref (gnu_prefix, NULL_TREE, gnu_field,
+ (Nkind (Parent (gnat_node))
+ == N_Attribute_Reference)
+ && lvalue_required_for_attribute_p
+ (Parent (gnat_node)));
+ }
+
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ /* If this is an atomic access on the RHS for which synchronization is
+ required, build the atomic load. */
+ if (atomic_sync_required_p (gnat_node)
+ && !present_in_lhs_or_actual_p (gnat_node))
+ gnu_result = build_atomic_load (gnu_result);
+ }
+ break;
+
+ case N_Attribute_Reference:
+ {
+ /* The attribute designator. */
+ const int attr = Get_Attribute_Id (Attribute_Name (gnat_node));
+
+ /* The Elab_Spec and Elab_Body attributes are special in that Prefix
+ is a unit, not an object with a GCC equivalent. */
+ if (attr == Attr_Elab_Spec || attr == Attr_Elab_Body)
+ return
+ create_subprog_decl (create_concat_name
+ (Entity (Prefix (gnat_node)),
+ attr == Attr_Elab_Body ? "elabb" : "elabs"),
+ NULL_TREE, void_ftype, NULL_TREE, is_disabled,
+ true, true, true, NULL, gnat_node);
+
+ gnu_result = Attribute_to_gnu (gnat_node, &gnu_result_type, attr);
+ }
+ break;
+
+ case N_Reference:
+ /* Like 'Access as far as we are concerned. */
+ gnu_result = gnat_to_gnu (Prefix (gnat_node));
+ gnu_result = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_result);
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ break;
+
+ case N_Aggregate:
+ case N_Extension_Aggregate:
+ {
+ tree gnu_aggr_type;
+
+ /* ??? It is wrong to evaluate the type now, but there doesn't
+ seem to be any other practical way of doing it. */
+
+ gcc_assert (!Expansion_Delayed (gnat_node));
+
+ gnu_aggr_type = gnu_result_type
+ = get_unpadded_type (Etype (gnat_node));
+
+ if (TREE_CODE (gnu_result_type) == RECORD_TYPE
+ && TYPE_CONTAINS_TEMPLATE_P (gnu_result_type))
+ gnu_aggr_type
+ = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_result_type)));
+ else if (TREE_CODE (gnu_result_type) == VECTOR_TYPE)
+ gnu_aggr_type = TYPE_REPRESENTATIVE_ARRAY (gnu_result_type);
+
+ if (Null_Record_Present (gnat_node))
+ gnu_result = gnat_build_constructor (gnu_aggr_type,
+ NULL);
+
+ else if (TREE_CODE (gnu_aggr_type) == RECORD_TYPE
+ || TREE_CODE (gnu_aggr_type) == UNION_TYPE)
+ gnu_result
+ = assoc_to_constructor (Etype (gnat_node),
+ First (Component_Associations (gnat_node)),
+ gnu_aggr_type);
+ else if (TREE_CODE (gnu_aggr_type) == ARRAY_TYPE)
+ gnu_result = pos_to_constructor (First (Expressions (gnat_node)),
+ gnu_aggr_type,
+ Component_Type (Etype (gnat_node)));
+ else if (TREE_CODE (gnu_aggr_type) == COMPLEX_TYPE)
+ gnu_result
+ = build_binary_op
+ (COMPLEX_EXPR, gnu_aggr_type,
+ gnat_to_gnu (Expression (First
+ (Component_Associations (gnat_node)))),
+ gnat_to_gnu (Expression
+ (Next
+ (First (Component_Associations (gnat_node))))));
+ else
+ gcc_unreachable ();
+
+ gnu_result = convert (gnu_result_type, gnu_result);
+ }
+ break;
+
+ case N_Null:
+ if (TARGET_VTABLE_USES_DESCRIPTORS
+ && Ekind (Etype (gnat_node)) == E_Access_Subprogram_Type
+ && Is_Dispatch_Table_Entity (Etype (gnat_node)))
+ gnu_result = null_fdesc_node;
+ else
+ gnu_result = null_pointer_node;
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ break;
+
+ case N_Type_Conversion:
+ case N_Qualified_Expression:
+ /* Get the operand expression. */
+ gnu_result = gnat_to_gnu (Expression (gnat_node));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ /* If this is a qualified expression for a tagged type, we mark the type
+ as used. Because of polymorphism, this might be the only reference to
+ the tagged type in the program while objects have it as dynamic type.
+ The debugger needs to see it to display these objects properly. */
+ if (kind == N_Qualified_Expression && Is_Tagged_Type (Etype (gnat_node)))
+ used_types_insert (gnu_result_type);
+
+ gnu_result
+ = convert_with_check (Etype (gnat_node), gnu_result,
+ Do_Overflow_Check (gnat_node),
+ Do_Range_Check (Expression (gnat_node)),
+ kind == N_Type_Conversion
+ && Float_Truncate (gnat_node), gnat_node);
+ break;
+
+ case N_Unchecked_Type_Conversion:
+ gnu_result = gnat_to_gnu (Expression (gnat_node));
+
+ /* Skip further processing if the conversion is deemed a no-op. */
+ if (unchecked_conversion_nop (gnat_node))
+ {
+ gnu_result_type = TREE_TYPE (gnu_result);
+ break;
+ }
+
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ /* If the result is a pointer type, see if we are improperly
+ converting to a stricter alignment. */
+ if (STRICT_ALIGNMENT && POINTER_TYPE_P (gnu_result_type)
+ && IN (Ekind (Etype (gnat_node)), Access_Kind))
+ {
+ unsigned int align = known_alignment (gnu_result);
+ tree gnu_obj_type = TREE_TYPE (gnu_result_type);
+ unsigned int oalign = TYPE_ALIGN (gnu_obj_type);
+
+ if (align != 0 && align < oalign && !TYPE_ALIGN_OK (gnu_obj_type))
+ post_error_ne_tree_2
+ ("?source alignment (^) '< alignment of & (^)",
+ gnat_node, Designated_Type (Etype (gnat_node)),
+ size_int (align / BITS_PER_UNIT), oalign / BITS_PER_UNIT);
+ }
+
+ /* If we are converting a descriptor to a function pointer, first
+ build the pointer. */
+ if (TARGET_VTABLE_USES_DESCRIPTORS
+ && TREE_TYPE (gnu_result) == fdesc_type_node
+ && POINTER_TYPE_P (gnu_result_type))
+ gnu_result = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_result);
+
+ gnu_result = unchecked_convert (gnu_result_type, gnu_result,
+ No_Truncation (gnat_node));
+ break;
+
+ case N_In:
+ case N_Not_In:
+ {
+ tree gnu_obj = gnat_to_gnu (Left_Opnd (gnat_node));
+ Node_Id gnat_range = Right_Opnd (gnat_node);
+ tree gnu_low, gnu_high;
+
+ /* GNAT_RANGE is either an N_Range node or an identifier denoting a
+ subtype. */
+ if (Nkind (gnat_range) == N_Range)
+ {
+ gnu_low = gnat_to_gnu (Low_Bound (gnat_range));
+ gnu_high = gnat_to_gnu (High_Bound (gnat_range));
+ }
+ else if (Nkind (gnat_range) == N_Identifier
+ || Nkind (gnat_range) == N_Expanded_Name)
+ {
+ tree gnu_range_type = get_unpadded_type (Entity (gnat_range));
+
+ gnu_low = TYPE_MIN_VALUE (gnu_range_type);
+ gnu_high = TYPE_MAX_VALUE (gnu_range_type);
+ }
+ else
+ gcc_unreachable ();
+
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ /* If LOW and HIGH are identical, perform an equality test. Otherwise,
+ ensure that GNU_OBJ is evaluated only once and perform a full range
+ test. */
+ if (operand_equal_p (gnu_low, gnu_high, 0))
+ gnu_result
+ = build_binary_op (EQ_EXPR, gnu_result_type, gnu_obj, gnu_low);
+ else
+ {
+ tree t1, t2;
+ gnu_obj = gnat_protect_expr (gnu_obj);
+ t1 = build_binary_op (GE_EXPR, gnu_result_type, gnu_obj, gnu_low);
+ if (EXPR_P (t1))
+ set_expr_location_from_node (t1, gnat_node);
+ t2 = build_binary_op (LE_EXPR, gnu_result_type, gnu_obj, gnu_high);
+ if (EXPR_P (t2))
+ set_expr_location_from_node (t2, gnat_node);
+ gnu_result
+ = build_binary_op (TRUTH_ANDIF_EXPR, gnu_result_type, t1, t2);
+ }
+
+ if (kind == N_Not_In)
+ gnu_result
+ = invert_truthvalue_loc (EXPR_LOCATION (gnu_result), gnu_result);
+ }
+ break;
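+
+ /* E.g. "X in 1 .. 10" yields the equivalent of
+ "X >= 1 and then X <= 10" with X evaluated only once, while the
+ degenerate "X in 5 .. 5" collapses to the single test "X = 5". */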
+
+ case N_Op_Divide:
+ gnu_lhs = gnat_to_gnu (Left_Opnd (gnat_node));
+ gnu_rhs = gnat_to_gnu (Right_Opnd (gnat_node));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ gnu_result = build_binary_op (FLOAT_TYPE_P (gnu_result_type)
+ ? RDIV_EXPR
+ : (Rounded_Result (gnat_node)
+ ? ROUND_DIV_EXPR : TRUNC_DIV_EXPR),
+ gnu_result_type, gnu_lhs, gnu_rhs);
+ break;
+
+ case N_Op_Or: case N_Op_And: case N_Op_Xor:
+ /* These can either be operations on booleans or on modular types.
+ Fall through for boolean types since that's the way GNU_CODES is
+ set up. */
+ if (IN (Ekind (Underlying_Type (Etype (gnat_node))),
+ Modular_Integer_Kind))
+ {
+ enum tree_code code
+ = (kind == N_Op_Or ? BIT_IOR_EXPR
+ : kind == N_Op_And ? BIT_AND_EXPR
+ : BIT_XOR_EXPR);
+
+ gnu_lhs = gnat_to_gnu (Left_Opnd (gnat_node));
+ gnu_rhs = gnat_to_gnu (Right_Opnd (gnat_node));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ gnu_result = build_binary_op (code, gnu_result_type,
+ gnu_lhs, gnu_rhs);
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case N_Op_Eq: case N_Op_Ne: case N_Op_Lt:
+ case N_Op_Le: case N_Op_Gt: case N_Op_Ge:
+ case N_Op_Add: case N_Op_Subtract: case N_Op_Multiply:
+ case N_Op_Mod: case N_Op_Rem:
+ case N_Op_Rotate_Left:
+ case N_Op_Rotate_Right:
+ case N_Op_Shift_Left:
+ case N_Op_Shift_Right:
+ case N_Op_Shift_Right_Arithmetic:
+ case N_And_Then: case N_Or_Else:
+ {
+ enum tree_code code = gnu_codes[kind];
+ bool ignore_lhs_overflow = false;
+ location_t saved_location = input_location;
+ tree gnu_type;
+
+ gnu_lhs = gnat_to_gnu (Left_Opnd (gnat_node));
+ gnu_rhs = gnat_to_gnu (Right_Opnd (gnat_node));
+ gnu_type = gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ /* Pending generic support for efficient vector logical operations in
+ GCC, convert vectors to their representative array type view and
+ fall through. */
+ gnu_lhs = maybe_vector_array (gnu_lhs);
+ gnu_rhs = maybe_vector_array (gnu_rhs);
+
+ /* If this is a comparison operator, convert any references to
+ an unconstrained array value into a reference to the
+ actual array. */
+ if (TREE_CODE_CLASS (code) == tcc_comparison)
+ {
+ gnu_lhs = maybe_unconstrained_array (gnu_lhs);
+ gnu_rhs = maybe_unconstrained_array (gnu_rhs);
+ }
+
+ /* If the result type is a private type, its full view may be a
+ numeric subtype. The representation we need is that of its base
+ type, given that it is the result of an arithmetic operation. */
+ else if (Is_Private_Type (Etype (gnat_node)))
+ gnu_type = gnu_result_type
+ = get_unpadded_type (Base_Type (Full_View (Etype (gnat_node))));
+
+ /* If this is a shift whose count is not guaranteed to be correct,
+ we need to adjust the shift count. */
+ if (IN (kind, N_Op_Shift) && !Shift_Count_OK (gnat_node))
+ {
+ tree gnu_count_type = get_base_type (TREE_TYPE (gnu_rhs));
+ tree gnu_max_shift
+ = convert (gnu_count_type, TYPE_SIZE (gnu_type));
+
+ if (kind == N_Op_Rotate_Left || kind == N_Op_Rotate_Right)
+ gnu_rhs = build_binary_op (TRUNC_MOD_EXPR, gnu_count_type,
+ gnu_rhs, gnu_max_shift);
+ else if (kind == N_Op_Shift_Right_Arithmetic)
+ gnu_rhs
+ = build_binary_op
+ (MIN_EXPR, gnu_count_type,
+ build_binary_op (MINUS_EXPR,
+ gnu_count_type,
+ gnu_max_shift,
+ convert (gnu_count_type,
+ integer_one_node)),
+ gnu_rhs);
+ }
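+
+ /* E.g. on a 32-bit type, a rotate count of 33 is reduced to
+ 33 mod 32 = 1, and the count of an arithmetic right shift is
+ capped at 31. */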
+
+ /* For right shifts, the type says what kind of shift to do,
+ so we may need to choose a different type. In this case,
+ we have to ignore integer overflow lest it propagates all
+ the way down and causes a CE to be explicitly raised. */
+ if (kind == N_Op_Shift_Right && !TYPE_UNSIGNED (gnu_type))
+ {
+ gnu_type = gnat_unsigned_type (gnu_type);
+ ignore_lhs_overflow = true;
+ }
+ else if (kind == N_Op_Shift_Right_Arithmetic
+ && TYPE_UNSIGNED (gnu_type))
+ {
+ gnu_type = gnat_signed_type (gnu_type);
+ ignore_lhs_overflow = true;
+ }
+
+ if (gnu_type != gnu_result_type)
+ {
+ tree gnu_old_lhs = gnu_lhs;
+ gnu_lhs = convert (gnu_type, gnu_lhs);
+ if (TREE_CODE (gnu_lhs) == INTEGER_CST && ignore_lhs_overflow)
+ TREE_OVERFLOW (gnu_lhs) = TREE_OVERFLOW (gnu_old_lhs);
+ gnu_rhs = convert (gnu_type, gnu_rhs);
+ }
+
+ /* Instead of expanding overflow checks for addition, subtraction
+ and multiplication itself, the front end will leave this to
+ the back end when Backend_Overflow_Checks_On_Target is set.
+ As the GCC back end itself does not know yet how to properly
+ do overflow checking, do it here. The goal is to push
+ the expansions further into the back end over time. */
+ if (Do_Overflow_Check (gnat_node) && Backend_Overflow_Checks_On_Target
+ && (kind == N_Op_Add
+ || kind == N_Op_Subtract
+ || kind == N_Op_Multiply)
+ && !TYPE_UNSIGNED (gnu_type)
+ && !FLOAT_TYPE_P (gnu_type))
+ gnu_result = build_binary_op_trapv (code, gnu_type,
+ gnu_lhs, gnu_rhs, gnat_node);
+ else
+ {
+ /* Some operations, e.g. comparisons of arrays, generate complex
+ trees that need to be annotated while they are being built. */
+ input_location = saved_location;
+ gnu_result = build_binary_op (code, gnu_type, gnu_lhs, gnu_rhs);
+ }
+
+ /* If this is a logical shift with the shift count not verified,
+ we must return zero if it is too large. We cannot compensate
+ above in this case. */
+ if ((kind == N_Op_Shift_Left || kind == N_Op_Shift_Right)
+ && !Shift_Count_OK (gnat_node))
+ gnu_result
+ = build_cond_expr
+ (gnu_type,
+ build_binary_op (GE_EXPR, boolean_type_node,
+ gnu_rhs,
+ convert (TREE_TYPE (gnu_rhs),
+ TYPE_SIZE (gnu_type))),
+ convert (gnu_type, integer_zero_node),
+ gnu_result);
+ }
+ break;
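+
+ /* Thus a logical Shift_Left or Shift_Right by 32 or more on a 32-bit
+ type yields 0, as the semantics of the Interfaces shift operations
+ require. */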
+
+ case N_If_Expression:
+ {
+ tree gnu_cond = gnat_to_gnu (First (Expressions (gnat_node)));
+ tree gnu_true = gnat_to_gnu (Next (First (Expressions (gnat_node))));
+ tree gnu_false
+ = gnat_to_gnu (Next (Next (First (Expressions (gnat_node)))));
+
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ gnu_result
+ = build_cond_expr (gnu_result_type, gnu_cond, gnu_true, gnu_false);
+ }
+ break;
+
+ case N_Op_Plus:
+ gnu_result = gnat_to_gnu (Right_Opnd (gnat_node));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ break;
+
+ case N_Op_Not:
+ /* This case can apply to a boolean or a modular type.
+ Fall through for a boolean operand since GNU_CODES is set
+ up to handle this. */
+ if (Is_Modular_Integer_Type (Etype (gnat_node))
+ || (Is_Private_Type (Etype (gnat_node))
+ && Is_Modular_Integer_Type (Full_View (Etype (gnat_node)))))
+ {
+ gnu_expr = gnat_to_gnu (Right_Opnd (gnat_node));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ gnu_result = build_unary_op (BIT_NOT_EXPR, gnu_result_type,
+ gnu_expr);
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case N_Op_Minus: case N_Op_Abs:
+ gnu_expr = gnat_to_gnu (Right_Opnd (gnat_node));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+
+ if (Do_Overflow_Check (gnat_node)
+ && !TYPE_UNSIGNED (gnu_result_type)
+ && !FLOAT_TYPE_P (gnu_result_type))
+ gnu_result
+ = build_unary_op_trapv (gnu_codes[kind],
+ gnu_result_type, gnu_expr, gnat_node);
+ else
+ gnu_result = build_unary_op (gnu_codes[kind],
+ gnu_result_type, gnu_expr);
+ break;
+
+ case N_Allocator:
+ {
+ tree gnu_init = NULL_TREE;
+ tree gnu_type;
+ bool ignore_init_type = false;
+
+ gnat_temp = Expression (gnat_node);
+
+ /* The Expression operand can either be an N_Identifier or
+ Expanded_Name, which must represent a type, or an
+ N_Qualified_Expression, which contains both the object type and an
+ initial value for the object. */
+ if (Nkind (gnat_temp) == N_Identifier
+ || Nkind (gnat_temp) == N_Expanded_Name)
+ gnu_type = gnat_to_gnu_type (Entity (gnat_temp));
+ else if (Nkind (gnat_temp) == N_Qualified_Expression)
+ {
+ Entity_Id gnat_desig_type
+ = Designated_Type (Underlying_Type (Etype (gnat_node)));
+
+ ignore_init_type = Has_Constrained_Partial_View (gnat_desig_type);
+ gnu_init = gnat_to_gnu (Expression (gnat_temp));
+
+ gnu_init = maybe_unconstrained_array (gnu_init);
+ if (Do_Range_Check (Expression (gnat_temp)))
+ gnu_init
+ = emit_range_check (gnu_init, gnat_desig_type, gnat_temp);
+
+ if (Is_Elementary_Type (gnat_desig_type)
+ || Is_Constrained (gnat_desig_type))
+ gnu_type = gnat_to_gnu_type (gnat_desig_type);
+ else
+ {
+ gnu_type = gnat_to_gnu_type (Etype (Expression (gnat_temp)));
+ if (TREE_CODE (gnu_type) == UNCONSTRAINED_ARRAY_TYPE)
+ gnu_type = TREE_TYPE (gnu_init);
+ }
+
+ /* See the N_Qualified_Expression case for the rationale. */
+ if (Is_Tagged_Type (gnat_desig_type))
+ used_types_insert (gnu_type);
+
+ gnu_init = convert (gnu_type, gnu_init);
+ }
+ else
+ gcc_unreachable ();
+
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ return build_allocator (gnu_type, gnu_init, gnu_result_type,
+ Procedure_To_Call (gnat_node),
+ Storage_Pool (gnat_node), gnat_node,
+ ignore_init_type);
+ }
+ break;
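+
+ /* Concretely, "new Integer'(5)" arrives as an N_Qualified_Expression
+ giving both the designated type and the initial value, whereas plain
+ "new Integer" arrives as a bare N_Identifier and only the type is
+ elaborated. */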
+
+ /**************************/
+ /* Chapter 5: Statements */
+ /**************************/
+
+ case N_Label:
+ gnu_result = build1 (LABEL_EXPR, void_type_node,
+ gnat_to_gnu (Identifier (gnat_node)));
+ break;
+
+ case N_Null_Statement:
+ /* When not optimizing, turn null statements from source into gotos to
+ the next statement that the middle-end knows how to preserve. */
+ if (!optimize && Comes_From_Source (gnat_node))
+ {
+ tree stmt, label = create_label_decl (NULL_TREE, gnat_node);
+ DECL_IGNORED_P (label) = 1;
+ start_stmt_group ();
+ stmt = build1 (GOTO_EXPR, void_type_node, label);
+ set_expr_location_from_node (stmt, gnat_node);
+ add_stmt (stmt);
+ stmt = build1 (LABEL_EXPR, void_type_node, label);
+ set_expr_location_from_node (stmt, gnat_node);
+ add_stmt (stmt);
+ gnu_result = end_stmt_group ();
+ }
+ else
+ gnu_result = alloc_stmt_list ();
+ break;
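+
+ /* The goto/label pair gives the null statement a real position in the
+ generated code, so that e.g. a breakpoint can be set on a line that
+ contains only "null;" when compiling without optimization. */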
+
+ case N_Assignment_Statement:
+ /* Get the LHS and RHS of the statement and convert any reference to an
+ unconstrained array into a reference to the underlying array. */
+ gnu_lhs = maybe_unconstrained_array (gnat_to_gnu (Name (gnat_node)));
+
+ /* If the type has a size that overflows, convert this into raise of
+ Storage_Error: execution shouldn't have gotten here anyway. */
+ if (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (gnu_lhs))) == INTEGER_CST
+ && !valid_constant_size_p (TYPE_SIZE_UNIT (TREE_TYPE (gnu_lhs))))
+ gnu_result = build_call_raise (SE_Object_Too_Large, gnat_node,
+ N_Raise_Storage_Error);
+ else if (Nkind (Expression (gnat_node)) == N_Function_Call)
+ gnu_result
+ = Call_to_gnu (Expression (gnat_node), &gnu_result_type, gnu_lhs,
+ atomic_sync_required_p (Name (gnat_node)));
+ else
+ {
+ gnu_rhs
+ = maybe_unconstrained_array (gnat_to_gnu (Expression (gnat_node)));
+
+ /* If range check is needed, emit code to generate it. */
+ if (Do_Range_Check (Expression (gnat_node)))
+ gnu_rhs = emit_range_check (gnu_rhs, Etype (Name (gnat_node)),
+ gnat_node);
+
+ if (atomic_sync_required_p (Name (gnat_node)))
+ gnu_result = build_atomic_store (gnu_lhs, gnu_rhs);
+ else
+ gnu_result
+ = build_binary_op (MODIFY_EXPR, NULL_TREE, gnu_lhs, gnu_rhs);
+
+ /* If the type being assigned is an array type and the two sides are
+ not completely disjoint, play safe and use memmove. But don't do
+ it for a bit-packed array as it might not be byte-aligned. */
+ if (TREE_CODE (gnu_result) == MODIFY_EXPR
+ && Is_Array_Type (Etype (Name (gnat_node)))
+ && !Is_Bit_Packed_Array (Etype (Name (gnat_node)))
+ && !(Forwards_OK (gnat_node) && Backwards_OK (gnat_node)))
+ {
+ tree to, from, size, to_ptr, from_ptr, t;
+
+ to = TREE_OPERAND (gnu_result, 0);
+ from = TREE_OPERAND (gnu_result, 1);
+
+ size = TYPE_SIZE_UNIT (TREE_TYPE (from));
+ size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, from);
+
+ to_ptr = build_fold_addr_expr (to);
+ from_ptr = build_fold_addr_expr (from);
+
+ t = builtin_decl_implicit (BUILT_IN_MEMMOVE);
+ gnu_result = build_call_expr (t, 3, to_ptr, from_ptr, size);
+ }
+ }
+ break;
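+
+ /* So for potentially overlapping slices, as in
+ "A (1 .. 5) := A (2 .. 6);", the copy is done with memmove instead
+ of a plain MODIFY_EXPR. */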
+
+ case N_If_Statement:
+ {
+ tree *gnu_else_ptr; /* Point to put next "else if" or "else". */
+
+ /* Make the outer COND_EXPR. Avoid non-determinism. */
+ gnu_result = build3 (COND_EXPR, void_type_node,
+ gnat_to_gnu (Condition (gnat_node)),
+ NULL_TREE, NULL_TREE);
+ COND_EXPR_THEN (gnu_result)
+ = build_stmt_group (Then_Statements (gnat_node), false);
+ TREE_SIDE_EFFECTS (gnu_result) = 1;
+ gnu_else_ptr = &COND_EXPR_ELSE (gnu_result);
+
+ /* Now make a COND_EXPR for each of the "else if" parts. Put each
+ into the previous "else" part and point to where to put any
+ outer "else". Also avoid non-determinism. */
+ if (Present (Elsif_Parts (gnat_node)))
+ for (gnat_temp = First (Elsif_Parts (gnat_node));
+ Present (gnat_temp); gnat_temp = Next (gnat_temp))
+ {
+ gnu_expr = build3 (COND_EXPR, void_type_node,
+ gnat_to_gnu (Condition (gnat_temp)),
+ NULL_TREE, NULL_TREE);
+ COND_EXPR_THEN (gnu_expr)
+ = build_stmt_group (Then_Statements (gnat_temp), false);
+ TREE_SIDE_EFFECTS (gnu_expr) = 1;
+ set_expr_location_from_node (gnu_expr, gnat_temp);
+ *gnu_else_ptr = gnu_expr;
+ gnu_else_ptr = &COND_EXPR_ELSE (gnu_expr);
+ }
+
+ *gnu_else_ptr = build_stmt_group (Else_Statements (gnat_node), false);
+ }
+ break;
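+
+ /* An if/elsif/else chain thus becomes a nest of COND_EXPRs:
+ "if C1 then S1; elsif C2 then S2; else S3; end if;" maps to
+ COND_EXPR (C1, S1, COND_EXPR (C2, S2, S3)). */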
+
+ case N_Case_Statement:
+ gnu_result = Case_Statement_to_gnu (gnat_node);
+ break;
+
+ case N_Loop_Statement:
+ gnu_result = Loop_Statement_to_gnu (gnat_node);
+ break;
+
+ case N_Block_Statement:
+ /* The only way to enter the block is to fall through to it. */
+ if (stmt_group_may_fallthru ())
+ {
+ start_stmt_group ();
+ gnat_pushlevel ();
+ process_decls (Declarations (gnat_node), Empty, Empty, true, true);
+ add_stmt (gnat_to_gnu (Handled_Statement_Sequence (gnat_node)));
+ gnat_poplevel ();
+ gnu_result = end_stmt_group ();
+ }
+ else
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Exit_Statement:
+ gnu_result
+ = build2 (EXIT_STMT, void_type_node,
+ (Present (Condition (gnat_node))
+ ? gnat_to_gnu (Condition (gnat_node)) : NULL_TREE),
+ (Present (Name (gnat_node))
+ ? get_gnu_tree (Entity (Name (gnat_node)))
+ : LOOP_STMT_LABEL (gnu_loop_stack->last ()->stmt)));
+ break;
+
+ case N_Simple_Return_Statement:
+ {
+ tree gnu_ret_obj, gnu_ret_val;
+
+ /* If the subprogram is a function, we must return the expression. */
+ if (Present (Expression (gnat_node)))
+ {
+ tree gnu_subprog_type = TREE_TYPE (current_function_decl);
+
+ /* If this function has copy-in/copy-out parameters, get the real
+ object for the return. See Subprogram_to_gnu. */
+ if (TYPE_CI_CO_LIST (gnu_subprog_type))
+ gnu_ret_obj = gnu_return_var_stack->last ();
+ else
+ gnu_ret_obj = DECL_RESULT (current_function_decl);
+
+ /* Get the GCC tree for the expression to be returned. */
+ gnu_ret_val = gnat_to_gnu (Expression (gnat_node));
+
+ /* Do not remove the padding from GNU_RET_VAL if the inner type is
+ self-referential since we want to allocate the fixed size. */
+ if (TREE_CODE (gnu_ret_val) == COMPONENT_REF
+ && TYPE_IS_PADDING_P
+ (TREE_TYPE (TREE_OPERAND (gnu_ret_val, 0)))
+ && CONTAINS_PLACEHOLDER_P
+ (TYPE_SIZE (TREE_TYPE (gnu_ret_val))))
+ gnu_ret_val = TREE_OPERAND (gnu_ret_val, 0);
+
+ /* If the function returns by direct reference, return a pointer
+ to the return value. */
+ if (TYPE_RETURN_BY_DIRECT_REF_P (gnu_subprog_type)
+ || By_Ref (gnat_node))
+ gnu_ret_val = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_ret_val);
+
+ /* Otherwise, if it returns an unconstrained array, we have to
+ allocate a new version of the result and return it. */
+ else if (TYPE_RETURN_UNCONSTRAINED_P (gnu_subprog_type))
+ {
+ gnu_ret_val = maybe_unconstrained_array (gnu_ret_val);
+
+ /* And find out whether this is a candidate for Named Return
+ Value. If so, record it. */
+ if (!TYPE_CI_CO_LIST (gnu_subprog_type) && optimize)
+ {
+ tree ret_val = gnu_ret_val;
+
+ /* Strip useless conversions around the return value. */
+ if (gnat_useless_type_conversion (ret_val))
+ ret_val = TREE_OPERAND (ret_val, 0);
+
+ /* Strip unpadding around the return value. */
+ if (TREE_CODE (ret_val) == COMPONENT_REF
+ && TYPE_IS_PADDING_P
+ (TREE_TYPE (TREE_OPERAND (ret_val, 0))))
+ ret_val = TREE_OPERAND (ret_val, 0);
+
+ /* Now apply the test to the return value. */
+ if (return_value_ok_for_nrv_p (NULL_TREE, ret_val))
+ {
+ if (!f_named_ret_val)
+ f_named_ret_val = BITMAP_GGC_ALLOC ();
+ bitmap_set_bit (f_named_ret_val, DECL_UID (ret_val));
+ if (!f_gnat_ret)
+ f_gnat_ret = gnat_node;
+ }
+ }
+
+ gnu_ret_val = build_allocator (TREE_TYPE (gnu_ret_val),
+ gnu_ret_val,
+ TREE_TYPE (gnu_ret_obj),
+ Procedure_To_Call (gnat_node),
+ Storage_Pool (gnat_node),
+ gnat_node, false);
+ }
+
+ /* Otherwise, if it returns by invisible reference, dereference
+ the pointer it is passed using the type of the return value
+ and build the copy operation manually. This ensures that we
+ don't copy too much data, for example if the return type is
+ unconstrained with a maximum size. */
+ else if (TREE_ADDRESSABLE (gnu_subprog_type))
+ {
+ tree gnu_ret_deref
+ = build_unary_op (INDIRECT_REF, TREE_TYPE (gnu_ret_val),
+ gnu_ret_obj);
+ gnu_result = build_binary_op (MODIFY_EXPR, NULL_TREE,
+ gnu_ret_deref, gnu_ret_val);
+ add_stmt_with_node (gnu_result, gnat_node);
+ gnu_ret_val = NULL_TREE;
+ }
+ }
+
+ else
+ gnu_ret_obj = gnu_ret_val = NULL_TREE;
+
+ /* If we have a return label defined, convert this into a branch to
+ that label. The return proper will be handled elsewhere. */
+ if (gnu_return_label_stack->last ())
+ {
+ if (gnu_ret_obj)
+ add_stmt (build_binary_op (MODIFY_EXPR, NULL_TREE, gnu_ret_obj,
+ gnu_ret_val));
+
+ gnu_result = build1 (GOTO_EXPR, void_type_node,
+ gnu_return_label_stack->last ());
+
+ /* When not optimizing, make sure the return is preserved. */
+ if (!optimize && Comes_From_Source (gnat_node))
+ DECL_ARTIFICIAL (gnu_return_label_stack->last ()) = 0;
+ }
+
+ /* Otherwise, build a regular return. */
+ else
+ gnu_result = build_return_expr (gnu_ret_obj, gnu_ret_val);
+ }
+ break;
+
+ case N_Goto_Statement:
+ gnu_result
+ = build1 (GOTO_EXPR, void_type_node, gnat_to_gnu (Name (gnat_node)));
+ break;
+
+ /***************************/
+ /* Chapter 6: Subprograms */
+ /***************************/
+
+ case N_Subprogram_Declaration:
+ /* Unless there is a freeze node, declare the subprogram. We consider
+ this a "definition" even though we're not generating code for
+ the subprogram because we will be making the corresponding GCC
+ node here. */
+
+ if (No (Freeze_Node (Defining_Entity (Specification (gnat_node)))))
+ gnat_to_gnu_entity (Defining_Entity (Specification (gnat_node)),
+ NULL_TREE, 1);
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Abstract_Subprogram_Declaration:
+ /* This subprogram doesn't exist for code generation purposes, but we
+ have to elaborate the types of any parameters and result, unless
+ they are imported types (nothing to generate in this case).
+
+ The parameter list may contain types with freeze nodes, e.g. not null
+ subtypes, so the subprogram itself may carry a freeze node, in which
+ case its elaboration must be deferred. */
+
+ /* Process the parameter types first. */
+ if (No (Freeze_Node (Defining_Entity (Specification (gnat_node)))))
+ for (gnat_temp
+ = First_Formal_With_Extras
+ (Defining_Entity (Specification (gnat_node)));
+ Present (gnat_temp);
+ gnat_temp = Next_Formal_With_Extras (gnat_temp))
+ if (Is_Itype (Etype (gnat_temp))
+ && !From_Limited_With (Etype (gnat_temp)))
+ gnat_to_gnu_entity (Etype (gnat_temp), NULL_TREE, 0);
+
+ /* Then the result type, set to Standard_Void_Type for procedures. */
+ {
+ Entity_Id gnat_temp_type
+ = Etype (Defining_Entity (Specification (gnat_node)));
+
+ if (Is_Itype (gnat_temp_type) && !From_Limited_With (gnat_temp_type))
+ gnat_to_gnu_entity (Etype (gnat_temp_type), NULL_TREE, 0);
+ }
+
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Defining_Program_Unit_Name:
+ /* For a child unit identifier, go up a level to get the specification.
+ We get this when we try to find the spec of a child unit package
+ that is the compilation unit being compiled. */
+ gnu_result = gnat_to_gnu (Parent (gnat_node));
+ break;
+
+ case N_Subprogram_Body:
+ Subprogram_Body_to_gnu (gnat_node);
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Function_Call:
+ case N_Procedure_Call_Statement:
+ gnu_result = Call_to_gnu (gnat_node, &gnu_result_type, NULL_TREE, false);
+ break;
+
+ /************************/
+ /* Chapter 7: Packages */
+ /************************/
+
+ case N_Package_Declaration:
+ gnu_result = gnat_to_gnu (Specification (gnat_node));
+ break;
+
+ case N_Package_Specification:
+
+ start_stmt_group ();
+ process_decls (Visible_Declarations (gnat_node),
+ Private_Declarations (gnat_node), Empty, true, true);
+ gnu_result = end_stmt_group ();
+ break;
+
+ case N_Package_Body:
+
+ /* If this is the body of a generic package, do nothing. */
+ if (Ekind (Corresponding_Spec (gnat_node)) == E_Generic_Package)
+ {
+ gnu_result = alloc_stmt_list ();
+ break;
+ }
+
+ start_stmt_group ();
+ process_decls (Declarations (gnat_node), Empty, Empty, true, true);
+
+ if (Present (Handled_Statement_Sequence (gnat_node)))
+ add_stmt (gnat_to_gnu (Handled_Statement_Sequence (gnat_node)));
+
+ gnu_result = end_stmt_group ();
+ break;
+
+ /********************************/
+ /* Chapter 8: Visibility Rules */
+ /********************************/
+
+ case N_Use_Package_Clause:
+ case N_Use_Type_Clause:
+ /* Nothing to do here, but these may appear in a list of declarations. */
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ /*********************/
+ /* Chapter 9: Tasks */
+ /*********************/
+
+ case N_Protected_Type_Declaration:
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Single_Task_Declaration:
+ gnat_to_gnu_entity (Defining_Entity (gnat_node), NULL_TREE, 1);
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ /*********************************************************/
+ /* Chapter 10: Program Structure and Compilation Issues */
+ /*********************************************************/
+
+ case N_Compilation_Unit:
+ /* This is not called for the main unit on which gigi is invoked. */
+ Compilation_Unit_to_gnu (gnat_node);
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Subprogram_Body_Stub:
+ case N_Package_Body_Stub:
+ case N_Protected_Body_Stub:
+ case N_Task_Body_Stub:
+ /* Simply process whatever unit is being inserted. */
+ if (Present (Library_Unit (gnat_node)))
+ gnu_result = gnat_to_gnu (Unit (Library_Unit (gnat_node)));
+ else
+ {
+ gcc_assert (type_annotate_only);
+ gnu_result = alloc_stmt_list ();
+ }
+ break;
+
+ case N_Subunit:
+ gnu_result = gnat_to_gnu (Proper_Body (gnat_node));
+ break;
+
+ /***************************/
+ /* Chapter 11: Exceptions */
+ /***************************/
+
+ case N_Handled_Sequence_Of_Statements:
+ /* If there is an At_End procedure attached to this node, and the EH
+ mechanism is SJLJ, we must have at least a corresponding At_End
+ handler, unless the No_Exception_Handlers restriction is set. */
+ gcc_assert (type_annotate_only
+ || Exception_Mechanism != Setjmp_Longjmp
+ || No (At_End_Proc (gnat_node))
+ || Present (Exception_Handlers (gnat_node))
+ || No_Exception_Handlers_Set ());
+
+ gnu_result = Handled_Sequence_Of_Statements_to_gnu (gnat_node);
+ break;
+
+ case N_Exception_Handler:
+ if (Exception_Mechanism == Setjmp_Longjmp)
+ gnu_result = Exception_Handler_to_gnu_sjlj (gnat_node);
+ else if (Exception_Mechanism == Back_End_Exceptions)
+ gnu_result = Exception_Handler_to_gnu_zcx (gnat_node);
+ else
+ gcc_unreachable ();
+ break;
+
+ case N_Raise_Statement:
+ /* Only for reraise in back-end exceptions mode. */
+ gcc_assert (No (Name (gnat_node))
+ && Exception_Mechanism == Back_End_Exceptions);
+
+ start_stmt_group ();
+ gnat_pushlevel ();
+
+ /* Clear the current exception pointer so that the occurrence won't be
+ deallocated. */
+ gnu_expr = create_var_decl (get_identifier ("SAVED_EXPTR"), NULL_TREE,
+ ptr_type_node, gnu_incoming_exc_ptr,
+ false, false, false, false, NULL, gnat_node);
+
+ add_stmt (build_binary_op (MODIFY_EXPR, NULL_TREE, gnu_incoming_exc_ptr,
+ convert (ptr_type_node, integer_zero_node)));
+ add_stmt (build_call_n_expr (reraise_zcx_decl, 1, gnu_expr));
+ gnat_poplevel ();
+ gnu_result = end_stmt_group ();
+ break;
+
+ case N_Push_Constraint_Error_Label:
+ push_exception_label_stack (&gnu_constraint_error_label_stack,
+ Exception_Label (gnat_node));
+ break;
+
+ case N_Push_Storage_Error_Label:
+ push_exception_label_stack (&gnu_storage_error_label_stack,
+ Exception_Label (gnat_node));
+ break;
+
+ case N_Push_Program_Error_Label:
+ push_exception_label_stack (&gnu_program_error_label_stack,
+ Exception_Label (gnat_node));
+ break;
+
+ case N_Pop_Constraint_Error_Label:
+ gnu_constraint_error_label_stack->pop ();
+ break;
+
+ case N_Pop_Storage_Error_Label:
+ gnu_storage_error_label_stack->pop ();
+ break;
+
+ case N_Pop_Program_Error_Label:
+ gnu_program_error_label_stack->pop ();
+ break;
+
+ /******************************/
+ /* Chapter 12: Generic Units */
+ /******************************/
+
+ case N_Generic_Function_Renaming_Declaration:
+ case N_Generic_Package_Renaming_Declaration:
+ case N_Generic_Procedure_Renaming_Declaration:
+ case N_Generic_Package_Declaration:
+ case N_Generic_Subprogram_Declaration:
+ case N_Package_Instantiation:
+ case N_Procedure_Instantiation:
+ case N_Function_Instantiation:
+      /* These nodes can appear on a declaration list but there is nothing
+	 to be done with them.  */
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ /**************************************************/
+ /* Chapter 13: Representation Clauses and */
+ /* Implementation-Dependent Features */
+ /**************************************************/
+
+ case N_Attribute_Definition_Clause:
+ gnu_result = alloc_stmt_list ();
+
+ /* The only one we need to deal with is 'Address since, for the others,
+ the front-end puts the information elsewhere. */
+ if (Get_Attribute_Id (Chars (gnat_node)) != Attr_Address)
+ break;
+
+ /* And we only deal with 'Address if the object has a Freeze node. */
+ gnat_temp = Entity (Name (gnat_node));
+ if (No (Freeze_Node (gnat_temp)))
+ break;
+
+ /* Get the value to use as the address and save it as the equivalent
+ for the object. When it is frozen, gnat_to_gnu_entity will do the
+ right thing. */
+ save_gnu_tree (gnat_temp, gnat_to_gnu (Expression (gnat_node)), true);
+ break;
+
+ case N_Enumeration_Representation_Clause:
+ case N_Record_Representation_Clause:
+ case N_At_Clause:
+ /* We do nothing with these. SEM puts the information elsewhere. */
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Code_Statement:
+ if (!type_annotate_only)
+ {
+ tree gnu_template = gnat_to_gnu (Asm_Template (gnat_node));
+ tree gnu_inputs = NULL_TREE, gnu_outputs = NULL_TREE;
+ tree gnu_clobbers = NULL_TREE, tail;
+ bool allows_mem, allows_reg, fake;
+ int ninputs, noutputs, i;
+ const char **oconstraints;
+ const char *constraint;
+ char *clobber;
+
+ /* First retrieve the 3 operand lists built by the front-end. */
+ Setup_Asm_Outputs (gnat_node);
+ while (Present (gnat_temp = Asm_Output_Variable ()))
+ {
+ tree gnu_value = gnat_to_gnu (gnat_temp);
+ tree gnu_constr = build_tree_list (NULL_TREE, gnat_to_gnu
+ (Asm_Output_Constraint ()));
+
+ gnu_outputs = tree_cons (gnu_constr, gnu_value, gnu_outputs);
+ Next_Asm_Output ();
+ }
+
+ Setup_Asm_Inputs (gnat_node);
+ while (Present (gnat_temp = Asm_Input_Value ()))
+ {
+ tree gnu_value = gnat_to_gnu (gnat_temp);
+ tree gnu_constr = build_tree_list (NULL_TREE, gnat_to_gnu
+ (Asm_Input_Constraint ()));
+
+ gnu_inputs = tree_cons (gnu_constr, gnu_value, gnu_inputs);
+ Next_Asm_Input ();
+ }
+
+ Clobber_Setup (gnat_node);
+ while ((clobber = Clobber_Get_Next ()))
+ gnu_clobbers
+ = tree_cons (NULL_TREE,
+ build_string (strlen (clobber) + 1, clobber),
+ gnu_clobbers);
+
+ /* Then perform some standard checking and processing on the
+ operands. In particular, mark them addressable if needed. */
+ gnu_outputs = nreverse (gnu_outputs);
+ noutputs = list_length (gnu_outputs);
+ gnu_inputs = nreverse (gnu_inputs);
+ ninputs = list_length (gnu_inputs);
+ oconstraints = XALLOCAVEC (const char *, noutputs);
+
+ for (i = 0, tail = gnu_outputs; tail; ++i, tail = TREE_CHAIN (tail))
+ {
+ tree output = TREE_VALUE (tail);
+ constraint
+ = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tail)));
+ oconstraints[i] = constraint;
+
+ if (parse_output_constraint (&constraint, i, ninputs, noutputs,
+ &allows_mem, &allows_reg, &fake))
+ {
+ /* If the operand is going to end up in memory,
+ mark it addressable. Note that we don't test
+ allows_mem like in the input case below; this
+ is modelled on the C front-end. */
+ if (!allows_reg)
+ {
+ output = remove_conversions (output, false);
+ if (TREE_CODE (output) == CONST_DECL
+ && DECL_CONST_CORRESPONDING_VAR (output))
+ output = DECL_CONST_CORRESPONDING_VAR (output);
+ if (!gnat_mark_addressable (output))
+ output = error_mark_node;
+ }
+ }
+ else
+ output = error_mark_node;
+
+ TREE_VALUE (tail) = output;
+ }
+
+ for (i = 0, tail = gnu_inputs; tail; ++i, tail = TREE_CHAIN (tail))
+ {
+ tree input = TREE_VALUE (tail);
+ constraint
+ = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tail)));
+
+ if (parse_input_constraint (&constraint, i, ninputs, noutputs,
+ 0, oconstraints,
+ &allows_mem, &allows_reg))
+ {
+ /* If the operand is going to end up in memory,
+ mark it addressable. */
+ if (!allows_reg && allows_mem)
+ {
+ input = remove_conversions (input, false);
+ if (TREE_CODE (input) == CONST_DECL
+ && DECL_CONST_CORRESPONDING_VAR (input))
+ input = DECL_CONST_CORRESPONDING_VAR (input);
+ if (!gnat_mark_addressable (input))
+ input = error_mark_node;
+ }
+ }
+ else
+ input = error_mark_node;
+
+ TREE_VALUE (tail) = input;
+ }
+
+ gnu_result = build5 (ASM_EXPR, void_type_node,
+ gnu_template, gnu_outputs,
+ gnu_inputs, gnu_clobbers, NULL_TREE);
+ ASM_VOLATILE_P (gnu_result) = Is_Asm_Volatile (gnat_node);
+ }
+ else
+ gnu_result = alloc_stmt_list ();
+
+ break;
+
+ /****************/
+ /* Added Nodes */
+ /****************/
+
+ case N_Expression_With_Actions:
+ /* This construct doesn't define a scope so we don't push a binding level
+ around the statement list; but we wrap it in a SAVE_EXPR to protect it
+ from unsharing. */
+ gnu_result = build_stmt_group (Actions (gnat_node), false);
+ gnu_result = build1 (SAVE_EXPR, void_type_node, gnu_result);
+ TREE_SIDE_EFFECTS (gnu_result) = 1;
+ gnu_expr = gnat_to_gnu (Expression (gnat_node));
+ gnu_result
+ = build_compound_expr (TREE_TYPE (gnu_expr), gnu_result, gnu_expr);
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ break;
+
+ case N_Freeze_Entity:
+ start_stmt_group ();
+ process_freeze_entity (gnat_node);
+ process_decls (Actions (gnat_node), Empty, Empty, true, true);
+ gnu_result = end_stmt_group ();
+ break;
+
+ case N_Freeze_Generic_Entity:
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Itype_Reference:
+ if (!present_gnu_tree (Itype (gnat_node)))
+ process_type (Itype (gnat_node));
+
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Free_Statement:
+ if (!type_annotate_only)
+ {
+ tree gnu_ptr = gnat_to_gnu (Expression (gnat_node));
+ tree gnu_ptr_type = TREE_TYPE (gnu_ptr);
+ tree gnu_obj_type, gnu_actual_obj_type;
+
+ /* If this is a thin pointer, we must first dereference it to create
+ a fat pointer, then go back below to a thin pointer. The reason
+ for this is that we need to have a fat pointer someplace in order
+ to properly compute the size. */
+ if (TYPE_IS_THIN_POINTER_P (TREE_TYPE (gnu_ptr)))
+ gnu_ptr = build_unary_op (ADDR_EXPR, NULL_TREE,
+ build_unary_op (INDIRECT_REF, NULL_TREE,
+ gnu_ptr));
+
+ /* If this is a fat pointer, the object must have been allocated with
+ the template in front of the array. So pass the template address,
+ and get the total size; do it by converting to a thin pointer. */
+ if (TYPE_IS_FAT_POINTER_P (TREE_TYPE (gnu_ptr)))
+ gnu_ptr
+ = convert (build_pointer_type
+ (TYPE_OBJECT_RECORD_TYPE
+ (TYPE_UNCONSTRAINED_ARRAY (TREE_TYPE (gnu_ptr)))),
+ gnu_ptr);
+
+ gnu_obj_type = TREE_TYPE (TREE_TYPE (gnu_ptr));
+
+ /* If this is a thin pointer, the object must have been allocated with
+ the template in front of the array. So pass the template address,
+ and get the total size. */
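+	  /* The layout of such an object is
+
+	       [ bounds template ][ array data ]
+	       ^                  ^
+	       allocated address  thin pointer value
+
+	     so subtracting the byte position of the array field from the
+	     thin pointer yields the address that was originally returned
+	     by the allocator.  */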
+ if (TYPE_IS_THIN_POINTER_P (TREE_TYPE (gnu_ptr)))
+ gnu_ptr
+ = build_binary_op (POINTER_PLUS_EXPR, TREE_TYPE (gnu_ptr),
+ gnu_ptr,
+ fold_build1 (NEGATE_EXPR, sizetype,
+ byte_position
+						    (DECL_CHAIN
+						     (TYPE_FIELDS (gnu_obj_type)))));
+
+ /* If we have a special dynamic constrained subtype on the node, use
+ it to compute the size; otherwise, use the designated subtype. */
+ if (Present (Actual_Designated_Subtype (gnat_node)))
+ {
+ gnu_actual_obj_type
+ = gnat_to_gnu_type (Actual_Designated_Subtype (gnat_node));
+
+ if (TYPE_IS_FAT_OR_THIN_POINTER_P (gnu_ptr_type))
+ gnu_actual_obj_type
+ = build_unc_object_type_from_ptr (gnu_ptr_type,
+ gnu_actual_obj_type,
+ get_identifier ("DEALLOC"),
+ false);
+ }
+ else
+ gnu_actual_obj_type = gnu_obj_type;
+
+ gnu_result
+ = build_call_alloc_dealloc (gnu_ptr,
+ TYPE_SIZE_UNIT (gnu_actual_obj_type),
+ gnu_obj_type,
+ Procedure_To_Call (gnat_node),
+ Storage_Pool (gnat_node),
+ gnat_node);
+ }
+ break;
+
+ case N_Raise_Constraint_Error:
+ case N_Raise_Program_Error:
+ case N_Raise_Storage_Error:
+ if (type_annotate_only)
+ gnu_result = alloc_stmt_list ();
+ else
+ gnu_result = Raise_Error_to_gnu (gnat_node, &gnu_result_type);
+ break;
+
+ case N_Validate_Unchecked_Conversion:
+ /* The only validation we currently do on an unchecked conversion is
+ that of aliasing assumptions. */
+ if (flag_strict_aliasing)
+ gnat_validate_uc_list.safe_push (gnat_node);
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ case N_Function_Specification:
+ case N_Procedure_Specification:
+ case N_Op_Concat:
+ case N_Component_Association:
+ case N_Protected_Body:
+ case N_Task_Body:
+ /* These nodes should only be present when annotating types. */
+ gcc_assert (type_annotate_only);
+ gnu_result = alloc_stmt_list ();
+ break;
+
+ default:
+ /* Other nodes are not supposed to reach here. */
+ gcc_unreachable ();
+ }
+
+ /* If we pushed the processing of the elaboration routine, pop it back. */
+ if (went_into_elab_proc)
+ current_function_decl = NULL_TREE;
+
+ /* When not optimizing, turn boolean rvalues B into B != false tests
+ so that the code just below can put the location information of the
+ reference to B on the inequality operator for better debug info. */
+ if (!optimize
+ && TREE_CODE (gnu_result) != INTEGER_CST
+ && (kind == N_Identifier
+ || kind == N_Expanded_Name
+ || kind == N_Explicit_Dereference
+ || kind == N_Function_Call
+ || kind == N_Indexed_Component
+ || kind == N_Selected_Component)
+ && TREE_CODE (get_base_type (gnu_result_type)) == BOOLEAN_TYPE
+ && !lvalue_required_p (gnat_node, gnu_result_type, false, false, false))
+ gnu_result = build_binary_op (NE_EXPR, gnu_result_type,
+ convert (gnu_result_type, gnu_result),
+ convert (gnu_result_type,
+ boolean_false_node));
+
+ /* Set the location information on the result. Note that we may have
+ no result if we tried to build a CALL_EXPR node to a procedure with
+ no side-effects and optimization is enabled. */
+ if (gnu_result && EXPR_P (gnu_result))
+ set_gnu_expr_location_from_node (gnu_result, gnat_node);
+
+ /* If we're supposed to return something of void_type, it means we have
+ something we're elaborating for effect, so just return. */
+ if (TREE_CODE (gnu_result_type) == VOID_TYPE)
+ return gnu_result;
+
+ /* If the result is a constant that overflowed, raise Constraint_Error. */
+ if (TREE_CODE (gnu_result) == INTEGER_CST && TREE_OVERFLOW (gnu_result))
+ {
+ post_error ("?`Constraint_Error` will be raised at run time", gnat_node);
+ gnu_result
+ = build1 (NULL_EXPR, gnu_result_type,
+ build_call_raise (CE_Overflow_Check_Failed, gnat_node,
+ N_Raise_Constraint_Error));
+ }
+
+ /* If the result has side-effects and is of an unconstrained type, make a
+ SAVE_EXPR so that we can be sure it will only be referenced once. But
+ this is useless for a call to a function that returns an unconstrained
+ type with default discriminant, as we cannot compute the size of the
+ actual returned object. We must do this before any conversions. */
+ if (TREE_SIDE_EFFECTS (gnu_result)
+ && !(TREE_CODE (gnu_result) == CALL_EXPR
+ && TYPE_IS_PADDING_P (TREE_TYPE (gnu_result)))
+ && (TREE_CODE (gnu_result_type) == UNCONSTRAINED_ARRAY_TYPE
+ || CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_result_type))))
+ gnu_result = gnat_stabilize_reference (gnu_result, false, NULL);
+
+ /* Now convert the result to the result type, unless we are in one of the
+ following cases:
+
+ 1. If this is the LHS of an assignment or an actual parameter of a
+ call, return the result almost unmodified since the RHS will have
+ to be converted to our type in that case, unless the result type
+ has a simpler size. Likewise if there is just a no-op unchecked
+ conversion in-between. Similarly, don't convert integral types
+ that are the operands of an unchecked conversion since we need
+ to ignore those conversions (for 'Valid).
+
+ 2. If we have a label (which doesn't have any well-defined type), a
+ field or an error, return the result almost unmodified. Similarly,
+ if the two types are record types with the same name, don't convert.
+ This will be the case when we are converting from a packable version
+ of a type to its original type and we need those conversions to be
+ NOPs in order for assignments into these types to work properly.
+
+ 3. If the type is void or if we have no result, return error_mark_node
+ to show we have no result.
+
+ 4. If this a call to a function that returns an unconstrained type with
+ default discriminant, return the call expression unmodified since we
+ cannot compute the size of the actual returned object.
+
+ 5. Finally, if the type of the result is already correct. */
+
+ if (Present (Parent (gnat_node))
+ && (lhs_or_actual_p (gnat_node)
+ || (Nkind (Parent (gnat_node)) == N_Unchecked_Type_Conversion
+ && unchecked_conversion_nop (Parent (gnat_node)))
+ || (Nkind (Parent (gnat_node)) == N_Unchecked_Type_Conversion
+ && !AGGREGATE_TYPE_P (gnu_result_type)
+ && !AGGREGATE_TYPE_P (TREE_TYPE (gnu_result))))
+ && !(TYPE_SIZE (gnu_result_type)
+ && TYPE_SIZE (TREE_TYPE (gnu_result))
+ && (AGGREGATE_TYPE_P (gnu_result_type)
+ == AGGREGATE_TYPE_P (TREE_TYPE (gnu_result)))
+ && ((TREE_CODE (TYPE_SIZE (gnu_result_type)) == INTEGER_CST
+ && (TREE_CODE (TYPE_SIZE (TREE_TYPE (gnu_result)))
+ != INTEGER_CST))
+ || (TREE_CODE (TYPE_SIZE (gnu_result_type)) != INTEGER_CST
+ && !CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_result_type))
+ && (CONTAINS_PLACEHOLDER_P
+ (TYPE_SIZE (TREE_TYPE (gnu_result))))))
+ && !(TREE_CODE (gnu_result_type) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (gnu_result_type))))
+ {
+ /* Remove padding only if the inner object is of self-referential
+ size: in that case it must be an object of unconstrained type
+ with a default discriminant and we want to avoid copying too
+ much data. */
+ if (TYPE_IS_PADDING_P (TREE_TYPE (gnu_result))
+ && CONTAINS_PLACEHOLDER_P (TYPE_SIZE (TREE_TYPE (TYPE_FIELDS
+ (TREE_TYPE (gnu_result))))))
+ gnu_result = convert (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_result))),
+ gnu_result);
+ }
+
+ else if (TREE_CODE (gnu_result) == LABEL_DECL
+ || TREE_CODE (gnu_result) == FIELD_DECL
+ || TREE_CODE (gnu_result) == ERROR_MARK
+ || (TYPE_NAME (gnu_result_type)
+ == TYPE_NAME (TREE_TYPE (gnu_result))
+ && TREE_CODE (gnu_result_type) == RECORD_TYPE
+ && TREE_CODE (TREE_TYPE (gnu_result)) == RECORD_TYPE))
+ {
+ /* Remove any padding. */
+ if (TYPE_IS_PADDING_P (TREE_TYPE (gnu_result)))
+ gnu_result = convert (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_result))),
+ gnu_result);
+ }
+
+ else if (gnu_result == error_mark_node || gnu_result_type == void_type_node)
+ gnu_result = error_mark_node;
+
+ else if (TREE_CODE (gnu_result) == CALL_EXPR
+ && TYPE_IS_PADDING_P (TREE_TYPE (gnu_result))
+ && TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_result)))
+ == gnu_result_type
+ && CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_result_type)))
+ ;
+
+ else if (TREE_TYPE (gnu_result) != gnu_result_type)
+ gnu_result = convert (gnu_result_type, gnu_result);
+
+ /* We don't need any NOP_EXPR or NON_LVALUE_EXPR on the result. */
+ while ((TREE_CODE (gnu_result) == NOP_EXPR
+ || TREE_CODE (gnu_result) == NON_LVALUE_EXPR)
+ && TREE_TYPE (TREE_OPERAND (gnu_result, 0)) == TREE_TYPE (gnu_result))
+ gnu_result = TREE_OPERAND (gnu_result, 0);
+
+ return gnu_result;
+}
+
+/* Subroutine of above to push the exception label stack. GNU_STACK is
+ a pointer to the stack to update and GNAT_LABEL, if present, is the
+ label to push onto the stack. */
+
+static void
+push_exception_label_stack (vec<tree, va_gc> **gnu_stack, Entity_Id gnat_label)
+{
+ tree gnu_label = (Present (gnat_label)
+ ? gnat_to_gnu_entity (gnat_label, NULL_TREE, 0)
+ : NULL_TREE);
+
+ vec_safe_push (*gnu_stack, gnu_label);
+}
+
+/* Record the current code position in GNAT_NODE. */
+
+static void
+record_code_position (Node_Id gnat_node)
+{
+ tree stmt_stmt = build1 (STMT_STMT, void_type_node, NULL_TREE);
+
+ add_stmt_with_node (stmt_stmt, gnat_node);
+ save_gnu_tree (gnat_node, stmt_stmt, true);
+}
+
+/* Insert the code for GNAT_NODE at the position saved for that node. */
+
+static void
+insert_code_for (Node_Id gnat_node)
+{
+ STMT_STMT_STMT (get_gnu_tree (gnat_node)) = gnat_to_gnu (gnat_node);
+ save_gnu_tree (gnat_node, NULL_TREE, true);
+}
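+
+/* This placeholder mechanism is used for package bodies whose elaboration
+   must be deferred to their freeze node: process_decls below records the
+   code position at the point of declaration and process_freeze_entity
+   later calls insert_code_for to fill in the actual code.  */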
+
+/* Start a new statement group chained to the previous group. */
+
+void
+start_stmt_group (void)
+{
+ struct stmt_group *group = stmt_group_free_list;
+
+ /* First see if we can get one from the free list. */
+ if (group)
+ stmt_group_free_list = group->previous;
+ else
+ group = ggc_alloc_stmt_group ();
+
+ group->previous = current_stmt_group;
+ group->stmt_list = group->block = group->cleanups = NULL_TREE;
+ current_stmt_group = group;
+}
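+
+/* The typical usage pattern is
+
+     start_stmt_group ();
+     add_stmt (...);
+     gnu_result = end_stmt_group ();
+
+   as can be seen throughout gnat_to_gnu above.  */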
+
+/* Add GNU_STMT to the current statement group. If it is an expression with
+ no effects, it is ignored. */
+
+void
+add_stmt (tree gnu_stmt)
+{
+ append_to_statement_list (gnu_stmt, &current_stmt_group->stmt_list);
+}
+
+/* Similar, but the statement is always added, regardless of side-effects. */
+
+void
+add_stmt_force (tree gnu_stmt)
+{
+ append_to_statement_list_force (gnu_stmt, &current_stmt_group->stmt_list);
+}
+
+/* Like add_stmt, but set the location of GNU_STMT to that of GNAT_NODE. */
+
+void
+add_stmt_with_node (tree gnu_stmt, Node_Id gnat_node)
+{
+ if (Present (gnat_node))
+ set_expr_location_from_node (gnu_stmt, gnat_node);
+ add_stmt (gnu_stmt);
+}
+
+/* Similar, but the statement is always added, regardless of side-effects. */
+
+void
+add_stmt_with_node_force (tree gnu_stmt, Node_Id gnat_node)
+{
+ if (Present (gnat_node))
+ set_expr_location_from_node (gnu_stmt, gnat_node);
+ add_stmt_force (gnu_stmt);
+}
+
+/* Add a declaration statement for GNU_DECL to the current statement group.
+   Get the source location from GNAT_ENTITY.  */
+
+void
+add_decl_expr (tree gnu_decl, Entity_Id gnat_entity)
+{
+ tree type = TREE_TYPE (gnu_decl);
+ tree gnu_stmt, gnu_init, t;
+
+ /* If this is a variable that Gigi is to ignore, we may have been given
+ an ERROR_MARK. So test for it. We also might have been given a
+ reference for a renaming. So only do something for a decl. Also
+ ignore a TYPE_DECL for an UNCONSTRAINED_ARRAY_TYPE. */
+ if (!DECL_P (gnu_decl)
+ || (TREE_CODE (gnu_decl) == TYPE_DECL
+ && TREE_CODE (type) == UNCONSTRAINED_ARRAY_TYPE))
+ return;
+
+ gnu_stmt = build1 (DECL_EXPR, void_type_node, gnu_decl);
+
+ /* If we are external or global, we don't want to output the DECL_EXPR for
+ this DECL node since we already have evaluated the expressions in the
+ sizes and positions as globals and doing it again would be wrong. */
+ if (DECL_EXTERNAL (gnu_decl) || global_bindings_p ())
+ {
+ /* Mark everything as used to prevent node sharing with subprograms.
+	 Note that walk_tree knows how to deal with TYPE_DECL, but not with
+	 VAR_DECL or CONST_DECL.  This appears to be somewhat arbitrary.  */
+ MARK_VISITED (gnu_stmt);
+ if (TREE_CODE (gnu_decl) == VAR_DECL
+ || TREE_CODE (gnu_decl) == CONST_DECL)
+ {
+ MARK_VISITED (DECL_SIZE (gnu_decl));
+ MARK_VISITED (DECL_SIZE_UNIT (gnu_decl));
+ MARK_VISITED (DECL_INITIAL (gnu_decl));
+ }
+ /* In any case, we have to deal with our own TYPE_ADA_SIZE field. */
+ else if (TREE_CODE (gnu_decl) == TYPE_DECL
+ && RECORD_OR_UNION_TYPE_P (type)
+ && !TYPE_FAT_POINTER_P (type))
+ MARK_VISITED (TYPE_ADA_SIZE (type));
+ }
+ else
+ add_stmt_with_node (gnu_stmt, gnat_entity);
+
+ /* If this is a variable and an initializer is attached to it, it must be
+ valid for the context. Similar to init_const in create_var_decl_1. */
+ if (TREE_CODE (gnu_decl) == VAR_DECL
+ && (gnu_init = DECL_INITIAL (gnu_decl)) != NULL_TREE
+ && (!gnat_types_compatible_p (type, TREE_TYPE (gnu_init))
+ || (TREE_STATIC (gnu_decl)
+ && !initializer_constant_valid_p (gnu_init,
+ TREE_TYPE (gnu_init)))))
+ {
+ /* If GNU_DECL has a padded type, convert it to the unpadded
+ type so the assignment is done properly. */
+ if (TYPE_IS_PADDING_P (type))
+ t = convert (TREE_TYPE (TYPE_FIELDS (type)), gnu_decl);
+ else
+ t = gnu_decl;
+
+ gnu_stmt = build_binary_op (INIT_EXPR, NULL_TREE, t, gnu_init);
+
+ DECL_INITIAL (gnu_decl) = NULL_TREE;
+ if (TREE_READONLY (gnu_decl))
+ {
+ TREE_READONLY (gnu_decl) = 0;
+ DECL_READONLY_ONCE_ELAB (gnu_decl) = 1;
+ }
+
+ add_stmt_with_node (gnu_stmt, gnat_entity);
+ }
+}
+
+/* Callback for walk_tree to mark the visited trees rooted at *TP. */
+
+static tree
+mark_visited_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
+{
+ tree t = *tp;
+
+ if (TREE_VISITED (t))
+ *walk_subtrees = 0;
+
+ /* Don't mark a dummy type as visited because we want to mark its sizes
+ and fields once it's filled in. */
+ else if (!TYPE_IS_DUMMY_P (t))
+ TREE_VISITED (t) = 1;
+
+ if (TYPE_P (t))
+ TYPE_SIZES_GIMPLIFIED (t) = 1;
+
+ return NULL_TREE;
+}
+
+/* Mark nodes rooted at T with TREE_VISITED and types as having their
+   sizes gimplified.  We use this to indicate that all variable sizes and
+   positions in global types may not be shared by any subprogram.  */
+
+void
+mark_visited (tree t)
+{
+ walk_tree (&t, mark_visited_r, NULL, NULL);
+}
+
+/* Add GNU_CLEANUP, a cleanup action, to the current code group and
+ set its location to that of GNAT_NODE if present, but with column info
+ cleared so that conditional branches generated as part of the cleanup
+ code do not interfere with coverage analysis tools. */
+
+static void
+add_cleanup (tree gnu_cleanup, Node_Id gnat_node)
+{
+ if (Present (gnat_node))
+ set_expr_location_from_node1 (gnu_cleanup, gnat_node, true);
+ append_to_statement_list (gnu_cleanup, &current_stmt_group->cleanups);
+}
+
+/* Set the BLOCK node corresponding to the current code group to GNU_BLOCK. */
+
+void
+set_block_for_group (tree gnu_block)
+{
+ gcc_assert (!current_stmt_group->block);
+ current_stmt_group->block = gnu_block;
+}
+
+/* Return code corresponding to the current code group. It is normally
+ a STATEMENT_LIST, but may also be a BIND_EXPR or TRY_FINALLY_EXPR if
+ BLOCK or cleanups were set. */
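+
+/* For example, a group with statement list S, cleanups C and block B is
+   returned as
+
+     BIND_EXPR
+       TRY_FINALLY_EXPR
+         S
+         C
+
+   so that C may reference variables bound in B.  */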
+
+tree
+end_stmt_group (void)
+{
+ struct stmt_group *group = current_stmt_group;
+ tree gnu_retval = group->stmt_list;
+
+  /* If this is a null list, allocate a new STATEMENT_LIST.  Then, if there
+     are cleanups, make a TRY_FINALLY_EXPR.  Last, if there is a BLOCK,
+     make a BIND_EXPR, nesting the TRY_FINALLY_EXPR inside it because the
+     cleanups may reference variables in the block.  */
+ if (gnu_retval == NULL_TREE)
+ gnu_retval = alloc_stmt_list ();
+
+ if (group->cleanups)
+ gnu_retval = build2 (TRY_FINALLY_EXPR, void_type_node, gnu_retval,
+ group->cleanups);
+
+ if (current_stmt_group->block)
+ gnu_retval = build3 (BIND_EXPR, void_type_node, BLOCK_VARS (group->block),
+ gnu_retval, group->block);
+
+ /* Remove this group from the stack and add it to the free list. */
+ current_stmt_group = group->previous;
+ group->previous = stmt_group_free_list;
+ stmt_group_free_list = group;
+
+ return gnu_retval;
+}
+
+/* Return whether the current statement group may fall through. */
+
+static inline bool
+stmt_group_may_fallthru (void)
+{
+ if (current_stmt_group->stmt_list)
+ return block_may_fallthru (current_stmt_group->stmt_list);
+ else
+ return true;
+}
+
+/* Add to the current statement group the statements from GNAT_LIST, a
+   possibly-empty list of statements.  */
+
+static void
+add_stmt_list (List_Id gnat_list)
+{
+ Node_Id gnat_node;
+
+ if (Present (gnat_list))
+ for (gnat_node = First (gnat_list); Present (gnat_node);
+ gnat_node = Next (gnat_node))
+ add_stmt (gnat_to_gnu (gnat_node));
+}
+
+/* Build a tree from GNAT_LIST, a possibly-empty list of statements.
+ If BINDING_P is true, push and pop a binding level around the list. */
+
+static tree
+build_stmt_group (List_Id gnat_list, bool binding_p)
+{
+ start_stmt_group ();
+ if (binding_p)
+ gnat_pushlevel ();
+
+ add_stmt_list (gnat_list);
+ if (binding_p)
+ gnat_poplevel ();
+
+ return end_stmt_group ();
+}
+
+/* Generate GIMPLE in place for the expression at *EXPR_P. */
+
+int
+gnat_gimplify_expr (tree *expr_p, gimple_seq *pre_p,
+ gimple_seq *post_p ATTRIBUTE_UNUSED)
+{
+ tree expr = *expr_p;
+ tree op;
+
+ if (IS_ADA_STMT (expr))
+ return gnat_gimplify_stmt (expr_p);
+
+ switch (TREE_CODE (expr))
+ {
+ case NULL_EXPR:
+ /* If this is for a scalar, just make a VAR_DECL for it. If for
+ an aggregate, get a null pointer of the appropriate type and
+ dereference it. */
+ if (AGGREGATE_TYPE_P (TREE_TYPE (expr)))
+ *expr_p = build1 (INDIRECT_REF, TREE_TYPE (expr),
+ convert (build_pointer_type (TREE_TYPE (expr)),
+ integer_zero_node));
+ else
+ {
+ *expr_p = create_tmp_var (TREE_TYPE (expr), NULL);
+ TREE_NO_WARNING (*expr_p) = 1;
+ }
+
+ gimplify_and_add (TREE_OPERAND (expr, 0), pre_p);
+ return GS_OK;
+
+ case UNCONSTRAINED_ARRAY_REF:
+ /* We should only do this if we are just elaborating for side-effects,
+ but we can't know that yet. */
+ *expr_p = TREE_OPERAND (*expr_p, 0);
+ return GS_OK;
+
+ case ADDR_EXPR:
+ op = TREE_OPERAND (expr, 0);
+
+ /* If we are taking the address of a constant CONSTRUCTOR, make sure it
+ is put into static memory. We know that it's going to be read-only
+ given the semantics we have and it must be in static memory when the
+ reference is in an elaboration procedure. */
+ if (TREE_CODE (op) == CONSTRUCTOR && TREE_CONSTANT (op))
+ {
+ tree addr = build_fold_addr_expr (tree_output_constant_def (op));
+ *expr_p = fold_convert (TREE_TYPE (expr), addr);
+ return GS_ALL_DONE;
+ }
+
+ return GS_UNHANDLED;
+
+ case VIEW_CONVERT_EXPR:
+ op = TREE_OPERAND (expr, 0);
+
+ /* If we are view-converting a CONSTRUCTOR or a call from an aggregate
+ type to a scalar one, explicitly create the local temporary. That's
+ required if the type is passed by reference. */
+ if ((TREE_CODE (op) == CONSTRUCTOR || TREE_CODE (op) == CALL_EXPR)
+ && AGGREGATE_TYPE_P (TREE_TYPE (op))
+ && !AGGREGATE_TYPE_P (TREE_TYPE (expr)))
+ {
+ tree mod, new_var = create_tmp_var_raw (TREE_TYPE (op), "C");
+ gimple_add_tmp_var (new_var);
+
+ mod = build2 (INIT_EXPR, TREE_TYPE (new_var), new_var, op);
+ gimplify_and_add (mod, pre_p);
+
+ TREE_OPERAND (expr, 0) = new_var;
+ return GS_OK;
+ }
+
+ return GS_UNHANDLED;
+
+ case DECL_EXPR:
+ op = DECL_EXPR_DECL (expr);
+
+ /* The expressions for the RM bounds must be gimplified to ensure that
+ they are properly elaborated. See gimplify_decl_expr. */
+ if ((TREE_CODE (op) == TYPE_DECL || TREE_CODE (op) == VAR_DECL)
+ && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (op)))
+ switch (TREE_CODE (TREE_TYPE (op)))
+ {
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ case REAL_TYPE:
+ {
+ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (op)), t, val;
+
+ val = TYPE_RM_MIN_VALUE (type);
+ if (val)
+ {
+ gimplify_one_sizepos (&val, pre_p);
+ for (t = type; t; t = TYPE_NEXT_VARIANT (t))
+ SET_TYPE_RM_MIN_VALUE (t, val);
+ }
+
+ val = TYPE_RM_MAX_VALUE (type);
+ if (val)
+ {
+ gimplify_one_sizepos (&val, pre_p);
+ for (t = type; t; t = TYPE_NEXT_VARIANT (t))
+ SET_TYPE_RM_MAX_VALUE (t, val);
+ }
+
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* ... fall through ... */
+
+ default:
+ return GS_UNHANDLED;
+ }
+}
+
+/* Generate GIMPLE in place for the statement at *STMT_P. */
+
+static enum gimplify_status
+gnat_gimplify_stmt (tree *stmt_p)
+{
+ tree stmt = *stmt_p;
+
+ switch (TREE_CODE (stmt))
+ {
+ case STMT_STMT:
+ *stmt_p = STMT_STMT_STMT (stmt);
+ return GS_OK;
+
+ case LOOP_STMT:
+ {
+ tree gnu_start_label = create_artificial_label (input_location);
+ tree gnu_cond = LOOP_STMT_COND (stmt);
+ tree gnu_update = LOOP_STMT_UPDATE (stmt);
+ tree gnu_end_label = LOOP_STMT_LABEL (stmt);
+ tree t;
+
+ /* Build the condition expression from the test, if any. */
+ if (gnu_cond)
+ gnu_cond
+ = build3 (COND_EXPR, void_type_node, gnu_cond, alloc_stmt_list (),
+ build1 (GOTO_EXPR, void_type_node, gnu_end_label));
+
+ /* Set to emit the statements of the loop. */
+ *stmt_p = NULL_TREE;
+
+ /* We first emit the start label and then a conditional jump to the
+ end label if there's a top condition, then the update if it's at
+ the top, then the body of the loop, then a conditional jump to
+ the end label if there's a bottom condition, then the update if
+ it's at the bottom, and finally a jump to the start label and the
+ definition of the end label. */
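+      /* Schematically, the emitted sequence is
+
+	   start:
+	     if (!top_cond)    goto end;
+	     top_update;
+	     body;
+	     if (!bottom_cond) goto end;
+	     bottom_update;
+	     goto start;
+	   end:
+
+	 with each piece present only when applicable.  */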
+ append_to_statement_list (build1 (LABEL_EXPR, void_type_node,
+ gnu_start_label),
+ stmt_p);
+
+ if (gnu_cond && !LOOP_STMT_BOTTOM_COND_P (stmt))
+ append_to_statement_list (gnu_cond, stmt_p);
+
+ if (gnu_update && LOOP_STMT_TOP_UPDATE_P (stmt))
+ append_to_statement_list (gnu_update, stmt_p);
+
+ append_to_statement_list (LOOP_STMT_BODY (stmt), stmt_p);
+
+ if (gnu_cond && LOOP_STMT_BOTTOM_COND_P (stmt))
+ append_to_statement_list (gnu_cond, stmt_p);
+
+ if (gnu_update && !LOOP_STMT_TOP_UPDATE_P (stmt))
+ append_to_statement_list (gnu_update, stmt_p);
+
+ t = build1 (GOTO_EXPR, void_type_node, gnu_start_label);
+ SET_EXPR_LOCATION (t, DECL_SOURCE_LOCATION (gnu_end_label));
+ append_to_statement_list (t, stmt_p);
+
+ append_to_statement_list (build1 (LABEL_EXPR, void_type_node,
+ gnu_end_label),
+ stmt_p);
+ return GS_OK;
+ }
+
+ case EXIT_STMT:
+ /* Build a statement to jump to the corresponding end label, then
+ see if it needs to be conditional. */
+ *stmt_p = build1 (GOTO_EXPR, void_type_node, EXIT_STMT_LABEL (stmt));
+ if (EXIT_STMT_COND (stmt))
+ *stmt_p = build3 (COND_EXPR, void_type_node,
+ EXIT_STMT_COND (stmt), *stmt_p, alloc_stmt_list ());
+ return GS_OK;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Force references to each of the entities in packages withed by GNAT_NODE.
+ Operate recursively but check that we aren't elaborating something more
+ than once.
+
+ This routine is exclusively called in type_annotate mode, to compute DDA
+ information for types in withed units, for ASIS use. */
+
+static void
+elaborate_all_entities (Node_Id gnat_node)
+{
+ Entity_Id gnat_with_clause, gnat_entity;
+
+ /* Process each unit only once. As we trace the context of all relevant
+ units transitively, including generic bodies, we may encounter the
+ same generic unit repeatedly. */
+ if (!present_gnu_tree (gnat_node))
+ save_gnu_tree (gnat_node, integer_zero_node, true);
+
+ /* Save entities in all context units. A body may have an implicit_with
+ on its own spec, if the context includes a child unit, so don't save
+ the spec twice. */
+ for (gnat_with_clause = First (Context_Items (gnat_node));
+ Present (gnat_with_clause);
+ gnat_with_clause = Next (gnat_with_clause))
+ if (Nkind (gnat_with_clause) == N_With_Clause
+ && !present_gnu_tree (Library_Unit (gnat_with_clause))
+ && Library_Unit (gnat_with_clause) != Library_Unit (Cunit (Main_Unit)))
+ {
+ elaborate_all_entities (Library_Unit (gnat_with_clause));
+
+ if (Ekind (Entity (Name (gnat_with_clause))) == E_Package)
+ {
+ for (gnat_entity = First_Entity (Entity (Name (gnat_with_clause)));
+ Present (gnat_entity);
+ gnat_entity = Next_Entity (gnat_entity))
+ if (Is_Public (gnat_entity)
+ && Convention (gnat_entity) != Convention_Intrinsic
+ && Ekind (gnat_entity) != E_Package
+ && Ekind (gnat_entity) != E_Package_Body
+ && Ekind (gnat_entity) != E_Operator
+ && !(IN (Ekind (gnat_entity), Type_Kind)
+ && !Is_Frozen (gnat_entity))
+ && !((Ekind (gnat_entity) == E_Procedure
+ || Ekind (gnat_entity) == E_Function)
+ && Is_Intrinsic_Subprogram (gnat_entity))
+ && !IN (Ekind (gnat_entity), Named_Kind)
+ && !IN (Ekind (gnat_entity), Generic_Unit_Kind))
+ gnat_to_gnu_entity (gnat_entity, NULL_TREE, 0);
+ }
+ else if (Ekind (Entity (Name (gnat_with_clause))) == E_Generic_Package)
+ {
+ Node_Id gnat_body
+ = Corresponding_Body (Unit (Library_Unit (gnat_with_clause)));
+
+ /* Retrieve compilation unit node of generic body. */
+ while (Present (gnat_body)
+ && Nkind (gnat_body) != N_Compilation_Unit)
+ gnat_body = Parent (gnat_body);
+
+ /* If body is available, elaborate its context. */
+ if (Present (gnat_body))
+ elaborate_all_entities (gnat_body);
+ }
+ }
+
+ if (Nkind (Unit (gnat_node)) == N_Package_Body)
+ elaborate_all_entities (Library_Unit (gnat_node));
+}
+
+/* Do the processing of GNAT_NODE, an N_Freeze_Entity. */
+
+static void
+process_freeze_entity (Node_Id gnat_node)
+{
+ const Entity_Id gnat_entity = Entity (gnat_node);
+ const Entity_Kind kind = Ekind (gnat_entity);
+ tree gnu_old, gnu_new;
+
+ /* If this is a package, we need to generate code for the package. */
+ if (kind == E_Package)
+ {
+ insert_code_for
+ (Parent (Corresponding_Body
+ (Parent (Declaration_Node (gnat_entity)))));
+ return;
+ }
+
+ /* Don't do anything for class-wide types as they are always transformed
+ into their root type. */
+ if (kind == E_Class_Wide_Type)
+ return;
+
+ /* Check for an old definition. This freeze node might be for an Itype. */
+ gnu_old
+ = present_gnu_tree (gnat_entity) ? get_gnu_tree (gnat_entity) : NULL_TREE;
+
+ /* If this entity has an address representation clause, GNU_OLD is the
+ address, so discard it here. */
+ if (Present (Address_Clause (gnat_entity)))
+ gnu_old = NULL_TREE;
+
+ /* Don't do anything for subprograms that may have been elaborated before
+ their freeze nodes. This can happen, for example, because of an inner
+ call in an instance body or because of previous compilation of a spec
+ for inlining purposes. */
+ if (gnu_old
+ && ((TREE_CODE (gnu_old) == FUNCTION_DECL
+ && (kind == E_Function || kind == E_Procedure))
+ || (TREE_CODE (TREE_TYPE (gnu_old)) == FUNCTION_TYPE
+ && kind == E_Subprogram_Type)))
+ return;
+
+  /* If the old tree is for a non-dummy type, we have nothing to do, except
+     aborting if this is the public view of a private type whose full view
+     was not delayed, since this node should have been delayed but never
+     was.  We let this happen for concurrent types and their
+     Corresponding_Record_Type, however, because each might legitimately be
+     elaborated before its own freeze node, e.g. while processing the
+     other.  */
+ if (gnu_old
+ && !(TREE_CODE (gnu_old) == TYPE_DECL
+ && TYPE_IS_DUMMY_P (TREE_TYPE (gnu_old))))
+ {
+ gcc_assert ((IN (kind, Incomplete_Or_Private_Kind)
+ && Present (Full_View (gnat_entity))
+ && No (Freeze_Node (Full_View (gnat_entity))))
+ || Is_Concurrent_Type (gnat_entity)
+ || (IN (kind, Record_Kind)
+ && Is_Concurrent_Record_Type (gnat_entity)));
+ return;
+ }
+
+ /* Reset the saved tree, if any, and elaborate the object or type for real.
+ If there is a full view, elaborate it and use the result. And, if this
+ is the root type of a class-wide type, reuse it for the latter. */
+ if (gnu_old)
+ {
+ save_gnu_tree (gnat_entity, NULL_TREE, false);
+ if (IN (kind, Incomplete_Or_Private_Kind)
+ && Present (Full_View (gnat_entity))
+ && present_gnu_tree (Full_View (gnat_entity)))
+ save_gnu_tree (Full_View (gnat_entity), NULL_TREE, false);
+ if (IN (kind, Type_Kind)
+ && Present (Class_Wide_Type (gnat_entity))
+ && Root_Type (Class_Wide_Type (gnat_entity)) == gnat_entity)
+ save_gnu_tree (Class_Wide_Type (gnat_entity), NULL_TREE, false);
+ }
+
+ if (IN (kind, Incomplete_Or_Private_Kind)
+ && Present (Full_View (gnat_entity)))
+ {
+ gnu_new = gnat_to_gnu_entity (Full_View (gnat_entity), NULL_TREE, 1);
+
+ /* Propagate back-annotations from full view to partial view. */
+ if (Unknown_Alignment (gnat_entity))
+ Set_Alignment (gnat_entity, Alignment (Full_View (gnat_entity)));
+
+ if (Unknown_Esize (gnat_entity))
+ Set_Esize (gnat_entity, Esize (Full_View (gnat_entity)));
+
+ if (Unknown_RM_Size (gnat_entity))
+ Set_RM_Size (gnat_entity, RM_Size (Full_View (gnat_entity)));
+
+ /* The above call may have defined this entity (the simplest example
+ of this is when we have a private enumeral type since the bounds
+	 will have the public view).  */
+ if (!present_gnu_tree (gnat_entity))
+ save_gnu_tree (gnat_entity, gnu_new, false);
+ }
+ else
+ {
+ tree gnu_init
+ = (Nkind (Declaration_Node (gnat_entity)) == N_Object_Declaration
+ && present_gnu_tree (Declaration_Node (gnat_entity)))
+ ? get_gnu_tree (Declaration_Node (gnat_entity)) : NULL_TREE;
+
+ gnu_new = gnat_to_gnu_entity (gnat_entity, gnu_init, 1);
+ }
+
+ if (IN (kind, Type_Kind)
+ && Present (Class_Wide_Type (gnat_entity))
+ && Root_Type (Class_Wide_Type (gnat_entity)) == gnat_entity)
+ save_gnu_tree (Class_Wide_Type (gnat_entity), gnu_new, false);
+
+ /* If we have an old type and we've made pointers to this type, update those
+ pointers. If this is a Taft amendment type in the main unit, we need to
+ mark the type as used since other units referencing it don't see the full
+ declaration and, therefore, cannot mark it as used themselves. */
+ if (gnu_old)
+ {
+ update_pointer_to (TYPE_MAIN_VARIANT (TREE_TYPE (gnu_old)),
+ TREE_TYPE (gnu_new));
+ if (DECL_TAFT_TYPE_P (gnu_old))
+ used_types_insert (TREE_TYPE (gnu_new));
+ }
+}
+
+/* Elaborate decls in the lists GNAT_DECLS and GNAT_DECLS2, if present.
+ We make two passes, one to elaborate anything other than bodies (but
+ we declare a function if there was no spec). The second pass
+ elaborates the bodies.
+
+ GNAT_END_LIST gives the element in the list past the end. Normally,
+ this is Empty, but can be First_Real_Statement for a
+ Handled_Sequence_Of_Statements.
+
+ We make a complete pass through both lists if PASS1P is true, then make
+ the second pass over both lists if PASS2P is true. The lists usually
+ correspond to the public and private parts of a package. */
+
+static void
+process_decls (List_Id gnat_decls, List_Id gnat_decls2,
+ Node_Id gnat_end_list, bool pass1p, bool pass2p)
+{
+ List_Id gnat_decl_array[2];
+ Node_Id gnat_decl;
+ int i;
+
+ gnat_decl_array[0] = gnat_decls, gnat_decl_array[1] = gnat_decls2;
+
+ if (pass1p)
+ for (i = 0; i <= 1; i++)
+ if (Present (gnat_decl_array[i]))
+ for (gnat_decl = First (gnat_decl_array[i]);
+ gnat_decl != gnat_end_list; gnat_decl = Next (gnat_decl))
+ {
+ /* For package specs, we recurse inside the declarations,
+ thus taking the two pass approach inside the boundary. */
+ if (Nkind (gnat_decl) == N_Package_Declaration
+		&& (Nkind (Specification (gnat_decl))
+		    == N_Package_Specification))
+ process_decls (Visible_Declarations (Specification (gnat_decl)),
+ Private_Declarations (Specification (gnat_decl)),
+ Empty, true, false);
+
+ /* Similarly for any declarations in the actions of a
+ freeze node. */
+ else if (Nkind (gnat_decl) == N_Freeze_Entity)
+ {
+ process_freeze_entity (gnat_decl);
+ process_decls (Actions (gnat_decl), Empty, Empty, true, false);
+ }
+
+ /* Package bodies with freeze nodes get their elaboration deferred
+ until the freeze node, but the code must be placed in the right
+ place, so record the code position now. */
+ else if (Nkind (gnat_decl) == N_Package_Body
+ && Present (Freeze_Node (Corresponding_Spec (gnat_decl))))
+ record_code_position (gnat_decl);
+
+ else if (Nkind (gnat_decl) == N_Package_Body_Stub
+ && Present (Library_Unit (gnat_decl))
+ && Present (Freeze_Node
+ (Corresponding_Spec
+ (Proper_Body (Unit
+ (Library_Unit (gnat_decl)))))))
+ record_code_position
+ (Proper_Body (Unit (Library_Unit (gnat_decl))));
+
+ /* We defer most subprogram bodies to the second pass. */
+ else if (Nkind (gnat_decl) == N_Subprogram_Body)
+ {
+ if (Acts_As_Spec (gnat_decl))
+ {
+ Node_Id gnat_subprog_id = Defining_Entity (gnat_decl);
+
+ if (Ekind (gnat_subprog_id) != E_Generic_Procedure
+ && Ekind (gnat_subprog_id) != E_Generic_Function)
+ gnat_to_gnu_entity (gnat_subprog_id, NULL_TREE, 1);
+ }
+ }
+
+ /* For bodies and stubs that act as their own specs, the entity
+ itself must be elaborated in the first pass, because it may
+ be used in other declarations. */
+ else if (Nkind (gnat_decl) == N_Subprogram_Body_Stub)
+ {
+ Node_Id gnat_subprog_id
+ = Defining_Entity (Specification (gnat_decl));
+
+ if (Ekind (gnat_subprog_id) != E_Subprogram_Body
+ && Ekind (gnat_subprog_id) != E_Generic_Procedure
+ && Ekind (gnat_subprog_id) != E_Generic_Function)
+ gnat_to_gnu_entity (gnat_subprog_id, NULL_TREE, 1);
+ }
+
+ /* Concurrent stubs stand for the corresponding subprogram bodies,
+ which are deferred like other bodies. */
+ else if (Nkind (gnat_decl) == N_Task_Body_Stub
+ || Nkind (gnat_decl) == N_Protected_Body_Stub)
+ ;
+
+ else
+ add_stmt (gnat_to_gnu (gnat_decl));
+ }
+
+  /* Here we elaborate everything we deferred above except for package
+     bodies, which are elaborated at their freeze nodes.  Note that we must
+     also go inside the things (package specs and freeze nodes) that the
+     first pass went into.  */
+ if (pass2p)
+ for (i = 0; i <= 1; i++)
+ if (Present (gnat_decl_array[i]))
+ for (gnat_decl = First (gnat_decl_array[i]);
+ gnat_decl != gnat_end_list; gnat_decl = Next (gnat_decl))
+ {
+ if (Nkind (gnat_decl) == N_Subprogram_Body
+ || Nkind (gnat_decl) == N_Subprogram_Body_Stub
+ || Nkind (gnat_decl) == N_Task_Body_Stub
+ || Nkind (gnat_decl) == N_Protected_Body_Stub)
+ add_stmt (gnat_to_gnu (gnat_decl));
+
+ else if (Nkind (gnat_decl) == N_Package_Declaration
+		   && (Nkind (Specification (gnat_decl))
+		       == N_Package_Specification))
+ process_decls (Visible_Declarations (Specification (gnat_decl)),
+ Private_Declarations (Specification (gnat_decl)),
+ Empty, false, true);
+
+ else if (Nkind (gnat_decl) == N_Freeze_Entity)
+ process_decls (Actions (gnat_decl), Empty, Empty, false, true);
+ }
+}
+
+/* Make a unary operation of kind CODE using build_unary_op, but guard
+ the operation by an overflow check. CODE can be one of NEGATE_EXPR
+ or ABS_EXPR. GNU_TYPE is the type desired for the result. Usually
+ the operation is to be performed in that type. GNAT_NODE is the gnat
+ node conveying the source location for which the error should be
+ signaled. */
+
+static tree
+build_unary_op_trapv (enum tree_code code, tree gnu_type, tree operand,
+ Node_Id gnat_node)
+{
+ gcc_assert (code == NEGATE_EXPR || code == ABS_EXPR);
+
+ operand = gnat_protect_expr (operand);
+
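+  /* In two's complement, TYPE_MIN_VALUE of GNU_TYPE is the only operand
+     for which NEGATE_EXPR and ABS_EXPR overflow, e.g. -(-128) and
+     abs (-128) for a signed 8-bit type, hence the single equality test.  */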
+ return emit_check (build_binary_op (EQ_EXPR, boolean_type_node,
+ operand, TYPE_MIN_VALUE (gnu_type)),
+ build_unary_op (code, gnu_type, operand),
+ CE_Overflow_Check_Failed, gnat_node);
+}
+
+/* Make a binary operation of kind CODE using build_binary_op, but guard
+ the operation by an overflow check. CODE can be one of PLUS_EXPR,
+ MINUS_EXPR or MULT_EXPR. GNU_TYPE is the type desired for the result.
+ Usually the operation is to be performed in that type. GNAT_NODE is
+ the GNAT node conveying the source location for which the error should
+ be signaled. */
+
+static tree
+build_binary_op_trapv (enum tree_code code, tree gnu_type, tree left,
+ tree right, Node_Id gnat_node)
+{
+ tree lhs = gnat_protect_expr (left);
+ tree rhs = gnat_protect_expr (right);
+ tree type_max = TYPE_MAX_VALUE (gnu_type);
+ tree type_min = TYPE_MIN_VALUE (gnu_type);
+ tree gnu_expr;
+ tree tmp1, tmp2;
+ tree zero = convert (gnu_type, integer_zero_node);
+ tree rhs_lt_zero;
+ tree check_pos;
+ tree check_neg;
+ tree check;
+ int precision = TYPE_PRECISION (gnu_type);
+
+  gcc_assert (!(precision & (precision - 1))); /* Ensure a power of 2.  */
+
+ /* Prefer a constant or known-positive rhs to simplify checks. */
+ if (!TREE_CONSTANT (rhs)
+ && commutative_tree_code (code)
+ && (TREE_CONSTANT (lhs) || (!tree_expr_nonnegative_p (rhs)
+ && tree_expr_nonnegative_p (lhs))))
+ {
+ tree tmp = lhs;
+ lhs = rhs;
+ rhs = tmp;
+ }
+
+ rhs_lt_zero = tree_expr_nonnegative_p (rhs)
+ ? boolean_false_node
+ : build_binary_op (LT_EXPR, boolean_type_node, rhs, zero);
+
+  /* ??? Should use a more efficient check for
+     operand_equal_p (lhs, rhs, 0).  */
+
+ /* Try a few strategies that may be cheaper than the general
+ code at the end of the function, if the rhs is not known.
+ The strategies are:
+ - Call library function for 64-bit multiplication (complex)
+ - Widen, if input arguments are sufficiently small
+ - Determine overflow using wrapped result for addition/subtraction. */
+
+ if (!TREE_CONSTANT (rhs))
+ {
+      /* Even for addition or subtraction, double the size to get another
+	 base type.  */
+ int needed_precision = precision * 2;
+
+ if (code == MULT_EXPR && precision == 64)
+ {
+ tree int_64 = gnat_type_for_size (64, 0);
+
+ return convert (gnu_type, build_call_n_expr (mulv64_decl, 2,
+ convert (int_64, lhs),
+ convert (int_64, rhs)));
+ }
+
+ else if (needed_precision <= BITS_PER_WORD
+ || (code == MULT_EXPR
+ && needed_precision <= LONG_LONG_TYPE_SIZE))
+ {
+ tree wide_type = gnat_type_for_size (needed_precision, 0);
+
+ tree wide_result = build_binary_op (code, wide_type,
+ convert (wide_type, lhs),
+ convert (wide_type, rhs));
+
+ tree check = build_binary_op
+ (TRUTH_ORIF_EXPR, boolean_type_node,
+ build_binary_op (LT_EXPR, boolean_type_node, wide_result,
+ convert (wide_type, type_min)),
+ build_binary_op (GT_EXPR, boolean_type_node, wide_result,
+ convert (wide_type, type_max)));
+
+ tree result = convert (gnu_type, wide_result);
+
+ return
+ emit_check (check, result, CE_Overflow_Check_Failed, gnat_node);
+ }
+
+ else if (code == PLUS_EXPR || code == MINUS_EXPR)
+ {
+ tree unsigned_type = gnat_type_for_size (precision, 1);
+ tree wrapped_expr = convert
+ (gnu_type, build_binary_op (code, unsigned_type,
+ convert (unsigned_type, lhs),
+ convert (unsigned_type, rhs)));
+
+ tree result = convert
+ (gnu_type, build_binary_op (code, gnu_type, lhs, rhs));
+
+	  /* Overflow when (rhs < 0) ^ (wrapped_expr < lhs) for addition,
+	     or when (rhs < 0) ^ (wrapped_expr > lhs) for subtraction.  */
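+	  /* E.g. with 8-bit operands, 100 + 50 wraps around to -106; since
+	     rhs >= 0 and the wrapped sum is less than lhs, the XOR below
+	     signals the overflow.  */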
+ tree check = build_binary_op
+ (TRUTH_XOR_EXPR, boolean_type_node, rhs_lt_zero,
+ build_binary_op (code == PLUS_EXPR ? LT_EXPR : GT_EXPR,
+ boolean_type_node, wrapped_expr, lhs));
+
+ return
+ emit_check (check, result, CE_Overflow_Check_Failed, gnat_node);
+ }
+ }
+
+ switch (code)
+ {
+ case PLUS_EXPR:
+ /* When rhs >= 0, overflow when lhs > type_max - rhs. */
+ check_pos = build_binary_op (GT_EXPR, boolean_type_node, lhs,
+ build_binary_op (MINUS_EXPR, gnu_type,
+						    type_max, rhs));
+
+ /* When rhs < 0, overflow when lhs < type_min - rhs. */
+ check_neg = build_binary_op (LT_EXPR, boolean_type_node, lhs,
+ build_binary_op (MINUS_EXPR, gnu_type,
+ type_min, rhs));
+ break;
+
+ case MINUS_EXPR:
+ /* When rhs >= 0, overflow when lhs < type_min + rhs. */
+ check_pos = build_binary_op (LT_EXPR, boolean_type_node, lhs,
+ build_binary_op (PLUS_EXPR, gnu_type,
+						    type_min, rhs));
+
+ /* When rhs < 0, overflow when lhs > type_max + rhs. */
+ check_neg = build_binary_op (GT_EXPR, boolean_type_node, lhs,
+ build_binary_op (PLUS_EXPR, gnu_type,
+ type_max, rhs));
+ break;
+
+ case MULT_EXPR:
+ /* The check here is designed to be efficient if the rhs is constant,
+ but it will work for any rhs by using integer division.
+ Four different check expressions determine whether X * C overflows,
+ depending on C.
+ C == 0 => false
+ C > 0 => X > type_max / C || X < type_min / C
+ C == -1 => X == type_min
+ C < -1 => X > type_min / C || X < type_max / C */
+
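+      /* For instance, with a signed 8-bit type, C == 2 overflows iff
+	 X > 127 / 2 = 63 or X < -128 / 2 = -64, and C == -2 overflows iff
+	 X > -128 / -2 = 64 or X < 127 / -2 = -63 (truncating divisions).  */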
+ tmp1 = build_binary_op (TRUNC_DIV_EXPR, gnu_type, type_max, rhs);
+ tmp2 = build_binary_op (TRUNC_DIV_EXPR, gnu_type, type_min, rhs);
+
+ check_pos
+ = build_binary_op (TRUTH_ANDIF_EXPR, boolean_type_node,
+ build_binary_op (NE_EXPR, boolean_type_node, zero,
+ rhs),
+ build_binary_op (TRUTH_ORIF_EXPR, boolean_type_node,
+ build_binary_op (GT_EXPR,
+ boolean_type_node,
+ lhs, tmp1),
+ build_binary_op (LT_EXPR,
+ boolean_type_node,
+ lhs, tmp2)));
+
+ check_neg
+ = fold_build3 (COND_EXPR, boolean_type_node,
+ build_binary_op (EQ_EXPR, boolean_type_node, rhs,
+ build_int_cst (gnu_type, -1)),
+ build_binary_op (EQ_EXPR, boolean_type_node, lhs,
+ type_min),
+ build_binary_op (TRUTH_ORIF_EXPR, boolean_type_node,
+ build_binary_op (GT_EXPR,
+ boolean_type_node,
+ lhs, tmp2),
+ build_binary_op (LT_EXPR,
+ boolean_type_node,
+ lhs, tmp1)));
+ break;
+
+ default:
+      gcc_unreachable ();
+ }
+
+ gnu_expr = build_binary_op (code, gnu_type, lhs, rhs);
+
+ /* If we can fold the expression to a constant, just return it.
+     The caller will deal with overflow; no need to generate a check.  */
+ if (TREE_CONSTANT (gnu_expr))
+ return gnu_expr;
+
+ check = fold_build3 (COND_EXPR, boolean_type_node, rhs_lt_zero, check_neg,
+ check_pos);
+
+ return emit_check (check, gnu_expr, CE_Overflow_Check_Failed, gnat_node);
+}
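+
+/* As a self-contained illustration of the widening strategy used above
+   (a sketch only, not gigi code), a checked 16-bit addition could be
+   written in plain C as
+
+     #include <stdint.h>
+
+     extern void raise_constraint_error (void);
+
+     static int16_t
+     add16_checked (int16_t lhs, int16_t rhs)
+     {
+       int32_t wide = (int32_t) lhs + (int32_t) rhs;
+       if (wide < INT16_MIN || wide > INT16_MAX)
+         raise_constraint_error ();
+       return (int16_t) wide;
+     }
+
+   where raise_constraint_error stands for the Constraint_Error raise
+   that emit_check builds.  */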
+
+/* Emit code for a range check. GNU_EXPR is the expression to be checked,
+ GNAT_RANGE_TYPE the gnat type or subtype containing the bounds against
+ which we have to check. GNAT_NODE is the GNAT node conveying the source
+ location for which the error should be signaled. */
+
+static tree
+emit_range_check (tree gnu_expr, Entity_Id gnat_range_type, Node_Id gnat_node)
+{
+ tree gnu_range_type = get_unpadded_type (gnat_range_type);
+ tree gnu_compare_type = get_base_type (TREE_TYPE (gnu_expr));
+
+ /* If GNU_EXPR has GNAT_RANGE_TYPE as its base type, no check is needed.
+ This can for example happen when translating 'Val or 'Value. */
+ if (gnu_compare_type == gnu_range_type)
+ return gnu_expr;
+
+ /* Range checks can only be applied to types with ranges. */
+ gcc_assert (INTEGRAL_TYPE_P (gnu_range_type)
+ || SCALAR_FLOAT_TYPE_P (gnu_range_type));
+
+ /* If GNU_EXPR has an integral type that is narrower than GNU_RANGE_TYPE,
+ we can't do anything since we might be truncating the bounds. No
+ check is needed in this case. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (gnu_expr))
+ && (TYPE_PRECISION (gnu_compare_type)
+ < TYPE_PRECISION (get_base_type (gnu_range_type))))
+ return gnu_expr;
+
+ /* Checked expressions must be evaluated only once. */
+ gnu_expr = gnat_protect_expr (gnu_expr);
+
+  /* Note that the form of the check is
+       (not (expr >= lo)) or (not (expr <= hi));
+     the reason for this slightly convoluted form is that NaNs are not
+     considered to be in range in the float case.  */
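+  /* For instance, if EXPR is a NaN, EXPR >= LO yields false, so its
+     negation is true and Constraint_Error is duly raised.  */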
+ return emit_check
+ (build_binary_op (TRUTH_ORIF_EXPR, boolean_type_node,
+ invert_truthvalue
+ (build_binary_op (GE_EXPR, boolean_type_node,
+ convert (gnu_compare_type, gnu_expr),
+ convert (gnu_compare_type,
+ TYPE_MIN_VALUE
+ (gnu_range_type)))),
+ invert_truthvalue
+ (build_binary_op (LE_EXPR, boolean_type_node,
+ convert (gnu_compare_type, gnu_expr),
+ convert (gnu_compare_type,
+ TYPE_MAX_VALUE
+ (gnu_range_type))))),
+ gnu_expr, CE_Range_Check_Failed, gnat_node);
+}
+
+/* Emit code for an index check. GNU_ARRAY_OBJECT is the array object which
+ we are about to index, GNU_EXPR is the index expression to be checked,
+ GNU_LOW and GNU_HIGH are the lower and upper bounds against which GNU_EXPR
+ has to be checked. Note that for index checking we cannot simply use the
+ emit_range_check function (although very similar code needs to be generated
+ in both cases) since for index checking the array type against which we are
+ checking the indices may be unconstrained and consequently we need to get
+ the actual index bounds from the array object itself (GNU_ARRAY_OBJECT).
+ The place where we need to do that is in subprograms having unconstrained
+ array formal parameters. GNAT_NODE is the GNAT node conveying the source
+ location for which the error should be signaled. */
+
+static tree
+emit_index_check (tree gnu_array_object, tree gnu_expr, tree gnu_low,
+ tree gnu_high, Node_Id gnat_node)
+{
+ tree gnu_expr_check;
+
+ /* Checked expressions must be evaluated only once. */
+ gnu_expr = gnat_protect_expr (gnu_expr);
+
+  /* Must do this computation in the base type in case the expression's
+     type is an unsigned subtype.  */
+ gnu_expr_check = convert (get_base_type (TREE_TYPE (gnu_expr)), gnu_expr);
+
+  /* If GNU_LOW or GNU_HIGH contains a PLACEHOLDER_EXPR, qualify it by
+     the object we are handling.  */
+ gnu_low = SUBSTITUTE_PLACEHOLDER_IN_EXPR (gnu_low, gnu_array_object);
+ gnu_high = SUBSTITUTE_PLACEHOLDER_IN_EXPR (gnu_high, gnu_array_object);
+
+ return emit_check
+ (build_binary_op (TRUTH_ORIF_EXPR, boolean_type_node,
+ build_binary_op (LT_EXPR, boolean_type_node,
+ gnu_expr_check,
+ convert (TREE_TYPE (gnu_expr_check),
+ gnu_low)),
+ build_binary_op (GT_EXPR, boolean_type_node,
+ gnu_expr_check,
+ convert (TREE_TYPE (gnu_expr_check),
+ gnu_high))),
+ gnu_expr, CE_Index_Check_Failed, gnat_node);
+}
+
+/* GNU_COND contains the condition corresponding to an access, discriminant or
+ range check of value GNU_EXPR. Build a COND_EXPR that returns GNU_EXPR if
+ GNU_COND is false and raises a CONSTRAINT_ERROR if GNU_COND is true.
+ REASON is the code that says why the exception was raised. GNAT_NODE is
+ the GNAT node conveying the source location for which the error should be
+ signaled. */
+
+static tree
+emit_check (tree gnu_cond, tree gnu_expr, int reason, Node_Id gnat_node)
+{
+ tree gnu_call
+ = build_call_raise (reason, gnat_node, N_Raise_Constraint_Error);
+ tree gnu_result
+ = fold_build3 (COND_EXPR, TREE_TYPE (gnu_expr), gnu_cond,
+ build2 (COMPOUND_EXPR, TREE_TYPE (gnu_expr), gnu_call,
+ convert (TREE_TYPE (gnu_expr), integer_zero_node)),
+ gnu_expr);
+
+ /* GNU_RESULT has side effects if and only if GNU_EXPR has:
+ we don't need to evaluate it just for the check. */
+ TREE_SIDE_EFFECTS (gnu_result) = TREE_SIDE_EFFECTS (gnu_expr);
+
+ return gnu_result;
+}
+
+/* Return an expression that converts GNU_EXPR to GNAT_TYPE, doing overflow
+ checks if OVERFLOW_P is true and range checks if RANGE_P is true.
+ GNAT_TYPE is known to be an integral type. If TRUNCATE_P true, do a
+ float to integer conversion with truncation; otherwise round.
+ GNAT_NODE is the GNAT node conveying the source location for which the
+ error should be signaled. */
+
+static tree
+convert_with_check (Entity_Id gnat_type, tree gnu_expr, bool overflowp,
+ bool rangep, bool truncatep, Node_Id gnat_node)
+{
+ tree gnu_type = get_unpadded_type (gnat_type);
+ tree gnu_in_type = TREE_TYPE (gnu_expr);
+ tree gnu_in_basetype = get_base_type (gnu_in_type);
+ tree gnu_base_type = get_base_type (gnu_type);
+ tree gnu_result = gnu_expr;
+
+ /* If we are not doing any checks, the output is an integral type, and
+ the input is not a floating type, just do the conversion. This
+ shortcut is required to avoid problems with packed array types
+ and simplifies code in all cases anyway. */
+ if (!rangep && !overflowp && INTEGRAL_TYPE_P (gnu_base_type)
+ && !FLOAT_TYPE_P (gnu_in_type))
+ return convert (gnu_type, gnu_expr);
+
+ /* First convert the expression to its base type. This
+ will never generate code, but makes the tests below much simpler.
+ But don't do this if converting from an integer type to an unconstrained
+ array type since then we need to get the bounds from the original
+ (unpacked) type. */
+ if (TREE_CODE (gnu_type) != UNCONSTRAINED_ARRAY_TYPE)
+ gnu_result = convert (gnu_in_basetype, gnu_result);
+
+ /* If overflow checks are requested, we need to be sure the result will
+ fit in the output base type. But don't do this if the input
+ is integer and the output floating-point. */
+ if (overflowp
+ && !(FLOAT_TYPE_P (gnu_base_type) && INTEGRAL_TYPE_P (gnu_in_basetype)))
+ {
+ /* Ensure GNU_EXPR only gets evaluated once. */
+ tree gnu_input = gnat_protect_expr (gnu_result);
+ tree gnu_cond = boolean_false_node;
+ tree gnu_in_lb = TYPE_MIN_VALUE (gnu_in_basetype);
+ tree gnu_in_ub = TYPE_MAX_VALUE (gnu_in_basetype);
+ tree gnu_out_lb = TYPE_MIN_VALUE (gnu_base_type);
+ tree gnu_out_ub = TYPE_MAX_VALUE (gnu_base_type);
+
+ /* Convert the lower bounds to signed types, so we're sure we're
+ comparing them properly. Likewise, convert the upper bounds
+ to unsigned types. */
+ if (INTEGRAL_TYPE_P (gnu_in_basetype) && TYPE_UNSIGNED (gnu_in_basetype))
+ gnu_in_lb = convert (gnat_signed_type (gnu_in_basetype), gnu_in_lb);
+
+ if (INTEGRAL_TYPE_P (gnu_in_basetype)
+ && !TYPE_UNSIGNED (gnu_in_basetype))
+ gnu_in_ub = convert (gnat_unsigned_type (gnu_in_basetype), gnu_in_ub);
+
+ if (INTEGRAL_TYPE_P (gnu_base_type) && TYPE_UNSIGNED (gnu_base_type))
+ gnu_out_lb = convert (gnat_signed_type (gnu_base_type), gnu_out_lb);
+
+ if (INTEGRAL_TYPE_P (gnu_base_type) && !TYPE_UNSIGNED (gnu_base_type))
+ gnu_out_ub = convert (gnat_unsigned_type (gnu_base_type), gnu_out_ub);
+
+ /* Check each bound separately and only if the result bound
+ is tighter than the bound on the input type. Note that all the
+ types are base types, so the bounds must be constant. Also,
+ the comparison is done in the base type of the input, which
+ always has the proper signedness. First check for input
+ integer (which means output integer), output float (which means
+ both float), or mixed, in which case we always compare.
+ Note that we have to do the comparison which would *fail* in the
+ case of an error since if it's an FP comparison and one of the
+ values is a NaN or Inf, the comparison will fail. */
+ if (INTEGRAL_TYPE_P (gnu_in_basetype)
+ ? tree_int_cst_lt (gnu_in_lb, gnu_out_lb)
+ : (FLOAT_TYPE_P (gnu_base_type)
+ ? REAL_VALUES_LESS (TREE_REAL_CST (gnu_in_lb),
+ TREE_REAL_CST (gnu_out_lb))
+ : 1))
+ gnu_cond
+ = invert_truthvalue
+ (build_binary_op (GE_EXPR, boolean_type_node,
+ gnu_input, convert (gnu_in_basetype,
+ gnu_out_lb)));
+
+ if (INTEGRAL_TYPE_P (gnu_in_basetype)
+ ? tree_int_cst_lt (gnu_out_ub, gnu_in_ub)
+ : (FLOAT_TYPE_P (gnu_base_type)
+ ? REAL_VALUES_LESS (TREE_REAL_CST (gnu_out_ub),
+                                 TREE_REAL_CST (gnu_in_ub))
+ : 1))
+ gnu_cond
+ = build_binary_op (TRUTH_ORIF_EXPR, boolean_type_node, gnu_cond,
+ invert_truthvalue
+ (build_binary_op (LE_EXPR, boolean_type_node,
+ gnu_input,
+ convert (gnu_in_basetype,
+ gnu_out_ub))));
+
+ if (!integer_zerop (gnu_cond))
+ gnu_result = emit_check (gnu_cond, gnu_input,
+ CE_Overflow_Check_Failed, gnat_node);
+ }
+
+ /* Now convert to the result base type. If this is a non-truncating
+ float-to-integer conversion, round. */
+ if (INTEGRAL_TYPE_P (gnu_base_type) && FLOAT_TYPE_P (gnu_in_basetype)
+ && !truncatep)
+ {
+ REAL_VALUE_TYPE half_minus_pred_half, pred_half;
+ tree gnu_conv, gnu_zero, gnu_comp, calc_type;
+ tree gnu_pred_half, gnu_add_pred_half, gnu_subtract_pred_half;
+ const struct real_format *fmt;
+
+ /* The following calculations depend on proper rounding to even
+ of each arithmetic operation. In order to prevent excess
+ precision from spoiling this property, use the widest hardware
+ floating-point type if FP_ARITH_MAY_WIDEN is true. */
+ calc_type
+ = FP_ARITH_MAY_WIDEN ? longest_float_type_node : gnu_in_basetype;
+
+ /* FIXME: Should not have padding in the first place. */
+ if (TYPE_IS_PADDING_P (calc_type))
+ calc_type = TREE_TYPE (TYPE_FIELDS (calc_type));
+
+ /* Compute the exact value calc_type'Pred (0.5) at compile time. */
+ fmt = REAL_MODE_FORMAT (TYPE_MODE (calc_type));
+ real_2expN (&half_minus_pred_half, -(fmt->p) - 1, TYPE_MODE (calc_type));
+ REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf,
+ half_minus_pred_half);
+ gnu_pred_half = build_real (calc_type, pred_half);
+
+      /* If the input is strictly negative, subtract this value from
+         the input; otherwise add it to the input.  For 0.5, the result
+         is exactly between 1.0 and the machine number preceding 1.0
+         (for calc_type).  Since the last bit of 1.0 is even, this 0.5
+         will round to 1.0, while all other numbers with an absolute
+ value less than 0.5 round to 0.0. For larger numbers exactly
+ halfway between integers, rounding will always be correct as
+ the true mathematical result will be closer to the higher
+ integer compared to the lower one. So, this constant works
+ for all floating-point numbers.
+
+ The reason to use the same constant with subtract/add instead
+ of a positive and negative constant is to allow the comparison
+ to be scheduled in parallel with retrieval of the constant and
+ conversion of the input to the calc_type (if necessary). */
+
+ gnu_zero = convert (gnu_in_basetype, integer_zero_node);
+ gnu_result = gnat_protect_expr (gnu_result);
+ gnu_conv = convert (calc_type, gnu_result);
+ gnu_comp
+ = fold_build2 (GE_EXPR, boolean_type_node, gnu_result, gnu_zero);
+ gnu_add_pred_half
+ = fold_build2 (PLUS_EXPR, calc_type, gnu_conv, gnu_pred_half);
+ gnu_subtract_pred_half
+ = fold_build2 (MINUS_EXPR, calc_type, gnu_conv, gnu_pred_half);
+ gnu_result = fold_build3 (COND_EXPR, calc_type, gnu_comp,
+ gnu_add_pred_half, gnu_subtract_pred_half);
+ }
+
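+  /* [Editorial sketch, not part of the original source]  The add-or-
+     subtract step above, combined with the truncating conversion done
+     afterwards, can be reproduced in standalone C for IEEE double
+     (p = 53), using nextafter to obtain Pred (0.5):
+
+       #include <math.h>
+
+       static double
+       round_ada (double x)
+       {
+         double pred_half = nextafter (0.5, 0.0);
+         return trunc (x >= 0.0 ? x + pred_half : x - pred_half);
+       }
+
+     round_ada (0.5) yields 1.0, round_ada (0.49999999999999994) yields
+     0.0 and round_ada (-2.5) yields -3.0, matching Ada's round-half-
+     away-from-zero semantics.  */
+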
+ if (TREE_CODE (gnu_base_type) == INTEGER_TYPE
+ && TYPE_HAS_ACTUAL_BOUNDS_P (gnu_base_type)
+ && TREE_CODE (gnu_result) == UNCONSTRAINED_ARRAY_REF)
+ gnu_result = unchecked_convert (gnu_base_type, gnu_result, false);
+ else
+ gnu_result = convert (gnu_base_type, gnu_result);
+
+ /* Finally, do the range check if requested. Note that if the result type
+ is a modular type, the range check is actually an overflow check. */
+ if (rangep
+ || (TREE_CODE (gnu_base_type) == INTEGER_TYPE
+ && TYPE_MODULAR_P (gnu_base_type) && overflowp))
+ gnu_result = emit_range_check (gnu_result, gnat_type, gnat_node);
+
+ return convert (gnu_type, gnu_result);
+}
+
+/* Return true if GNU_EXPR can be directly addressed. This is the case
+ unless it is an expression involving computation or if it involves a
+ reference to a bitfield or to an object not sufficiently aligned for
+ its type. If GNU_TYPE is non-null, return true only if GNU_EXPR can
+ be directly addressed as an object of this type.
+
+ *** Notes on addressability issues in the Ada compiler ***
+
+ This predicate is necessary in order to bridge the gap between Gigi
+ and the middle-end about addressability of GENERIC trees. A tree
+ is said to be addressable if it can be directly addressed, i.e. if
+ its address can be taken, is a multiple of the type's alignment on
+ strict-alignment architectures and returns the first storage unit
+ assigned to the object represented by the tree.
+
+ In the C family of languages, everything is in practice addressable
+ at the language level, except for bit-fields. This means that these
+ compilers will take the address of any tree that doesn't represent
+ a bit-field reference and expect the result to be the first storage
+ unit assigned to the object. Even in cases where this will result
+ in unaligned accesses at run time, nothing is supposed to be done
+ and the program is considered as erroneous instead (see PR c/18287).
+
+ The implicit assumptions made in the middle-end are in keeping with
+ the C viewpoint described above:
+ - the address of a bit-field reference is supposed to be never
+ taken; the compiler (generally) will stop on such a construct,
+ - any other tree is addressable if it is formally addressable,
+ i.e. if it is formally allowed to be the operand of ADDR_EXPR.
+
+ In Ada, the viewpoint is the opposite one: nothing is addressable
+ at the language level unless explicitly declared so. This means
+ that the compiler will both make sure that the trees representing
+ references to addressable ("aliased" in Ada parlance) objects are
+ addressable and make no real attempts at ensuring that the trees
+ representing references to non-addressable objects are addressable.
+
+ In the first case, Ada is effectively equivalent to C and handing
+ down the direct result of applying ADDR_EXPR to these trees to the
+ middle-end works flawlessly. In the second case, Ada cannot afford
+ to consider the program as erroneous if the address of trees that
+ are not addressable is requested for technical reasons, unlike C;
+ as a consequence, the Ada compiler must arrange for either making
+ sure that this address is not requested in the middle-end or for
+ compensating by inserting temporaries if it is requested in Gigi.
+
+ The first goal can be achieved because the middle-end should not
+ request the address of non-addressable trees on its own; the only
+ exception is for the invocation of low-level block operations like
+ memcpy, for which the addressability requirements are lower since
+ the type's alignment can be disregarded. In practice, this means
+ that Gigi must make sure that such operations cannot be applied to
+ non-BLKmode bit-fields.
+
+ The second goal is achieved by means of the addressable_p predicate,
+ which computes whether a temporary must be inserted by Gigi when the
+ address of a tree is requested; if so, the address of the temporary
+ will be used in lieu of that of the original tree and some glue code
+ generated to connect everything together. */
+
+static bool
+addressable_p (tree gnu_expr, tree gnu_type)
+{
+ /* For an integral type, the size of the actual type of the object may not
+ be greater than that of the expected type, otherwise an indirect access
+ in the latter type wouldn't correctly set all the bits of the object. */
+ if (gnu_type
+ && INTEGRAL_TYPE_P (gnu_type)
+ && smaller_form_type_p (gnu_type, TREE_TYPE (gnu_expr)))
+ return false;
+
+ /* The size of the actual type of the object may not be smaller than that
+ of the expected type, otherwise an indirect access in the latter type
+ would be larger than the object. But only record types need to be
+ considered in practice for this case. */
+ if (gnu_type
+ && TREE_CODE (gnu_type) == RECORD_TYPE
+ && smaller_form_type_p (TREE_TYPE (gnu_expr), gnu_type))
+ return false;
+
+ switch (TREE_CODE (gnu_expr))
+ {
+ case VAR_DECL:
+ case PARM_DECL:
+ case FUNCTION_DECL:
+ case RESULT_DECL:
+ /* All DECLs are addressable: if they are in a register, we can force
+ them to memory. */
+ return true;
+
+ case UNCONSTRAINED_ARRAY_REF:
+ case INDIRECT_REF:
+ /* Taking the address of a dereference yields the original pointer. */
+ return true;
+
+ case STRING_CST:
+ case INTEGER_CST:
+ /* Taking the address yields a pointer to the constant pool. */
+ return true;
+
+ case CONSTRUCTOR:
+ /* Taking the address of a static constructor yields a pointer to the
+ tree constant pool. */
+ return TREE_STATIC (gnu_expr) ? true : false;
+
+ case NULL_EXPR:
+ case SAVE_EXPR:
+ case CALL_EXPR:
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case BIT_AND_EXPR:
+ case BIT_NOT_EXPR:
+ /* All rvalues are deemed addressable since taking their address will
+ force a temporary to be created by the middle-end. */
+ return true;
+
+ case COMPOUND_EXPR:
+ /* The address of a compound expression is that of its 2nd operand. */
+ return addressable_p (TREE_OPERAND (gnu_expr, 1), gnu_type);
+
+ case COND_EXPR:
+ /* We accept &COND_EXPR as soon as both operands are addressable and
+ expect the outcome to be the address of the selected operand. */
+ return (addressable_p (TREE_OPERAND (gnu_expr, 1), NULL_TREE)
+ && addressable_p (TREE_OPERAND (gnu_expr, 2), NULL_TREE));
+
+ case COMPONENT_REF:
+ return (((!DECL_BIT_FIELD (TREE_OPERAND (gnu_expr, 1))
+ /* Even with DECL_BIT_FIELD cleared, we have to ensure that
+ the field is sufficiently aligned, in case it is subject
+ to a pragma Component_Alignment. But we don't need to
+ check the alignment of the containing record, as it is
+ guaranteed to be not smaller than that of its most
+ aligned field that is not a bit-field. */
+ && (!STRICT_ALIGNMENT
+ || DECL_ALIGN (TREE_OPERAND (gnu_expr, 1))
+ >= TYPE_ALIGN (TREE_TYPE (gnu_expr))))
+ /* The field of a padding record is always addressable. */
+ || TYPE_IS_PADDING_P (TREE_TYPE (TREE_OPERAND (gnu_expr, 0))))
+ && addressable_p (TREE_OPERAND (gnu_expr, 0), NULL_TREE));
+
+ case ARRAY_REF: case ARRAY_RANGE_REF:
+ case REALPART_EXPR: case IMAGPART_EXPR:
+ case NOP_EXPR:
+ return addressable_p (TREE_OPERAND (gnu_expr, 0), NULL_TREE);
+
+ case CONVERT_EXPR:
+ return (AGGREGATE_TYPE_P (TREE_TYPE (gnu_expr))
+ && addressable_p (TREE_OPERAND (gnu_expr, 0), NULL_TREE));
+
+ case VIEW_CONVERT_EXPR:
+ {
+ /* This is addressable if we can avoid a copy. */
+ tree type = TREE_TYPE (gnu_expr);
+ tree inner_type = TREE_TYPE (TREE_OPERAND (gnu_expr, 0));
+ return (((TYPE_MODE (type) == TYPE_MODE (inner_type)
+ && (!STRICT_ALIGNMENT
+ || TYPE_ALIGN (type) <= TYPE_ALIGN (inner_type)
+ || TYPE_ALIGN (inner_type) >= BIGGEST_ALIGNMENT))
+ || ((TYPE_MODE (type) == BLKmode
+ || TYPE_MODE (inner_type) == BLKmode)
+ && (!STRICT_ALIGNMENT
+ || TYPE_ALIGN (type) <= TYPE_ALIGN (inner_type)
+ || TYPE_ALIGN (inner_type) >= BIGGEST_ALIGNMENT
+ || TYPE_ALIGN_OK (type)
+ || TYPE_ALIGN_OK (inner_type))))
+ && addressable_p (TREE_OPERAND (gnu_expr, 0), NULL_TREE));
+ }
+
+ default:
+ return false;
+ }
+}
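+
+/* [Editorial sketch, not part of the original source]  When addressable_p
+   returns false, the compensation described above amounts, in C terms, to
+   routing the call through a temporary; TAKES_PTR and the struct below
+   are hypothetical:
+
+     struct s { int bf : 3; int i; };
+
+     extern void takes_ptr (int *);
+
+     static void
+     call_with_bitfield (struct s *p)
+     {
+       int tmp = p->bf;
+       takes_ptr (&tmp);
+       p->bf = tmp;
+     }
+
+   &p->bf itself would be rejected, which is precisely why the temporary
+   and the copy-in/copy-out glue code are needed for an in-out
+   parameter.  */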
+
+/* Do the processing for the declaration of a GNAT_ENTITY, a type. If
+ a separate Freeze node exists, delay the bulk of the processing. Otherwise
+ make a GCC type for GNAT_ENTITY and set up the correspondence. */
+
+void
+process_type (Entity_Id gnat_entity)
+{
+ tree gnu_old
+ = present_gnu_tree (gnat_entity) ? get_gnu_tree (gnat_entity) : 0;
+ tree gnu_new;
+
+ /* If we are to delay elaboration of this type, just do any
+ elaborations needed for expressions within the declaration and
+ make a dummy type entry for this node and its Full_View (if
+ any) in case something points to it. Don't do this if it
+ has already been done (the only way that can happen is if
+ the private completion is also delayed). */
+ if (Present (Freeze_Node (gnat_entity))
+ || (IN (Ekind (gnat_entity), Incomplete_Or_Private_Kind)
+ && Present (Full_View (gnat_entity))
+ && Present (Freeze_Node (Full_View (gnat_entity)))
+ && !present_gnu_tree (Full_View (gnat_entity))))
+ {
+ elaborate_entity (gnat_entity);
+
+ if (!gnu_old)
+ {
+ tree gnu_decl = TYPE_STUB_DECL (make_dummy_type (gnat_entity));
+ save_gnu_tree (gnat_entity, gnu_decl, false);
+ if (IN (Ekind (gnat_entity), Incomplete_Or_Private_Kind)
+ && Present (Full_View (gnat_entity)))
+ {
+ if (Has_Completion_In_Body (gnat_entity))
+ DECL_TAFT_TYPE_P (gnu_decl) = 1;
+ save_gnu_tree (Full_View (gnat_entity), gnu_decl, false);
+ }
+ }
+
+ return;
+ }
+
+  /* If we saved away a dummy type for this node, it means that this node
+     corresponds to the full type of an incomplete type.  Clear the dummy
+     type for now and then update the pointers to it below.  */
+ if (gnu_old)
+ {
+ gcc_assert (TREE_CODE (gnu_old) == TYPE_DECL
+ && TYPE_IS_DUMMY_P (TREE_TYPE (gnu_old)));
+
+ save_gnu_tree (gnat_entity, NULL_TREE, false);
+ }
+
+ /* Now fully elaborate the type. */
+ gnu_new = gnat_to_gnu_entity (gnat_entity, NULL_TREE, 1);
+ gcc_assert (TREE_CODE (gnu_new) == TYPE_DECL);
+
+ /* If we have an old type and we've made pointers to this type, update those
+ pointers. If this is a Taft amendment type in the main unit, we need to
+ mark the type as used since other units referencing it don't see the full
+ declaration and, therefore, cannot mark it as used themselves. */
+ if (gnu_old)
+ {
+ update_pointer_to (TYPE_MAIN_VARIANT (TREE_TYPE (gnu_old)),
+ TREE_TYPE (gnu_new));
+ if (DECL_TAFT_TYPE_P (gnu_old))
+ used_types_insert (TREE_TYPE (gnu_new));
+ }
+
+ /* If this is a record type corresponding to a task or protected type
+ that is a completion of an incomplete type, perform a similar update
+ on the type. ??? Including protected types here is a guess. */
+ if (IN (Ekind (gnat_entity), Record_Kind)
+ && Is_Concurrent_Record_Type (gnat_entity)
+ && present_gnu_tree (Corresponding_Concurrent_Type (gnat_entity)))
+ {
+ tree gnu_task_old
+ = get_gnu_tree (Corresponding_Concurrent_Type (gnat_entity));
+
+ save_gnu_tree (Corresponding_Concurrent_Type (gnat_entity),
+ NULL_TREE, false);
+ save_gnu_tree (Corresponding_Concurrent_Type (gnat_entity),
+ gnu_new, false);
+
+ update_pointer_to (TYPE_MAIN_VARIANT (TREE_TYPE (gnu_task_old)),
+ TREE_TYPE (gnu_new));
+ }
+}
+
+/* GNAT_ENTITY is the type of the resulting constructor, GNAT_ASSOC is the
+ front of the Component_Associations of an N_Aggregate and GNU_TYPE is the
+ GCC type of the corresponding record type. Return the CONSTRUCTOR. */
+
+static tree
+assoc_to_constructor (Entity_Id gnat_entity, Node_Id gnat_assoc, tree gnu_type)
+{
+ tree gnu_list = NULL_TREE, gnu_result;
+
+  /* We test for GNU_FIELD being empty in the case where a variant
+     was the last thing, since we don't take things off GNAT_ASSOC in
+     that case.  We check GNAT_ASSOC in case we have a variant that
+     has no fields.  */
+
+ for (; Present (gnat_assoc); gnat_assoc = Next (gnat_assoc))
+ {
+ Node_Id gnat_field = First (Choices (gnat_assoc));
+ tree gnu_field = gnat_to_gnu_field_decl (Entity (gnat_field));
+ tree gnu_expr = gnat_to_gnu (Expression (gnat_assoc));
+
+ /* The expander is supposed to put a single component selector name
+ in every record component association. */
+ gcc_assert (No (Next (gnat_field)));
+
+ /* Ignore fields that have Corresponding_Discriminants since we'll
+ be setting that field in the parent. */
+ if (Present (Corresponding_Discriminant (Entity (gnat_field)))
+ && Is_Tagged_Type (Scope (Entity (gnat_field))))
+ continue;
+
+ /* Also ignore discriminants of Unchecked_Unions. */
+ if (Is_Unchecked_Union (gnat_entity)
+ && Ekind (Entity (gnat_field)) == E_Discriminant)
+ continue;
+
+ /* Before assigning a value in an aggregate make sure range checks
+ are done if required. Then convert to the type of the field. */
+ if (Do_Range_Check (Expression (gnat_assoc)))
+ gnu_expr = emit_range_check (gnu_expr, Etype (gnat_field), Empty);
+
+ gnu_expr = convert (TREE_TYPE (gnu_field), gnu_expr);
+
+ /* Add the field and expression to the list. */
+ gnu_list = tree_cons (gnu_field, gnu_expr, gnu_list);
+ }
+
+ gnu_result = extract_values (gnu_list, gnu_type);
+
+#ifdef ENABLE_CHECKING
+ /* Verify that every entry in GNU_LIST was used. */
+ for (; gnu_list; gnu_list = TREE_CHAIN (gnu_list))
+ gcc_assert (TREE_ADDRESSABLE (gnu_list));
+#endif
+
+ return gnu_result;
+}
+
+/* Build a possibly nested constructor for array aggregates. GNAT_EXPR is
+ the first element of an array aggregate. It may itself be an aggregate.
+ GNU_ARRAY_TYPE is the GCC type corresponding to the array aggregate.
+ GNAT_COMPONENT_TYPE is the type of the array component; it is needed
+ for range checking. */
+
+static tree
+pos_to_constructor (Node_Id gnat_expr, tree gnu_array_type,
+ Entity_Id gnat_component_type)
+{
+ tree gnu_index = TYPE_MIN_VALUE (TYPE_DOMAIN (gnu_array_type));
+ tree gnu_expr;
+ vec<constructor_elt, va_gc> *gnu_expr_vec = NULL;
+
+ for ( ; Present (gnat_expr); gnat_expr = Next (gnat_expr))
+ {
+ /* If the expression is itself an array aggregate then first build the
+ innermost constructor if it is part of our array (multi-dimensional
+ case). */
+ if (Nkind (gnat_expr) == N_Aggregate
+ && TREE_CODE (TREE_TYPE (gnu_array_type)) == ARRAY_TYPE
+ && TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_array_type)))
+ gnu_expr = pos_to_constructor (First (Expressions (gnat_expr)),
+ TREE_TYPE (gnu_array_type),
+ gnat_component_type);
+ else
+ {
+ gnu_expr = gnat_to_gnu (gnat_expr);
+
+ /* Before assigning the element to the array, make sure it is
+ in range. */
+ if (Do_Range_Check (gnat_expr))
+ gnu_expr = emit_range_check (gnu_expr, gnat_component_type, Empty);
+ }
+
+ CONSTRUCTOR_APPEND_ELT (gnu_expr_vec, gnu_index,
+ convert (TREE_TYPE (gnu_array_type), gnu_expr));
+
+ gnu_index = int_const_binop (PLUS_EXPR, gnu_index, integer_one_node);
+ }
+
+ return gnat_build_constructor (gnu_array_type, gnu_expr_vec);
+}
+
+/* Subroutine of assoc_to_constructor: VALUES is a list of field associations,
+ some of which are from RECORD_TYPE. Return a CONSTRUCTOR consisting
+ of the associations that are from RECORD_TYPE. If we see an internal
+ record, make a recursive call to fill it in as well. */
+
+static tree
+extract_values (tree values, tree record_type)
+{
+ tree field, tem;
+ vec<constructor_elt, va_gc> *v = NULL;
+
+ for (field = TYPE_FIELDS (record_type); field; field = DECL_CHAIN (field))
+ {
+ tree value = 0;
+
+ /* _Parent is an internal field, but may have values in the aggregate,
+ so check for values first. */
+ if ((tem = purpose_member (field, values)))
+ {
+ value = TREE_VALUE (tem);
+ TREE_ADDRESSABLE (tem) = 1;
+ }
+
+ else if (DECL_INTERNAL_P (field))
+ {
+ value = extract_values (values, TREE_TYPE (field));
+ if (TREE_CODE (value) == CONSTRUCTOR
+ && vec_safe_is_empty (CONSTRUCTOR_ELTS (value)))
+ value = 0;
+ }
+ else
+ /* If we have a record subtype, the names will match, but not the
+ actual FIELD_DECLs. */
+ for (tem = values; tem; tem = TREE_CHAIN (tem))
+ if (DECL_NAME (TREE_PURPOSE (tem)) == DECL_NAME (field))
+ {
+ value = convert (TREE_TYPE (field), TREE_VALUE (tem));
+ TREE_ADDRESSABLE (tem) = 1;
+ }
+
+ if (!value)
+ continue;
+
+ CONSTRUCTOR_APPEND_ELT (v, field, value);
+ }
+
+ return gnat_build_constructor (record_type, v);
+}
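+
+/* [Editorial sketch, not part of the original source]  Note that
+   TREE_ADDRESSABLE is reused on the association list cells purely as a
+   "this entry was consumed" marker, which assoc_to_constructor verifies
+   under ENABLE_CHECKING.  A hypothetical standalone C rendition of the
+   same bookkeeping:
+
+     #include <string.h>
+
+     struct assoc { const char *field; int value; int used; };
+
+     static int
+     lookup (struct assoc *a, int n, const char *field)
+     {
+       int i;
+       for (i = 0; i < n; i++)
+         if (strcmp (a[i].field, field) == 0)
+           {
+             a[i].used = 1;
+             return a[i].value;
+           }
+       return -1;
+     }
+
+   Asserting afterwards that every entry has USED set catches associations
+   that matched no field of the record type.  */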
+
+/* Process a N_Validate_Unchecked_Conversion node. */
+
+static void
+validate_unchecked_conversion (Node_Id gnat_node)
+{
+ tree gnu_source_type = gnat_to_gnu_type (Source_Type (gnat_node));
+ tree gnu_target_type = gnat_to_gnu_type (Target_Type (gnat_node));
+
+ /* If the target is a pointer type, see if we are either converting from a
+ non-pointer or from a pointer to a type with a different alias set and
+ warn if so, unless the pointer has been marked to alias everything. */
+ if (POINTER_TYPE_P (gnu_target_type)
+ && !TYPE_REF_CAN_ALIAS_ALL (gnu_target_type))
+ {
+ tree gnu_source_desig_type = POINTER_TYPE_P (gnu_source_type)
+ ? TREE_TYPE (gnu_source_type)
+ : NULL_TREE;
+ tree gnu_target_desig_type = TREE_TYPE (gnu_target_type);
+ alias_set_type target_alias_set = get_alias_set (gnu_target_desig_type);
+
+ if (target_alias_set != 0
+ && (!POINTER_TYPE_P (gnu_source_type)
+ || !alias_sets_conflict_p (get_alias_set (gnu_source_desig_type),
+ target_alias_set)))
+ {
+ post_error_ne ("?possible aliasing problem for type&",
+ gnat_node, Target_Type (gnat_node));
+ post_error ("\\?use -fno-strict-aliasing switch for references",
+ gnat_node);
+ post_error_ne ("\\?or use `pragma No_Strict_Aliasing (&);`",
+ gnat_node, Target_Type (gnat_node));
+ }
+ }
+
+ /* Likewise if the target is a fat pointer type, but we have no mechanism to
+ mitigate the problem in this case, so we unconditionally warn. */
+ else if (TYPE_IS_FAT_POINTER_P (gnu_target_type))
+ {
+ tree gnu_source_desig_type
+ = TYPE_IS_FAT_POINTER_P (gnu_source_type)
+ ? TREE_TYPE (TREE_TYPE (TYPE_FIELDS (gnu_source_type)))
+ : NULL_TREE;
+ tree gnu_target_desig_type
+ = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (gnu_target_type)));
+ alias_set_type target_alias_set = get_alias_set (gnu_target_desig_type);
+
+ if (target_alias_set != 0
+ && (!TYPE_IS_FAT_POINTER_P (gnu_source_type)
+ || !alias_sets_conflict_p (get_alias_set (gnu_source_desig_type),
+ target_alias_set)))
+ {
+ post_error_ne ("?possible aliasing problem for type&",
+ gnat_node, Target_Type (gnat_node));
+ post_error ("\\?use -fno-strict-aliasing switch for references",
+ gnat_node);
+ }
+ }
+}
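+
+/* [Editorial sketch, not part of the original source]  The warning above
+   guards against the classic type-based aliasing hazard.  A minimal C
+   analogue of the flagged pattern:
+
+     float f = 1.0f;
+     int *p = (int *) &f;
+     *p = 0;
+
+   The cast plays the role of the unchecked conversion between access
+   types, and the store through P has undefined behavior under
+   -fstrict-aliasing because an int lvalue accesses a float object.  The
+   suggested remedies map directly: -fno-strict-aliasing disables the
+   type-based analysis globally, while pragma No_Strict_Aliasing disables
+   it for the given access type only.  */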
+
+/* EXP is to be treated as an array or record. Handle the cases when it is
+ an access object and perform the required dereferences. */
+
+static tree
+maybe_implicit_deref (tree exp)
+{
+ /* If the type is a pointer, dereference it. */
+ if (POINTER_TYPE_P (TREE_TYPE (exp))
+ || TYPE_IS_FAT_POINTER_P (TREE_TYPE (exp)))
+ exp = build_unary_op (INDIRECT_REF, NULL_TREE, exp);
+
+ /* If we got a padded type, remove it too. */
+ if (TYPE_IS_PADDING_P (TREE_TYPE (exp)))
+ exp = convert (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (exp))), exp);
+
+ return exp;
+}
+
+/* Convert SLOC into LOCUS. Return true if SLOC corresponds to a source code
+ location and false if it doesn't. In the former case, set the Gigi global
+ variable REF_FILENAME to the simple debug file name as given by sinput.
+ If clear_column is true, set column information to 0. */
+
+static bool
+Sloc_to_locus1 (Source_Ptr Sloc, location_t *locus, bool clear_column)
+{
+ if (Sloc == No_Location)
+ return false;
+
+ if (Sloc <= Standard_Location)
+ {
+ *locus = BUILTINS_LOCATION;
+ return false;
+ }
+ else
+ {
+ Source_File_Index file = Get_Source_File_Index (Sloc);
+ Logical_Line_Number line = Get_Logical_Line_Number (Sloc);
+ Column_Number column = (clear_column ? 0 : Get_Column_Number (Sloc));
+ struct line_map *map = LINEMAPS_ORDINARY_MAP_AT (line_table, file - 1);
+
+ /* We can have zero if pragma Source_Reference is in effect. */
+ if (line < 1)
+ line = 1;
+
+ /* Translate the location. */
+ *locus = linemap_position_for_line_and_column (map, line, column);
+ }
+
+ ref_filename
+ = IDENTIFIER_POINTER
+ (get_identifier
+       (Get_Name_String (Debug_Source_Name (Get_Source_File_Index (Sloc)))));
+
+ return true;
+}
+
+/* Similar to the above, not clearing the column information. */
+
+bool
+Sloc_to_locus (Source_Ptr Sloc, location_t *locus)
+{
+ return Sloc_to_locus1 (Sloc, locus, false);
+}
+
+/* Similar to set_expr_location, but start with the Sloc of GNAT_NODE and
+ don't do anything if it doesn't correspond to a source location. */
+
+static void
+set_expr_location_from_node1 (tree node, Node_Id gnat_node, bool clear_column)
+{
+ location_t locus;
+
+ if (!Sloc_to_locus1 (Sloc (gnat_node), &locus, clear_column))
+ return;
+
+ SET_EXPR_LOCATION (node, locus);
+}
+
+/* Similar to the above, not clearing the column information. */
+
+static void
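+
+  /* [Editorial example, not part of the original source]  For instance,
+     with ALIGN = 32 bits and vblock_addr = 0x1005, the mask is
+     32 / 8 - 1 = 3 and -0x1005 & 3 = 3, so VOFFSET = 3 and the field
+     starts at 0x1008, the next multiple of 4 bytes.  */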
+set_expr_location_from_node (tree node, Node_Id gnat_node)
+{
+ set_expr_location_from_node1 (node, gnat_node, false);
+}
+
+/* More elaborate version of set_expr_location_from_node to be used in more
+ general contexts, for example the result of the translation of a generic
+ GNAT node. */
+
+static void
+set_gnu_expr_location_from_node (tree node, Node_Id gnat_node)
+{
+ /* Set the location information on the node if it is a real expression.
+ References can be reused for multiple GNAT nodes and they would get
+ the location information of their last use. Also make sure not to
+ overwrite an existing location as it is probably more precise. */
+
+ switch (TREE_CODE (node))
+ {
+ CASE_CONVERT:
+ case NON_LVALUE_EXPR:
+ break;
+
+ case COMPOUND_EXPR:
+ if (EXPR_P (TREE_OPERAND (node, 1)))
+ set_gnu_expr_location_from_node (TREE_OPERAND (node, 1), gnat_node);
+
+ /* ... fall through ... */
+
+ default:
+ if (!REFERENCE_CLASS_P (node) && !EXPR_HAS_LOCATION (node))
+ {
+ set_expr_location_from_node (node, gnat_node);
+ set_end_locus_from_node (node, gnat_node);
+ }
+ break;
+ }
+}
+
+/* Return a colon-separated list of encodings contained in the encoded Ada
+ name. */
+
+static const char *
+extract_encoding (const char *name)
+{
+ char *encoding = (char *) ggc_alloc_atomic (strlen (name));
+ get_encoding (name, encoding);
+ return encoding;
+}
+
+/* Extract the Ada name from an encoded name. */
+
+static const char *
+decode_name (const char *name)
+{
+ char *decoded = (char *) ggc_alloc_atomic (strlen (name) * 2 + 60);
+ __gnat_decode (name, decoded, 0);
+ return decoded;
+}
+
+/* Post an error message. MSG is the error message, properly annotated.
+ NODE is the node at which to post the error and the node to use for the
+ '&' substitution. */
+
+void
+post_error (const char *msg, Node_Id node)
+{
+ String_Template temp;
+ Fat_Pointer fp;
+
+ if (No (node))
+ return;
+
+ temp.Low_Bound = 1;
+ temp.High_Bound = strlen (msg);
+ fp.Bounds = &temp;
+ fp.Array = msg;
+ Error_Msg_N (fp, node);
+}
+
+/* Similar to post_error, but NODE is the node at which to post the error and
+ ENT is the node to use for the '&' substitution. */
+
+void
+post_error_ne (const char *msg, Node_Id node, Entity_Id ent)
+{
+ String_Template temp;
+ Fat_Pointer fp;
+
+ if (No (node))
+ return;
+
+ temp.Low_Bound = 1;
+ temp.High_Bound = strlen (msg);
+ fp.Bounds = &temp;
+ fp.Array = msg;
+ Error_Msg_NE (fp, node, ent);
+}
+
+/* Similar to post_error_ne, but NUM is the number to use for the '^'. */
+
+void
+post_error_ne_num (const char *msg, Node_Id node, Entity_Id ent, int num)
+{
+ Error_Msg_Uint_1 = UI_From_Int (num);
+ post_error_ne (msg, node, ent);
+}
+
+/* Set the end_locus information for GNU_NODE, if any, from an explicit end
+ location associated with GNAT_NODE or GNAT_NODE itself, whichever makes
+ most sense. Return true if a sensible assignment was performed. */
+
+static bool
+set_end_locus_from_node (tree gnu_node, Node_Id gnat_node)
+{
+ Node_Id gnat_end_label = Empty;
+ location_t end_locus;
+
+ /* Pick the GNAT node of which we'll take the sloc to assign to the GCC node
+ end_locus when there is one. We consider only GNAT nodes with a possible
+     End_Label attached.  If the End_Label actually was unassigned, fall back
+ on the original node. We'd better assign an explicit sloc associated with
+ the outer construct in any case. */
+
+ switch (Nkind (gnat_node))
+ {
+ case N_Package_Body:
+ case N_Subprogram_Body:
+ case N_Block_Statement:
+ gnat_end_label = End_Label (Handled_Statement_Sequence (gnat_node));
+ break;
+
+ case N_Package_Declaration:
+ gnat_end_label = End_Label (Specification (gnat_node));
+ break;
+
+ default:
+ return false;
+ }
+
+ gnat_node = Present (gnat_end_label) ? gnat_end_label : gnat_node;
+
+ /* Some expanded subprograms have neither an End_Label nor a Sloc
+ attached. Notify that to callers. For a block statement with no
+ End_Label, clear column information, so that the tree for a
+ transient block does not receive the sloc of a source condition. */
+
+ if (!Sloc_to_locus1 (Sloc (gnat_node), &end_locus,
+                       No (gnat_end_label)
+                       && Nkind (gnat_node) == N_Block_Statement))
+ return false;
+
+ switch (TREE_CODE (gnu_node))
+ {
+ case BIND_EXPR:
+ BLOCK_SOURCE_END_LOCATION (BIND_EXPR_BLOCK (gnu_node)) = end_locus;
+ return true;
+
+ case FUNCTION_DECL:
+ DECL_STRUCT_FUNCTION (gnu_node)->function_end_locus = end_locus;
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/* Similar to post_error_ne, but T is a GCC tree representing the number to
+ write. If T represents a constant, the text inside curly brackets in
+ MSG will be output (presumably including a '^'). Otherwise it will not
+ be output and the text inside square brackets will be output instead. */
+
+void
+post_error_ne_tree (const char *msg, Node_Id node, Entity_Id ent, tree t)
+{
+ char *new_msg = XALLOCAVEC (char, strlen (msg) + 1);
+ char start_yes, end_yes, start_no, end_no;
+ const char *p;
+ char *q;
+
+ if (TREE_CODE (t) == INTEGER_CST)
+ {
+ Error_Msg_Uint_1 = UI_From_gnu (t);
+ start_yes = '{', end_yes = '}', start_no = '[', end_no = ']';
+ }
+ else
+ start_yes = '[', end_yes = ']', start_no = '{', end_no = '}';
+
+ for (p = msg, q = new_msg; *p; p++)
+ {
+ if (*p == start_yes)
+ for (p++; *p != end_yes; p++)
+ *q++ = *p;
+ else if (*p == start_no)
+ for (p++; *p != end_no; p++)
+ ;
+ else
+ *q++ = *p;
+ }
+
+ *q = 0;
+
+ post_error_ne (new_msg, node, ent);
+}
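+
+/* [Editorial sketch, not part of the original source]  The bracket
+   filtering above can be exercised standalone; here is a hypothetical C
+   rendition with the constant-ness of T reduced to a boolean and OUT
+   assumed large enough:
+
+     static void
+     filter_msg (const char *msg, int is_constant, char *out)
+     {
+       char sy = is_constant ? '{' : '[', ey = is_constant ? '}' : ']';
+       char sn = is_constant ? '[' : '{', en = is_constant ? ']' : '}';
+       const char *p;
+       char *q = out;
+
+       for (p = msg; *p; p++)
+         {
+           if (*p == sy)
+             for (p++; *p != ey; p++)
+               *q++ = *p;
+           else if (*p == sn)
+             for (p++; *p != en; p++)
+               ;
+           else
+             *q++ = *p;
+         }
+
+       *q = 0;
+     }
+
+   filter_msg ("size is{ ^}[ too big]", 1, buf) produces "size is ^",
+   while passing 0 produces "size is too big".  */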
+
+/* Similar to post_error_ne_tree, but NUM is a second integer to write. */
+
+void
+post_error_ne_tree_2 (const char *msg, Node_Id node, Entity_Id ent, tree t,
+ int num)
+{
+ Error_Msg_Uint_2 = UI_From_Int (num);
+ post_error_ne_tree (msg, node, ent, t);
+}
+
+/* Initialize the table that maps GNAT codes to GCC codes for simple
+ binary and unary operations. */
+
+static void
+init_code_table (void)
+{
+ gnu_codes[N_And_Then] = TRUTH_ANDIF_EXPR;
+ gnu_codes[N_Or_Else] = TRUTH_ORIF_EXPR;
+
+ gnu_codes[N_Op_And] = TRUTH_AND_EXPR;
+ gnu_codes[N_Op_Or] = TRUTH_OR_EXPR;
+ gnu_codes[N_Op_Xor] = TRUTH_XOR_EXPR;
+ gnu_codes[N_Op_Eq] = EQ_EXPR;
+ gnu_codes[N_Op_Ne] = NE_EXPR;
+ gnu_codes[N_Op_Lt] = LT_EXPR;
+ gnu_codes[N_Op_Le] = LE_EXPR;
+ gnu_codes[N_Op_Gt] = GT_EXPR;
+ gnu_codes[N_Op_Ge] = GE_EXPR;
+ gnu_codes[N_Op_Add] = PLUS_EXPR;
+ gnu_codes[N_Op_Subtract] = MINUS_EXPR;
+ gnu_codes[N_Op_Multiply] = MULT_EXPR;
+ gnu_codes[N_Op_Mod] = FLOOR_MOD_EXPR;
+ gnu_codes[N_Op_Rem] = TRUNC_MOD_EXPR;
+ gnu_codes[N_Op_Minus] = NEGATE_EXPR;
+ gnu_codes[N_Op_Abs] = ABS_EXPR;
+ gnu_codes[N_Op_Not] = TRUTH_NOT_EXPR;
+ gnu_codes[N_Op_Rotate_Left] = LROTATE_EXPR;
+ gnu_codes[N_Op_Rotate_Right] = RROTATE_EXPR;
+ gnu_codes[N_Op_Shift_Left] = LSHIFT_EXPR;
+ gnu_codes[N_Op_Shift_Right] = RSHIFT_EXPR;
+ gnu_codes[N_Op_Shift_Right_Arithmetic] = RSHIFT_EXPR;
+}
+
+/* Return a label to branch to for the exception type in KIND or NULL_TREE
+ if none. */
+
+tree
+get_exception_label (char kind)
+{
+ if (kind == N_Raise_Constraint_Error)
+ return gnu_constraint_error_label_stack->last ();
+ else if (kind == N_Raise_Storage_Error)
+ return gnu_storage_error_label_stack->last ();
+ else if (kind == N_Raise_Program_Error)
+ return gnu_program_error_label_stack->last ();
+ else
+ return NULL_TREE;
+}
+
+/* Return the decl for the current elaboration procedure. */
+
+tree
+get_elaboration_procedure (void)
+{
+ return gnu_elab_proc_stack->last ();
+}
+
+#include "gt-ada-trans.h"
diff --git a/gcc-4.9/gcc/ada/gcc-interface/utils.c b/gcc-4.9/gcc/ada/gcc-interface/utils.c
new file mode 100644
index 000000000..014fe361b
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/utils.c
@@ -0,0 +1,6579 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * U T I L S *
+ * *
+ * C Implementation File *
+ * *
+ * Copyright (C) 1992-2014, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License along with GCC; see the file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "attribs.h"
+#include "varasm.h"
+#include "flags.h"
+#include "toplev.h"
+#include "diagnostic-core.h"
+#include "output.h"
+#include "ggc.h"
+#include "debug.h"
+#include "convert.h"
+#include "target.h"
+#include "common/common-target.h"
+#include "langhooks.h"
+#include "cgraph.h"
+#include "diagnostic.h"
+#include "timevar.h"
+#include "tree-dump.h"
+#include "tree-inline.h"
+#include "tree-iterator.h"
+
+#include "ada.h"
+#include "types.h"
+#include "atree.h"
+#include "elists.h"
+#include "namet.h"
+#include "nlists.h"
+#include "stringt.h"
+#include "uintp.h"
+#include "fe.h"
+#include "sinfo.h"
+#include "einfo.h"
+#include "ada-tree.h"
+#include "gigi.h"
+
+/* If nonzero, pretend we are allocating at global level. */
+int force_global;
+
+/* The default alignment of "double" floating-point types, i.e. floating
+ point types whose size is equal to 64 bits, or 0 if this alignment is
+ not specifically capped. */
+int double_float_alignment;
+
+/* The default alignment of "double" or larger scalar types, i.e. scalar
+ types whose size is greater or equal to 64 bits, or 0 if this alignment
+ is not specifically capped. */
+int double_scalar_alignment;
+
+/* Tree nodes for the various types and decls we create. */
+tree gnat_std_decls[(int) ADT_LAST];
+
+/* Functions to call for each of the possible raise reasons. */
+tree gnat_raise_decls[(int) LAST_REASON_CODE + 1];
+
+/* Likewise, but with extra info for each of the possible raise reasons. */
+tree gnat_raise_decls_ext[(int) LAST_REASON_CODE + 1];
+
+/* Forward declarations for handlers of attributes. */
+static tree handle_const_attribute (tree *, tree, tree, int, bool *);
+static tree handle_nothrow_attribute (tree *, tree, tree, int, bool *);
+static tree handle_pure_attribute (tree *, tree, tree, int, bool *);
+static tree handle_novops_attribute (tree *, tree, tree, int, bool *);
+static tree handle_nonnull_attribute (tree *, tree, tree, int, bool *);
+static tree handle_sentinel_attribute (tree *, tree, tree, int, bool *);
+static tree handle_noreturn_attribute (tree *, tree, tree, int, bool *);
+static tree handle_leaf_attribute (tree *, tree, tree, int, bool *);
+static tree handle_malloc_attribute (tree *, tree, tree, int, bool *);
+static tree handle_type_generic_attribute (tree *, tree, tree, int, bool *);
+static tree handle_vector_size_attribute (tree *, tree, tree, int, bool *);
+static tree handle_vector_type_attribute (tree *, tree, tree, int, bool *);
+
+/* Fake handler for attributes we don't properly support, typically because
+   they'd require dragging a lot of the common C front-end circuitry.  */
+static tree fake_attribute_handler (tree *, tree, tree, int, bool *);
+
+/* Table of machine-independent internal attributes for Ada. We support
+ this minimal set of attributes to accommodate the needs of builtins. */
+const struct attribute_spec gnat_internal_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
+ affects_type_identity } */
+ { "const", 0, 0, true, false, false, handle_const_attribute,
+ false },
+ { "nothrow", 0, 0, true, false, false, handle_nothrow_attribute,
+ false },
+ { "pure", 0, 0, true, false, false, handle_pure_attribute,
+ false },
+ { "no vops", 0, 0, true, false, false, handle_novops_attribute,
+ false },
+ { "nonnull", 0, -1, false, true, true, handle_nonnull_attribute,
+ false },
+ { "sentinel", 0, 1, false, true, true, handle_sentinel_attribute,
+ false },
+ { "noreturn", 0, 0, true, false, false, handle_noreturn_attribute,
+ false },
+ { "leaf", 0, 0, true, false, false, handle_leaf_attribute,
+ false },
+ { "malloc", 0, 0, true, false, false, handle_malloc_attribute,
+ false },
+ { "type generic", 0, 0, false, true, true, handle_type_generic_attribute,
+ false },
+
+ { "vector_size", 1, 1, false, true, false, handle_vector_size_attribute,
+ false },
+ { "vector_type", 0, 0, false, true, false, handle_vector_type_attribute,
+ false },
+ { "may_alias", 0, 0, false, true, false, NULL, false },
+
+ /* ??? format and format_arg are heavy and not supported, which actually
+     prevents support for stdio builtins, even though we declare them as
+     part of the common builtins.def contents.  */
+ { "format", 3, 3, false, true, true, fake_attribute_handler, false },
+ { "format_arg", 1, 1, false, true, true, fake_attribute_handler, false },
+
+ { NULL, 0, 0, false, false, false, NULL, false }
+};
+
+/* Associates a GNAT tree node to a GCC tree node. It is used in
+ `save_gnu_tree', `get_gnu_tree' and `present_gnu_tree'. See documentation
+ of `save_gnu_tree' for more info. */
+static GTY((length ("max_gnat_nodes"))) tree *associate_gnat_to_gnu;
+
+#define GET_GNU_TREE(GNAT_ENTITY) \
+ associate_gnat_to_gnu[(GNAT_ENTITY) - First_Node_Id]
+
+#define SET_GNU_TREE(GNAT_ENTITY,VAL) \
+ associate_gnat_to_gnu[(GNAT_ENTITY) - First_Node_Id] = (VAL)
+
+#define PRESENT_GNU_TREE(GNAT_ENTITY) \
+ (associate_gnat_to_gnu[(GNAT_ENTITY) - First_Node_Id] != NULL_TREE)
+
+/* Associates a GNAT entity to a GCC tree node used as a dummy, if any. */
+static GTY((length ("max_gnat_nodes"))) tree *dummy_node_table;
+
+#define GET_DUMMY_NODE(GNAT_ENTITY) \
+ dummy_node_table[(GNAT_ENTITY) - First_Node_Id]
+
+#define SET_DUMMY_NODE(GNAT_ENTITY,VAL) \
+ dummy_node_table[(GNAT_ENTITY) - First_Node_Id] = (VAL)
+
+#define PRESENT_DUMMY_NODE(GNAT_ENTITY) \
+ (dummy_node_table[(GNAT_ENTITY) - First_Node_Id] != NULL_TREE)
+
+/* This variable keeps a table of types for each precision so that we only
+ allocate each of them once. Signed and unsigned types are kept separate.
+
+ Note that these types are only used when fold-const requests something
+ special. Perhaps we should NOT share these types; we'll see how it
+ goes later. */
+static GTY(()) tree signed_and_unsigned_types[2 * MAX_BITS_PER_WORD + 1][2];
+
+/* Likewise for float types, but record these by mode. */
+static GTY(()) tree float_types[NUM_MACHINE_MODES];
+
+/* For each binding contour we allocate a binding_level structure to indicate
+ the binding depth. */
+
+struct GTY((chain_next ("%h.chain"))) gnat_binding_level {
+ /* The binding level containing this one (the enclosing binding level). */
+ struct gnat_binding_level *chain;
+ /* The BLOCK node for this level. */
+ tree block;
+ /* If nonzero, the setjmp buffer that needs to be updated for any
+ variable-sized definition within this context. */
+ tree jmpbuf_decl;
+};
+
+/* The binding level currently in effect. */
+static GTY(()) struct gnat_binding_level *current_binding_level;
+
+/* A chain of gnat_binding_level structures awaiting reuse. */
+static GTY((deletable)) struct gnat_binding_level *free_binding_level;
+
+/* The context to be used for global declarations. */
+static GTY(()) tree global_context;
+
+/* An array of global declarations. */
+static GTY(()) vec<tree, va_gc> *global_decls;
+
+/* An array of builtin function declarations. */
+static GTY(()) vec<tree, va_gc> *builtin_decls;
+
+/* An array of global renaming pointers. */
+static GTY(()) vec<tree, va_gc> *global_renaming_pointers;
+
+/* A chain of unused BLOCK nodes. */
+static GTY((deletable)) tree free_block_chain;
+
+static int pad_type_hash_marked_p (const void *p);
+static hashval_t pad_type_hash_hash (const void *p);
+static int pad_type_hash_eq (const void *p1, const void *p2);
+
+/* A hash table of padded types. It is modelled on the generic type
+ hash table in tree.c, which must thus be used as a reference. */
+struct GTY(()) pad_type_hash {
+ unsigned long hash;
+ tree type;
+};
+
+static GTY ((if_marked ("pad_type_hash_marked_p"),
+ param_is (struct pad_type_hash)))
+ htab_t pad_type_hash_table;
+
+static tree merge_sizes (tree, tree, tree, bool, bool);
+static tree compute_related_constant (tree, tree);
+static tree split_plus (tree, tree *);
+static tree float_type_for_precision (int, enum machine_mode);
+static tree convert_to_fat_pointer (tree, tree);
+static unsigned int scale_by_factor_of (tree, unsigned int);
+static bool potential_alignment_gap (tree, tree, tree);
+
+/* Initialize data structures of the utils.c module. */
+
+void
+init_gnat_utils (void)
+{
+ /* Initialize the association of GNAT nodes to GCC trees. */
+ associate_gnat_to_gnu = ggc_alloc_cleared_vec_tree (max_gnat_nodes);
+
+ /* Initialize the association of GNAT nodes to GCC trees as dummies. */
+ dummy_node_table = ggc_alloc_cleared_vec_tree (max_gnat_nodes);
+
+ /* Initialize the hash table of padded types. */
+ pad_type_hash_table = htab_create_ggc (512, pad_type_hash_hash,
+ pad_type_hash_eq, 0);
+}
+
+/* Destroy data structures of the utils.c module. */
+
+void
+destroy_gnat_utils (void)
+{
+ /* Destroy the association of GNAT nodes to GCC trees. */
+ ggc_free (associate_gnat_to_gnu);
+ associate_gnat_to_gnu = NULL;
+
+ /* Destroy the association of GNAT nodes to GCC trees as dummies. */
+ ggc_free (dummy_node_table);
+ dummy_node_table = NULL;
+
+ /* Destroy the hash table of padded types. */
+ htab_delete (pad_type_hash_table);
+ pad_type_hash_table = NULL;
+
+ /* Invalidate the global renaming pointers. */
+ invalidate_global_renaming_pointers ();
+}
+
+/* GNAT_ENTITY is a GNAT tree node for an entity. Associate GNU_DECL, a GCC
+ tree node, with GNAT_ENTITY. If GNU_DECL is not a ..._DECL node, abort.
+ If NO_CHECK is true, the latter check is suppressed.
+
+ If GNU_DECL is zero, reset a previous association. */
+
+void
+save_gnu_tree (Entity_Id gnat_entity, tree gnu_decl, bool no_check)
+{
+ /* Check that GNAT_ENTITY is not already defined and that it is being set
+ to something which is a decl. If that is not the case, this usually
+ means GNAT_ENTITY is defined twice, but occasionally is due to some
+ Gigi problem. */
+ gcc_assert (!(gnu_decl
+ && (PRESENT_GNU_TREE (gnat_entity)
+ || (!no_check && !DECL_P (gnu_decl)))));
+
+ SET_GNU_TREE (gnat_entity, gnu_decl);
+}
+
+/* GNAT_ENTITY is a GNAT tree node for an entity. Return the GCC tree node
+ that was associated with it. If there is no such tree node, abort.
+
+ In some cases, such as delayed elaboration or expressions that need to
+ be elaborated only once, GNAT_ENTITY is really not an entity. */
+
+tree
+get_gnu_tree (Entity_Id gnat_entity)
+{
+ gcc_assert (PRESENT_GNU_TREE (gnat_entity));
+ return GET_GNU_TREE (gnat_entity);
+}
+
+/* Return nonzero if a GCC tree has been associated with GNAT_ENTITY. */
+
+bool
+present_gnu_tree (Entity_Id gnat_entity)
+{
+ return PRESENT_GNU_TREE (gnat_entity);
+}
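+
+/* [Editorial sketch, not part of the original source]  The three accessors
+   above are thin wrappers around a flat array indexed by the GNAT id minus
+   First_Node_Id.  A hypothetical miniature of the scheme:
+
+     #define MAX_NODES 1000
+
+     typedef int Entity_Id;
+
+     static void *assoc_table[MAX_NODES];
+     static const Entity_Id first_id = 0;
+
+     static void
+     save_tree (Entity_Id id, void *decl)
+     {
+       assoc_table[id - first_id] = decl;
+     }
+
+   with MAX_NODES and first_id standing in for max_gnat_nodes and
+   First_Node_Id.  The real table is GC-allocated and scanned via the GTY
+   length annotation seen above.  */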
+
+/* Make a dummy type corresponding to GNAT_TYPE. */
+
+tree
+make_dummy_type (Entity_Id gnat_type)
+{
+ Entity_Id gnat_underlying = Gigi_Equivalent_Type (gnat_type);
+ tree gnu_type;
+
+ /* If there is an equivalent type, get its underlying type. */
+ if (Present (gnat_underlying))
+ gnat_underlying = Gigi_Equivalent_Type (Underlying_Type (gnat_underlying));
+
+ /* If there was no equivalent type (can only happen when just annotating
+ types) or underlying type, go back to the original type. */
+ if (No (gnat_underlying))
+ gnat_underlying = gnat_type;
+
+  /* If there is already a dummy type, use it; otherwise, make one.  */
+ if (PRESENT_DUMMY_NODE (gnat_underlying))
+ return GET_DUMMY_NODE (gnat_underlying);
+
+ /* If this is a record, make a RECORD_TYPE or UNION_TYPE; else make
+ an ENUMERAL_TYPE. */
+ gnu_type = make_node (Is_Record_Type (gnat_underlying)
+ ? tree_code_for_record_type (gnat_underlying)
+ : ENUMERAL_TYPE);
+ TYPE_NAME (gnu_type) = get_entity_name (gnat_type);
+ TYPE_DUMMY_P (gnu_type) = 1;
+ TYPE_STUB_DECL (gnu_type)
+ = create_type_stub_decl (TYPE_NAME (gnu_type), gnu_type);
+ if (Is_By_Reference_Type (gnat_underlying))
+ TYPE_BY_REFERENCE_P (gnu_type) = 1;
+
+ SET_DUMMY_NODE (gnat_underlying, gnu_type);
+
+ return gnu_type;
+}
+
+/* Return the dummy type that was made for GNAT_TYPE, if any. */
+
+tree
+get_dummy_type (Entity_Id gnat_type)
+{
+ return GET_DUMMY_NODE (gnat_type);
+}
+
+/* Build dummy fat and thin pointer types whose designated type is specified
+ by GNAT_DESIG_TYPE/GNU_DESIG_TYPE and attach them to the latter. */
+
+void
+build_dummy_unc_pointer_types (Entity_Id gnat_desig_type, tree gnu_desig_type)
+{
+ tree gnu_template_type, gnu_ptr_template, gnu_array_type, gnu_ptr_array;
+ tree gnu_fat_type, fields, gnu_object_type;
+
+ gnu_template_type = make_node (RECORD_TYPE);
+ TYPE_NAME (gnu_template_type) = create_concat_name (gnat_desig_type, "XUB");
+ TYPE_DUMMY_P (gnu_template_type) = 1;
+ gnu_ptr_template = build_pointer_type (gnu_template_type);
+
+ gnu_array_type = make_node (ENUMERAL_TYPE);
+ TYPE_NAME (gnu_array_type) = create_concat_name (gnat_desig_type, "XUA");
+ TYPE_DUMMY_P (gnu_array_type) = 1;
+ gnu_ptr_array = build_pointer_type (gnu_array_type);
+
+ gnu_fat_type = make_node (RECORD_TYPE);
+ /* Build a stub DECL to trigger the special processing for fat pointer types
+ in gnat_pushdecl. */
+ TYPE_NAME (gnu_fat_type)
+ = create_type_stub_decl (create_concat_name (gnat_desig_type, "XUP"),
+ gnu_fat_type);
+ fields = create_field_decl (get_identifier ("P_ARRAY"), gnu_ptr_array,
+ gnu_fat_type, NULL_TREE, NULL_TREE, 0, 0);
+ DECL_CHAIN (fields)
+ = create_field_decl (get_identifier ("P_BOUNDS"), gnu_ptr_template,
+ gnu_fat_type, NULL_TREE, NULL_TREE, 0, 0);
+ finish_fat_pointer_type (gnu_fat_type, fields);
+ SET_TYPE_UNCONSTRAINED_ARRAY (gnu_fat_type, gnu_desig_type);
+ /* Suppress debug info until after the type is completed. */
+ TYPE_DECL_SUPPRESS_DEBUG (TYPE_STUB_DECL (gnu_fat_type)) = 1;
+
+ gnu_object_type = make_node (RECORD_TYPE);
+ TYPE_NAME (gnu_object_type) = create_concat_name (gnat_desig_type, "XUT");
+ TYPE_DUMMY_P (gnu_object_type) = 1;
+
+ TYPE_POINTER_TO (gnu_desig_type) = gnu_fat_type;
+ TYPE_OBJECT_RECORD_TYPE (gnu_desig_type) = gnu_object_type;
+}
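+
+/* [Editorial sketch, not part of the original source]  In C terms, the fat
+   pointer laid out above is a two-field record; BOUNDS and FAT_POINTER are
+   invented names:
+
+     struct bounds { int lb, ub; };
+
+     struct fat_pointer
+     {
+       char *p_array;
+       struct bounds *p_bounds;
+     };
+
+   P_ARRAY points to the data (the XUA side) and P_BOUNDS to the bounds
+   template (the XUB side), so a single access value carries both the
+   elements and the information needed to size them at the point of
+   use.  */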
+
+/* Return true if we are in the global binding level. */
+
+bool
+global_bindings_p (void)
+{
+ return force_global || current_function_decl == NULL_TREE;
+}
+
+/* Enter a new binding level. */
+
+void
+gnat_pushlevel (void)
+{
+ struct gnat_binding_level *newlevel = NULL;
+
+ /* Reuse a struct for this binding level, if there is one. */
+ if (free_binding_level)
+ {
+ newlevel = free_binding_level;
+ free_binding_level = free_binding_level->chain;
+ }
+ else
+ newlevel = ggc_alloc_gnat_binding_level ();
+
+ /* Use a free BLOCK, if any; otherwise, allocate one. */
+ if (free_block_chain)
+ {
+ newlevel->block = free_block_chain;
+ free_block_chain = BLOCK_CHAIN (free_block_chain);
+ BLOCK_CHAIN (newlevel->block) = NULL_TREE;
+ }
+ else
+ newlevel->block = make_node (BLOCK);
+
+ /* Point the BLOCK we just made to its parent. */
+ if (current_binding_level)
+ BLOCK_SUPERCONTEXT (newlevel->block) = current_binding_level->block;
+
+ BLOCK_VARS (newlevel->block) = NULL_TREE;
+ BLOCK_SUBBLOCKS (newlevel->block) = NULL_TREE;
+ TREE_USED (newlevel->block) = 1;
+
+ /* Add this level to the front of the chain (stack) of active levels. */
+ newlevel->chain = current_binding_level;
+ newlevel->jmpbuf_decl = NULL_TREE;
+ current_binding_level = newlevel;
+}
+
+/* Set SUPERCONTEXT of the BLOCK for the current binding level to FNDECL
+ and point FNDECL to this BLOCK. */
+
+void
+set_current_block_context (tree fndecl)
+{
+ BLOCK_SUPERCONTEXT (current_binding_level->block) = fndecl;
+ DECL_INITIAL (fndecl) = current_binding_level->block;
+ set_block_for_group (current_binding_level->block);
+}
+
+/* Set the jmpbuf_decl for the current binding level to DECL. */
+
+void
+set_block_jmpbuf_decl (tree decl)
+{
+ current_binding_level->jmpbuf_decl = decl;
+}
+
+/* Get the jmpbuf_decl, if any, for the current binding level. */
+
+tree
+get_block_jmpbuf_decl (void)
+{
+ return current_binding_level->jmpbuf_decl;
+}
+
+/* Exit a binding level. Set any BLOCK into the current code group. */
+
+void
+gnat_poplevel (void)
+{
+ struct gnat_binding_level *level = current_binding_level;
+ tree block = level->block;
+
+ BLOCK_VARS (block) = nreverse (BLOCK_VARS (block));
+ BLOCK_SUBBLOCKS (block) = blocks_nreverse (BLOCK_SUBBLOCKS (block));
+
+  /* If this is a function-level BLOCK, don't do anything.  Otherwise, if
+     there are no variables, free the BLOCK and merge its subblocks into
+     those of its parent block; otherwise, chain it onto the subblocks of
+     its parent.  */
+ if (TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL)
+ ;
+ else if (BLOCK_VARS (block) == NULL_TREE)
+ {
+ BLOCK_SUBBLOCKS (level->chain->block)
+ = block_chainon (BLOCK_SUBBLOCKS (block),
+ BLOCK_SUBBLOCKS (level->chain->block));
+ BLOCK_CHAIN (block) = free_block_chain;
+ free_block_chain = block;
+ }
+ else
+ {
+ BLOCK_CHAIN (block) = BLOCK_SUBBLOCKS (level->chain->block);
+ BLOCK_SUBBLOCKS (level->chain->block) = block;
+ TREE_USED (block) = 1;
+ set_block_for_group (block);
+ }
+
+ /* Free this binding structure. */
+ current_binding_level = level->chain;
+ level->chain = free_binding_level;
+ free_binding_level = level;
+}
+
+/* Exit a binding level and discard the associated BLOCK. */
+
+void
+gnat_zaplevel (void)
+{
+ struct gnat_binding_level *level = current_binding_level;
+ tree block = level->block;
+
+ BLOCK_CHAIN (block) = free_block_chain;
+ free_block_chain = block;
+
+ /* Free this binding structure. */
+ current_binding_level = level->chain;
+ level->chain = free_binding_level;
+ free_binding_level = level;
+}
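+
+/* [Editorial sketch, not part of the original source]  gnat_pushlevel,
+   gnat_poplevel and gnat_zaplevel recycle their storage through intrusive
+   free lists (free_binding_level and free_block_chain).  The pattern in
+   isolation, for a hypothetical node type:
+
+     #include <stdlib.h>
+
+     struct node { struct node *chain; };
+
+     static struct node *free_list;
+
+     static struct node *
+     acquire (void)
+     {
+       struct node *n = free_list;
+       if (n)
+         free_list = n->chain;
+       else
+         n = (struct node *) calloc (1, sizeof (struct node));
+       return n;
+     }
+
+     static void
+     release (struct node *n)
+     {
+       n->chain = free_list;
+       free_list = n;
+     }
+
+   Binding levels and BLOCKs come and go in strict LIFO order, so this
+   avoids churning the GC allocator; the (deletable) annotations let the
+   collector reclaim the free lists at collection points.  */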
+
+/* Set the context of TYPE and its parallel types (if any) to CONTEXT. */
+
+static void
+gnat_set_type_context (tree type, tree context)
+{
+ tree decl = TYPE_STUB_DECL (type);
+
+ TYPE_CONTEXT (type) = context;
+
+ while (decl && DECL_PARALLEL_TYPE (decl))
+ {
+ TYPE_CONTEXT (DECL_PARALLEL_TYPE (decl)) = context;
+ decl = TYPE_STUB_DECL (DECL_PARALLEL_TYPE (decl));
+ }
+}
+
+/* Record DECL as belonging to the current lexical scope and use GNAT_NODE
+ for location information and flag propagation. */
+
+void
+gnat_pushdecl (tree decl, Node_Id gnat_node)
+{
+ /* If DECL is public external or at top level, it has global context. */
+ if ((TREE_PUBLIC (decl) && DECL_EXTERNAL (decl)) || global_bindings_p ())
+ {
+ if (!global_context)
+ global_context = build_translation_unit_decl (NULL_TREE);
+ DECL_CONTEXT (decl) = global_context;
+ }
+ else
+ {
+ DECL_CONTEXT (decl) = current_function_decl;
+
+ /* Functions imported in another function are not really nested.
+         For really nested functions, mark them initially as needing a
+         static chain for the benefit of uses of that flag before
+         unnesting; lower_nested_functions will then recompute it.  */
+ if (TREE_CODE (decl) == FUNCTION_DECL && !TREE_PUBLIC (decl))
+ DECL_STATIC_CHAIN (decl) = 1;
+ }
+
+ TREE_NO_WARNING (decl) = (No (gnat_node) || Warnings_Off (gnat_node));
+
+ /* Set the location of DECL and emit a declaration for it. */
+ if (Present (gnat_node))
+ Sloc_to_locus (Sloc (gnat_node), &DECL_SOURCE_LOCATION (decl));
+
+ add_decl_expr (decl, gnat_node);
+
+ /* Put the declaration on the list. The list of declarations is in reverse
+ order. The list will be reversed later. Put global declarations in the
+ globals list and local ones in the current block. But skip TYPE_DECLs
+ for UNCONSTRAINED_ARRAY_TYPE in both cases, as they will cause trouble
+ with the debugger and aren't needed anyway. */
+ if (!(TREE_CODE (decl) == TYPE_DECL
+ && TREE_CODE (TREE_TYPE (decl)) == UNCONSTRAINED_ARRAY_TYPE))
+ {
+ if (DECL_EXTERNAL (decl))
+ {
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl))
+ vec_safe_push (builtin_decls, decl);
+ }
+ else if (global_bindings_p ())
+ vec_safe_push (global_decls, decl);
+ else
+ {
+ DECL_CHAIN (decl) = BLOCK_VARS (current_binding_level->block);
+ BLOCK_VARS (current_binding_level->block) = decl;
+ }
+ }
+
+ /* For the declaration of a type, set its name if it either is not already
+ set or if the previous type name was not derived from a source name.
+ We'd rather have the type named with a real name and all the pointer
+ types to the same object have the same POINTER_TYPE node. Code in the
+ equivalent function of c-decl.c makes a copy of the type node here, but
+ that may cause us trouble with incomplete types. We make an exception
+ for fat pointer types because the compiler automatically builds them
+ for unconstrained array types and the debugger uses them to represent
+ both these and pointers to these. */
+ if (TREE_CODE (decl) == TYPE_DECL && DECL_NAME (decl))
+ {
+ tree t = TREE_TYPE (decl);
+
+ if (!(TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL))
+ {
+ /* Array and pointer types aren't "tagged" types so we force the
+ type to be associated with its typedef in the DWARF back-end,
+ in order to make sure that the latter is always preserved. */
+ if (!DECL_ARTIFICIAL (decl)
+ && (TREE_CODE (t) == ARRAY_TYPE
+ || TREE_CODE (t) == POINTER_TYPE))
+ {
+ tree tt = build_distinct_type_copy (t);
+ if (TREE_CODE (t) == POINTER_TYPE)
+ TYPE_NEXT_PTR_TO (t) = tt;
+ TYPE_NAME (tt) = DECL_NAME (decl);
+ gnat_set_type_context (tt, DECL_CONTEXT (decl));
+ TYPE_STUB_DECL (tt) = TYPE_STUB_DECL (t);
+ DECL_ORIGINAL_TYPE (decl) = tt;
+ }
+ }
+ else if (TYPE_IS_FAT_POINTER_P (t))
+ {
+ /* We need a variant for the placeholder machinery to work. */
+ tree tt = build_variant_type_copy (t);
+ TYPE_NAME (tt) = decl;
+ gnat_set_type_context (tt, DECL_CONTEXT (decl));
+ TREE_USED (tt) = TREE_USED (t);
+ TREE_TYPE (decl) = tt;
+ if (DECL_ORIGINAL_TYPE (TYPE_NAME (t)))
+ DECL_ORIGINAL_TYPE (decl) = DECL_ORIGINAL_TYPE (TYPE_NAME (t));
+ else
+ DECL_ORIGINAL_TYPE (decl) = t;
+ DECL_ARTIFICIAL (decl) = 0;
+ t = NULL_TREE;
+ }
+ else if (DECL_ARTIFICIAL (TYPE_NAME (t)) && !DECL_ARTIFICIAL (decl))
+ ;
+ else
+ t = NULL_TREE;
+
+ /* Propagate the name to all the anonymous variants. This is needed
+ for the type qualifiers machinery to work properly. */
+ if (t)
+ for (t = TYPE_MAIN_VARIANT (t); t; t = TYPE_NEXT_VARIANT (t))
+ if (!(TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL))
+ {
+ TYPE_NAME (t) = decl;
+ gnat_set_type_context (t, DECL_CONTEXT (decl));
+ }
+ }
+}
+
+/* Create a record type that contains a SIZE bytes long field of TYPE with a
+ starting bit position so that it is aligned to ALIGN bits, and leaving at
+ least ROOM bytes free before the field. BASE_ALIGN is the alignment the
+ record is guaranteed to get. GNAT_NODE is used for the position of the
+ associated TYPE_DECL. */
+
+tree
+make_aligning_type (tree type, unsigned int align, tree size,
+ unsigned int base_align, int room, Node_Id gnat_node)
+{
+ /* We will be crafting a record type with one field at a position set to be
+ the next multiple of ALIGN past record'address + room bytes. We use a
+ record placeholder to express record'address. */
+ tree record_type = make_node (RECORD_TYPE);
+ tree record = build0 (PLACEHOLDER_EXPR, record_type);
+
+ tree record_addr_st
+ = convert (sizetype, build_unary_op (ADDR_EXPR, NULL_TREE, record));
+
+ /* The diagram below summarizes the shape of what we manipulate:
+
+ <--------- pos ---------->
+ { +------------+-------------+-----------------+
+ record =>{ |############| ... | field (type) |
+ { +------------+-------------+-----------------+
+ |<-- room -->|<- voffset ->|<---- size ----->|
+ o o
+ | |
+ record_addr vblock_addr
+
+ Every length is in sizetype bytes there, except "pos" which has to be
+ set as a bit position in the GCC tree for the record. */
+ tree room_st = size_int (room);
+ tree vblock_addr_st = size_binop (PLUS_EXPR, record_addr_st, room_st);
+ tree voffset_st, pos, field;
+
+ tree name = TYPE_NAME (type);
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+ name = concat_name (name, "ALIGN");
+ TYPE_NAME (record_type) = name;
+
+ /* Compute VOFFSET and then POS. The next byte position multiple of some
+ alignment after some address is obtained by "and"ing the alignment minus
+ 1 with the two's complement of the address. */
+ voffset_st = size_binop (BIT_AND_EXPR,
+ fold_build1 (NEGATE_EXPR, sizetype, vblock_addr_st),
+ size_int ((align / BITS_PER_UNIT) - 1));
+
+ /* POS = (ROOM + VOFFSET) * BITS_PER_UNIT, in bitsizetype. */
+ pos = size_binop (MULT_EXPR,
+ convert (bitsizetype,
+ size_binop (PLUS_EXPR, room_st, voffset_st)),
+ bitsize_unit_node);
+
+ /* Craft the GCC record representation. We exceptionally do everything
+ manually here because 1) our generic circuitry is not quite ready to
+ handle the complex position/size expressions we are setting up, 2) we
+ have a strong simplifying factor at hand: we know the maximum possible
+ value of voffset, and 3) we have to set/reset at least the sizes in
+ accordance with this maximum value anyway, as we need them to convey
+ what should be "alloc"ated for this type.
+
+ Use -1 as the 'addressable' indication for the field to prevent the
+ creation of a bitfield. We don't need one, it would have damaging
+ consequences on the alignment computation, and create_field_decl would
+ make one without this special argument, for instance because of the
+ complex position expression. */
+ field = create_field_decl (get_identifier ("F"), type, record_type, size,
+ pos, 1, -1);
+ TYPE_FIELDS (record_type) = field;
+
+ TYPE_ALIGN (record_type) = base_align;
+ TYPE_USER_ALIGN (record_type) = 1;
+
+ TYPE_SIZE (record_type)
+ = size_binop (PLUS_EXPR,
+ size_binop (MULT_EXPR, convert (bitsizetype, size),
+ bitsize_unit_node),
+ bitsize_int (align + room * BITS_PER_UNIT));
+ TYPE_SIZE_UNIT (record_type)
+ = size_binop (PLUS_EXPR, size,
+ size_int (room + align / BITS_PER_UNIT));
+
+ SET_TYPE_MODE (record_type, BLKmode);
+ relate_alias_sets (record_type, type, ALIAS_SET_COPY);
+
+ /* Declare it now since it will never be declared otherwise. This is
+ necessary to ensure that its subtrees are properly marked. */
+ create_type_decl (name, record_type, true, false, gnat_node);
+
+ return record_type;
+}
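+
+#if 0
+/* Illustrative sketch, not part of the original sources: the bit trick
+   used above to compute VOFFSET, in self-contained C.  The distance from
+   an address up to the next multiple of ALIGN (a power of two) is the
+   two's complement of the address "and"ed with ALIGN - 1.  The function
+   name is hypothetical.  */
+static unsigned long
+next_align_offset (unsigned long addr, unsigned long align)
+{
+  /* E.g. addr == 13, align == 8: (-13 & 7) == 3 and 13 + 3 == 16.  */
+  return (0 - addr) & (align - 1);
+}
+#endif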
+
+/* TYPE is a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE that is being used
+ as the field type of a packed record if IN_RECORD is true, or as the
+ component type of a packed array if IN_RECORD is false. See if we can
+ rewrite it either as a type that has a non-BLKmode, which we can pack
+ tighter in the packed record case, or as a smaller type. If so, return
+ the new type. If not, return the original type. */
+
+tree
+make_packable_type (tree type, bool in_record)
+{
+ unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE (type));
+ unsigned HOST_WIDE_INT new_size;
+ tree new_type, old_field, field_list = NULL_TREE;
+ unsigned int align;
+
+ /* No point in doing anything if the size is zero. */
+ if (size == 0)
+ return type;
+
+ new_type = make_node (TREE_CODE (type));
+
+ /* Copy the name and flags from the old type to that of the new.
+ Note that we rely on the pointer equality created here for
+ TYPE_NAME to look through conversions in various places. */
+ TYPE_NAME (new_type) = TYPE_NAME (type);
+ TYPE_JUSTIFIED_MODULAR_P (new_type) = TYPE_JUSTIFIED_MODULAR_P (type);
+ TYPE_CONTAINS_TEMPLATE_P (new_type) = TYPE_CONTAINS_TEMPLATE_P (type);
+ if (TREE_CODE (type) == RECORD_TYPE)
+ TYPE_PADDING_P (new_type) = TYPE_PADDING_P (type);
+
+ /* If we are in a record and have a small size, set the alignment to
+ try for an integral mode. Otherwise set it to try for a smaller
+ type with BLKmode. */
+ if (in_record && size <= MAX_FIXED_MODE_SIZE)
+ {
+ align = ceil_pow2 (size);
+ TYPE_ALIGN (new_type) = align;
+ new_size = (size + align - 1) & -align;
+ }
+ else
+ {
+ unsigned HOST_WIDE_INT align;
+
+ /* Do not try to shrink the size if the RM size is not constant. */
+ if (TYPE_CONTAINS_TEMPLATE_P (type)
+ || !tree_fits_uhwi_p (TYPE_ADA_SIZE (type)))
+ return type;
+
+ /* Round the RM size up to a unit boundary to get the minimal size
+ for a BLKmode record. Give up if it's already the size. */
+ new_size = tree_to_uhwi (TYPE_ADA_SIZE (type));
+ new_size = (new_size + BITS_PER_UNIT - 1) & -BITS_PER_UNIT;
+ if (new_size == size)
+ return type;
+
+ align = new_size & -new_size;
+ TYPE_ALIGN (new_type) = MIN (TYPE_ALIGN (type), align);
+ }
+
+ TYPE_USER_ALIGN (new_type) = 1;
+
+ /* Now copy the fields, keeping the position and size as we don't want
+ to change the layout by propagating the packedness downwards. */
+ for (old_field = TYPE_FIELDS (type); old_field;
+ old_field = DECL_CHAIN (old_field))
+ {
+ tree new_field_type = TREE_TYPE (old_field);
+ tree new_field, new_size;
+
+ if (RECORD_OR_UNION_TYPE_P (new_field_type)
+ && !TYPE_FAT_POINTER_P (new_field_type)
+ && tree_fits_uhwi_p (TYPE_SIZE (new_field_type)))
+ new_field_type = make_packable_type (new_field_type, true);
+
+ /* However, for the last field in a not already packed record type
+ that is of an aggregate type, we need to use the RM size in the
+ packable version of the record type, see finish_record_type. */
+ if (!DECL_CHAIN (old_field)
+ && !TYPE_PACKED (type)
+ && RECORD_OR_UNION_TYPE_P (new_field_type)
+ && !TYPE_FAT_POINTER_P (new_field_type)
+ && !TYPE_CONTAINS_TEMPLATE_P (new_field_type)
+ && TYPE_ADA_SIZE (new_field_type))
+ new_size = TYPE_ADA_SIZE (new_field_type);
+ else
+ new_size = DECL_SIZE (old_field);
+
+ new_field
+ = create_field_decl (DECL_NAME (old_field), new_field_type, new_type,
+ new_size, bit_position (old_field),
+ TYPE_PACKED (type),
+ !DECL_NONADDRESSABLE_P (old_field));
+
+ DECL_INTERNAL_P (new_field) = DECL_INTERNAL_P (old_field);
+ SET_DECL_ORIGINAL_FIELD_TO_FIELD (new_field, old_field);
+ if (TREE_CODE (new_type) == QUAL_UNION_TYPE)
+ DECL_QUALIFIER (new_field) = DECL_QUALIFIER (old_field);
+
+ DECL_CHAIN (new_field) = field_list;
+ field_list = new_field;
+ }
+
+ finish_record_type (new_type, nreverse (field_list), 2, false);
+ relate_alias_sets (new_type, type, ALIAS_SET_COPY);
+ if (TYPE_STUB_DECL (type))
+ SET_DECL_PARALLEL_TYPE (TYPE_STUB_DECL (new_type),
+ DECL_PARALLEL_TYPE (TYPE_STUB_DECL (type)));
+
+ /* If this is a padding record, we never want to make the size smaller
+ than what was specified. For QUAL_UNION_TYPE, also copy the size. */
+ if (TYPE_IS_PADDING_P (type) || TREE_CODE (type) == QUAL_UNION_TYPE)
+ {
+ TYPE_SIZE (new_type) = TYPE_SIZE (type);
+ TYPE_SIZE_UNIT (new_type) = TYPE_SIZE_UNIT (type);
+ new_size = size;
+ }
+ else
+ {
+ TYPE_SIZE (new_type) = bitsize_int (new_size);
+ TYPE_SIZE_UNIT (new_type)
+ = size_int ((new_size + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
+ }
+
+ if (!TYPE_CONTAINS_TEMPLATE_P (type))
+ SET_TYPE_ADA_SIZE (new_type, TYPE_ADA_SIZE (type));
+
+ compute_record_mode (new_type);
+
+ /* Try harder to get a packable type if necessary, for example
+ in case the record itself contains a BLKmode field. */
+ if (in_record && TYPE_MODE (new_type) == BLKmode)
+ SET_TYPE_MODE (new_type,
+ mode_for_size_tree (TYPE_SIZE (new_type), MODE_INT, 1));
+
+ /* If neither the mode nor the size has shrunk, return the old type. */
+ if (TYPE_MODE (new_type) == BLKmode && new_size >= size)
+ return type;
+
+ return new_type;
+}
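+
+#if 0
+/* Illustrative sketch, not part of the original sources: the two bit
+   manipulations used above, assuming BITS_PER_UNIT == 8 and hypothetical
+   function names.  The first rounds a bit size up to a byte boundary;
+   the second extracts the lowest set bit of a size, i.e. the largest
+   power of two dividing it, which is a safe alignment for it.  */
+static unsigned long
+round_up_to_byte (unsigned long size_in_bits)
+{
+  return (size_in_bits + 8 - 1) & -(unsigned long) 8;  /* 13 -> 16 */
+}
+
+static unsigned long
+lowest_set_bit (unsigned long size)
+{
+  return size & -size;  /* 24 -> 8 */
+}
+#endif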
+
+/* Given a type TYPE, return a new type whose size is appropriate for SIZE.
+ If TYPE is the best type, return it. Otherwise, make a new type. We
+ only support new integral and pointer types. FOR_BIASED is true if
+ we are making a biased type. */
+
+tree
+make_type_from_size (tree type, tree size_tree, bool for_biased)
+{
+ unsigned HOST_WIDE_INT size;
+ bool biased_p;
+ tree new_type;
+
+ /* If size indicates an error, just return TYPE to avoid propagating
+ the error. Likewise if it's too large to represent. */
+ if (!size_tree || !tree_fits_uhwi_p (size_tree))
+ return type;
+
+ size = tree_to_uhwi (size_tree);
+
+ switch (TREE_CODE (type))
+ {
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ biased_p = (TREE_CODE (type) == INTEGER_TYPE
+ && TYPE_BIASED_REPRESENTATION_P (type));
+
+ /* Integer types with precision 0 are forbidden. */
+ if (size == 0)
+ size = 1;
+
+ /* Only do something if the type isn't a packed array type and doesn't
+ already have the proper size and the size isn't too large. */
+ if (TYPE_IS_PACKED_ARRAY_TYPE_P (type)
+ || (TYPE_PRECISION (type) == size && biased_p == for_biased)
+ || size > LONG_LONG_TYPE_SIZE)
+ break;
+
+ biased_p |= for_biased;
+ if (TYPE_UNSIGNED (type) || biased_p)
+ new_type = make_unsigned_type (size);
+ else
+ new_type = make_signed_type (size);
+ TREE_TYPE (new_type) = TREE_TYPE (type) ? TREE_TYPE (type) : type;
+ SET_TYPE_RM_MIN_VALUE (new_type,
+ convert (TREE_TYPE (new_type),
+ TYPE_MIN_VALUE (type)));
+ SET_TYPE_RM_MAX_VALUE (new_type,
+ convert (TREE_TYPE (new_type),
+ TYPE_MAX_VALUE (type)));
+ /* Copy the name to show that it's essentially the same type and
+ not a subrange type. */
+ TYPE_NAME (new_type) = TYPE_NAME (type);
+ TYPE_BIASED_REPRESENTATION_P (new_type) = biased_p;
+ SET_TYPE_RM_SIZE (new_type, bitsize_int (size));
+ return new_type;
+
+ case RECORD_TYPE:
+ /* Do something if this is a fat pointer, in which case we
+ may need to return the thin pointer. */
+ if (TYPE_FAT_POINTER_P (type) && size < POINTER_SIZE * 2)
+ {
+ enum machine_mode p_mode = mode_for_size (size, MODE_INT, 0);
+ if (!targetm.valid_pointer_mode (p_mode))
+ p_mode = ptr_mode;
+ return
+ build_pointer_type_for_mode
+ (TYPE_OBJECT_RECORD_TYPE (TYPE_UNCONSTRAINED_ARRAY (type)),
+ p_mode, 0);
+ }
+ break;
+
+ case POINTER_TYPE:
+ /* Only do something if this is a thin pointer, in which case we
+ may need to return the fat pointer. */
+ if (TYPE_IS_THIN_POINTER_P (type) && size >= POINTER_SIZE * 2)
+ return
+ build_pointer_type (TYPE_UNCONSTRAINED_ARRAY (TREE_TYPE (type)));
+ break;
+
+ default:
+ break;
+ }
+
+ return type;
+}
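+
+#if 0
+/* Illustrative sketch, not part of the original sources: what the biased
+   representation handled above means at run time, with hypothetical
+   names.  A value of a biased type is stored as an unsigned offset from
+   the subtype's lower bound, so a subrange such as 1000 .. 1003 fits in
+   two bits.  */
+static unsigned long
+encode_biased (long value, long low)
+{
+  return (unsigned long) (value - low);
+}
+
+static long
+decode_biased (unsigned long bits, long low)
+{
+  return low + (long) bits;
+}
+#endif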
+
+/* See if the data pointed to by the hash table slot is marked. */
+
+static int
+pad_type_hash_marked_p (const void *p)
+{
+ const_tree const type = ((const struct pad_type_hash *) p)->type;
+
+ return ggc_marked_p (type);
+}
+
+/* Return the cached hash value. */
+
+static hashval_t
+pad_type_hash_hash (const void *p)
+{
+ return ((const struct pad_type_hash *) p)->hash;
+}
+
+/* Return 1 iff the padded types are equivalent. */
+
+static int
+pad_type_hash_eq (const void *p1, const void *p2)
+{
+ const struct pad_type_hash *const t1 = (const struct pad_type_hash *) p1;
+ const struct pad_type_hash *const t2 = (const struct pad_type_hash *) p2;
+ tree type1, type2;
+
+ if (t1->hash != t2->hash)
+ return 0;
+
+ type1 = t1->type;
+ type2 = t2->type;
+
+ /* We consider that the padded types are equivalent if they pad the same
+ type and have the same size, alignment and RM size. Taking the mode
+ into account is redundant since it is determined by the others. */
+ return
+ TREE_TYPE (TYPE_FIELDS (type1)) == TREE_TYPE (TYPE_FIELDS (type2))
+ && TYPE_SIZE (type1) == TYPE_SIZE (type2)
+ && TYPE_ALIGN (type1) == TYPE_ALIGN (type2)
+ && TYPE_ADA_SIZE (type1) == TYPE_ADA_SIZE (type2);
+}
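+
+#if 0
+/* Illustrative sketch, not part of the original sources: the equivalence
+   implemented by the two callbacks above, restated on a miniature struct
+   with hypothetical names.  Two padded types are interchangeable iff they
+   pad the same inner type with the same size, alignment and RM size; the
+   mode needs no comparison since it is determined by these.  */
+struct mini_pad
+{
+  const void *inner;
+  unsigned long size, align, ada_size;
+};
+
+static int
+mini_pad_eq (const struct mini_pad *a, const struct mini_pad *b)
+{
+  return a->inner == b->inner
+         && a->size == b->size
+         && a->align == b->align
+         && a->ada_size == b->ada_size;
+}
+#endif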
+
+/* Ensure that TYPE has SIZE and ALIGN. Make and return a new padded type
+ if needed. We have already verified that SIZE and TYPE are large enough.
+ GNAT_ENTITY is used to name the resulting record and to issue a warning.
+ IS_COMPONENT_TYPE is true if this is being done for the component type of
+ an array. IS_USER_TYPE is true if the original type needs to be completed.
+ DEFINITION is true if this type is being defined. SET_RM_SIZE is true if
+ the RM size of the resulting type is to be set to SIZE too. */
+
+tree
+maybe_pad_type (tree type, tree size, unsigned int align,
+ Entity_Id gnat_entity, bool is_component_type,
+ bool is_user_type, bool definition, bool set_rm_size)
+{
+ tree orig_size = TYPE_SIZE (type);
+ unsigned int orig_align = TYPE_ALIGN (type);
+ tree record, field;
+
+ /* If TYPE is a padded type, see if it agrees with any size and alignment
+ we were given. If so, return the original type. Otherwise, strip
+ off the padding, since we will either be returning the inner type
+ or repadding it. If no size or alignment is specified, use that of
+ the original padded type. */
+ if (TYPE_IS_PADDING_P (type))
+ {
+ if ((!size
+ || operand_equal_p (round_up (size, orig_align), orig_size, 0))
+ && (align == 0 || align == orig_align))
+ return type;
+
+ if (!size)
+ size = orig_size;
+ if (align == 0)
+ align = orig_align;
+
+ type = TREE_TYPE (TYPE_FIELDS (type));
+ orig_size = TYPE_SIZE (type);
+ orig_align = TYPE_ALIGN (type);
+ }
+
+ /* If the size is either not being changed or is being made smaller (which
+ is not done here and is only valid for bitfields anyway), show the size
+ isn't changing. Likewise, clear the alignment if it isn't being
+ changed. Then return if we aren't doing anything. */
+ if (size
+ && (operand_equal_p (size, orig_size, 0)
+ || (TREE_CODE (orig_size) == INTEGER_CST
+ && tree_int_cst_lt (size, orig_size))))
+ size = NULL_TREE;
+
+ if (align == orig_align)
+ align = 0;
+
+ if (align == 0 && !size)
+ return type;
+
+ /* If requested, complete the original type and give it a name. */
+ if (is_user_type)
+ create_type_decl (get_entity_name (gnat_entity), type,
+ !Comes_From_Source (gnat_entity),
+ !(TYPE_NAME (type)
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_IGNORED_P (TYPE_NAME (type))),
+ gnat_entity);
+
+ /* We used to modify the record in place in some cases, but that could
+ generate incorrect debugging information. So make a new record
+ type and name. */
+ record = make_node (RECORD_TYPE);
+ TYPE_PADDING_P (record) = 1;
+
+ if (Present (gnat_entity))
+ TYPE_NAME (record) = create_concat_name (gnat_entity, "PAD");
+
+ TYPE_ALIGN (record) = align ? align : orig_align;
+ TYPE_SIZE (record) = size ? size : orig_size;
+ TYPE_SIZE_UNIT (record)
+ = convert (sizetype,
+ size_binop (CEIL_DIV_EXPR, TYPE_SIZE (record),
+ bitsize_unit_node));
+
+ /* If we are changing the alignment and the input type is a record with
+ BLKmode and a small constant size, try to make a form that has an
+ integral mode. This might allow the padding record to also have an
+ integral mode, which will be much more efficient. There is no point
+ in doing so if a size is specified unless it is also a small constant
+ size and it is incorrect to do so if we cannot guarantee that the mode
+ will be naturally aligned since the field must always be addressable.
+
+ ??? This might not always be a win when done for a stand-alone object:
+ since the nominal and the effective type of the object will now have
+ different modes, a VIEW_CONVERT_EXPR will be required for converting
+ between them and it might be hard to overcome afterwards, including
+ at the RTL level when the stand-alone object is accessed as a whole. */
+ if (align != 0
+ && RECORD_OR_UNION_TYPE_P (type)
+ && TYPE_MODE (type) == BLKmode
+ && !TYPE_BY_REFERENCE_P (type)
+ && TREE_CODE (orig_size) == INTEGER_CST
+ && !TREE_OVERFLOW (orig_size)
+ && compare_tree_int (orig_size, MAX_FIXED_MODE_SIZE) <= 0
+ && (!size
+ || (TREE_CODE (size) == INTEGER_CST
+ && compare_tree_int (size, MAX_FIXED_MODE_SIZE) <= 0)))
+ {
+ tree packable_type = make_packable_type (type, true);
+ if (TYPE_MODE (packable_type) != BLKmode
+ && align >= TYPE_ALIGN (packable_type))
+ type = packable_type;
+ }
+
+ /* Now create the field with the original size. */
+ field = create_field_decl (get_identifier ("F"), type, record, orig_size,
+ bitsize_zero_node, 0, 1);
+ DECL_INTERNAL_P (field) = 1;
+
+ /* Do not emit debug info until after the auxiliary record is built. */
+ finish_record_type (record, field, 1, false);
+
+ /* Set the RM size if requested. */
+ if (set_rm_size)
+ {
+ SET_TYPE_ADA_SIZE (record, size ? size : orig_size);
+
+ /* If the padded type is complete and has constant size, we canonicalize
+ it by means of the hash table. This is consistent with the language
+ semantics and ensures that gigi and the middle-end have a common view
+ of these padded types. */
+ if (TREE_CONSTANT (TYPE_SIZE (record)))
+ {
+ hashval_t hashcode;
+ struct pad_type_hash in, *h;
+ void **loc;
+
+ hashcode = iterative_hash_object (TYPE_HASH (type), 0);
+ hashcode = iterative_hash_expr (TYPE_SIZE (record), hashcode);
+ hashcode = iterative_hash_hashval_t (TYPE_ALIGN (record), hashcode);
+ hashcode = iterative_hash_expr (TYPE_ADA_SIZE (record), hashcode);
+
+ in.hash = hashcode;
+ in.type = record;
+ h = (struct pad_type_hash *)
+ htab_find_with_hash (pad_type_hash_table, &in, hashcode);
+ if (h)
+ {
+ record = h->type;
+ goto built;
+ }
+
+ h = ggc_alloc_pad_type_hash ();
+ h->hash = hashcode;
+ h->type = record;
+ loc = htab_find_slot_with_hash (pad_type_hash_table, h, hashcode,
+ INSERT);
+ *loc = (void *)h;
+ }
+ }
+
+ /* If debugging information is being written for the input type, write a
+ record that shows what we are a subtype of and also make a variable
+ that indicates our size, if it is still variable. */
+ if (TREE_CODE (orig_size) != INTEGER_CST
+ && TYPE_NAME (record)
+ && TYPE_NAME (type)
+ && !(TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_IGNORED_P (TYPE_NAME (type))))
+ {
+ tree marker = make_node (RECORD_TYPE);
+ tree name = TYPE_NAME (record);
+ tree orig_name = TYPE_NAME (type);
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ if (TREE_CODE (orig_name) == TYPE_DECL)
+ orig_name = DECL_NAME (orig_name);
+
+ TYPE_NAME (marker) = concat_name (name, "XVS");
+ finish_record_type (marker,
+ create_field_decl (orig_name,
+ build_reference_type (type),
+ marker, NULL_TREE, NULL_TREE,
+ 0, 0),
+ 0, true);
+
+ add_parallel_type (record, marker);
+
+ if (definition && size && TREE_CODE (size) != INTEGER_CST)
+ TYPE_SIZE_UNIT (marker)
+ = create_var_decl (concat_name (name, "XVZ"), NULL_TREE, sizetype,
+ TYPE_SIZE_UNIT (record), false, false, false,
+ false, NULL, gnat_entity);
+ }
+
+ rest_of_record_type_compilation (record);
+
+built:
+ /* If the size was widened explicitly, maybe give a warning. Take the
+ original size as the maximum size of the input if there was an
+ unconstrained record involved and round it up to the specified alignment,
+ if one was specified. But don't do it if we are just annotating types
+ and the type is tagged, since tagged types aren't fully laid out in this
+ mode. */
+ if (!size
+ || TREE_CODE (size) == COND_EXPR
+ || TREE_CODE (size) == MAX_EXPR
+ || No (gnat_entity)
+ || (type_annotate_only && Is_Tagged_Type (Etype (gnat_entity))))
+ return record;
+
+ if (CONTAINS_PLACEHOLDER_P (orig_size))
+ orig_size = max_size (orig_size, true);
+
+ if (align)
+ orig_size = round_up (orig_size, align);
+
+ if (!operand_equal_p (size, orig_size, 0)
+ && !(TREE_CODE (size) == INTEGER_CST
+ && TREE_CODE (orig_size) == INTEGER_CST
+ && (TREE_OVERFLOW (size)
+ || TREE_OVERFLOW (orig_size)
+ || tree_int_cst_lt (size, orig_size))))
+ {
+ Node_Id gnat_error_node = Empty;
+
+ if (Is_Packed_Array_Type (gnat_entity))
+ gnat_entity = Original_Array_Type (gnat_entity);
+
+ if ((Ekind (gnat_entity) == E_Component
+ || Ekind (gnat_entity) == E_Discriminant)
+ && Present (Component_Clause (gnat_entity)))
+ gnat_error_node = Last_Bit (Component_Clause (gnat_entity));
+ else if (Present (Size_Clause (gnat_entity)))
+ gnat_error_node = Expression (Size_Clause (gnat_entity));
+
+ /* Generate message only for entities that come from source, since
+ if we have an entity created by expansion, the message will be
+ generated for some other corresponding source entity. */
+ if (Comes_From_Source (gnat_entity))
+ {
+ if (Present (gnat_error_node))
+ post_error_ne_tree ("{^ }bits of & unused?",
+ gnat_error_node, gnat_entity,
+ size_diffop (size, orig_size));
+ else if (is_component_type)
+ post_error_ne_tree ("component of& padded{ by ^ bits}?",
+ gnat_entity, gnat_entity,
+ size_diffop (size, orig_size));
+ }
+ }
+
+ return record;
+}
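+
+#if 0
+/* Illustrative sketch, not part of the original sources: the layout that
+   maybe_pad_type above produces, in hypothetical C terms.  The original
+   type becomes the single field "F" of a wrapper record whose size and
+   alignment are the requested ones; padding a 1-byte type to a 4-byte,
+   4-aligned slot roughly corresponds to:  */
+struct pad_example
+{
+  unsigned char f;       /* the inner type, at bit position 0 */
+  unsigned char pad[3];  /* filler up to the requested size */
+} __attribute__ ((aligned (4)));
+#endif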
+
+/* Relate the alias sets of GNU_NEW_TYPE and GNU_OLD_TYPE according to OP.
+ If this is a multi-dimensional array type, do this recursively.
+
+ OP may be
+ - ALIAS_SET_COPY: the new set is made a copy of the old one.
+ - ALIAS_SET_SUPERSET: the new set is made a superset of the old one.
+ - ALIAS_SET_SUBSET: the new set is made a subset of the old one. */
+
+void
+relate_alias_sets (tree gnu_new_type, tree gnu_old_type, enum alias_set_op op)
+{
+ /* Remove any padding from GNU_OLD_TYPE. It doesn't matter in the case
+ of a one-dimensional array, since the padding has the same alias set
+ as the field type, but if it's a multi-dimensional array, we need to
+ see the inner types. */
+ while (TREE_CODE (gnu_old_type) == RECORD_TYPE
+ && (TYPE_JUSTIFIED_MODULAR_P (gnu_old_type)
+ || TYPE_PADDING_P (gnu_old_type)))
+ gnu_old_type = TREE_TYPE (TYPE_FIELDS (gnu_old_type));
+
+ /* Unconstrained array types are deemed incomplete and would thus be given
+ alias set 0. Retrieve the underlying array type. */
+ if (TREE_CODE (gnu_old_type) == UNCONSTRAINED_ARRAY_TYPE)
+ gnu_old_type
+ = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_old_type))));
+ if (TREE_CODE (gnu_new_type) == UNCONSTRAINED_ARRAY_TYPE)
+ gnu_new_type
+ = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_new_type))));
+
+ if (TREE_CODE (gnu_new_type) == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (gnu_new_type)) == ARRAY_TYPE
+ && TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_new_type)))
+ relate_alias_sets (TREE_TYPE (gnu_new_type), TREE_TYPE (gnu_old_type), op);
+
+ switch (op)
+ {
+ case ALIAS_SET_COPY:
+ /* The alias set shouldn't be copied between array types with different
+ aliasing settings because this can break the aliasing relationship
+ between the array type and its element type. */
+#ifndef ENABLE_CHECKING
+ if (flag_strict_aliasing)
+#endif
+ gcc_assert (!(TREE_CODE (gnu_new_type) == ARRAY_TYPE
+ && TREE_CODE (gnu_old_type) == ARRAY_TYPE
+ && TYPE_NONALIASED_COMPONENT (gnu_new_type)
+ != TYPE_NONALIASED_COMPONENT (gnu_old_type)));
+
+ TYPE_ALIAS_SET (gnu_new_type) = get_alias_set (gnu_old_type);
+ break;
+
+ case ALIAS_SET_SUBSET:
+ case ALIAS_SET_SUPERSET:
+ {
+ alias_set_type old_set = get_alias_set (gnu_old_type);
+ alias_set_type new_set = get_alias_set (gnu_new_type);
+
+ /* Do nothing if the alias sets conflict. This ensures that we
+ never call record_alias_subset several times for the same pair
+ or at all for alias set 0. */
+ if (!alias_sets_conflict_p (old_set, new_set))
+ {
+ if (op == ALIAS_SET_SUBSET)
+ record_alias_subset (old_set, new_set);
+ else
+ record_alias_subset (new_set, old_set);
+ }
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ record_component_aliases (gnu_new_type);
+}
+
+/* Record TYPE as a builtin type for Ada. NAME is the name of the type.
+ ARTIFICIAL_P is true if it's a type that was generated by the compiler. */
+
+void
+record_builtin_type (const char *name, tree type, bool artificial_p)
+{
+ tree type_decl = build_decl (input_location,
+ TYPE_DECL, get_identifier (name), type);
+ DECL_ARTIFICIAL (type_decl) = artificial_p;
+ TYPE_ARTIFICIAL (type) = artificial_p;
+ gnat_pushdecl (type_decl, Empty);
+
+ if (debug_hooks->type_decl)
+ debug_hooks->type_decl (type_decl, false);
+}
+
+/* Given a record type RECORD_TYPE and a list of FIELD_DECL nodes FIELD_LIST,
+ finish constructing the record type as a fat pointer type. */
+
+void
+finish_fat_pointer_type (tree record_type, tree field_list)
+{
+ /* Make sure we can put it into a register. */
+ if (STRICT_ALIGNMENT)
+ TYPE_ALIGN (record_type) = MIN (BIGGEST_ALIGNMENT, 2 * POINTER_SIZE);
+
+ /* Show what it really is. */
+ TYPE_FAT_POINTER_P (record_type) = 1;
+
+ /* Do not emit debug info for it since the types of its fields may still be
+ incomplete at this point. */
+ finish_record_type (record_type, field_list, 0, false);
+
+ /* Force type_contains_placeholder_p to return true on it. Although the
+ PLACEHOLDER_EXPRs are referenced only indirectly, this isn't a pointer
+ type but the representation of the unconstrained array. */
+ TYPE_CONTAINS_PLACEHOLDER_INTERNAL (record_type) = 2;
+}
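+
+#if 0
+/* Illustrative sketch, not part of the original sources: the run-time
+   shape of the fat pointer finished above, with hypothetical names.  It
+   pairs a pointer to the array data with a pointer to a bounds template,
+   which is why the type is given 2 * POINTER_SIZE and register alignment
+   above.  */
+struct fat_pointer_example
+{
+  void *p_array;   /* the data */
+  void *p_bounds;  /* the lower/upper bounds of each dimension */
+};
+#endif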
+
+/* Given a record type RECORD_TYPE and a list of FIELD_DECL nodes FIELD_LIST,
+ finish constructing the record or union type. If REP_LEVEL is zero, this
+ record has no representation clause and so will be entirely laid out here.
+ If REP_LEVEL is one, this record has a representation clause and has been
+ laid out already; only set the sizes and alignment. If REP_LEVEL is two,
+ this record is derived from a parent record and thus inherits its layout;
+ only make a pass on the fields to finalize them. DEBUG_INFO_P is true if
+ we need to write debug information about this type. */
+
+void
+finish_record_type (tree record_type, tree field_list, int rep_level,
+ bool debug_info_p)
+{
+ enum tree_code code = TREE_CODE (record_type);
+ tree name = TYPE_NAME (record_type);
+ tree ada_size = bitsize_zero_node;
+ tree size = bitsize_zero_node;
+ bool had_size = TYPE_SIZE (record_type) != 0;
+ bool had_size_unit = TYPE_SIZE_UNIT (record_type) != 0;
+ bool had_align = TYPE_ALIGN (record_type) != 0;
+ tree field;
+
+ TYPE_FIELDS (record_type) = field_list;
+
+ /* Always attach the TYPE_STUB_DECL for a record type. It is required to
+ generate debug info and have a parallel type. */
+ if (name && TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+ TYPE_STUB_DECL (record_type) = create_type_stub_decl (name, record_type);
+
+ /* Globally initialize the record first. If this is a rep'ed record,
+ that just means some initializations; otherwise, layout the record. */
+ if (rep_level > 0)
+ {
+ TYPE_ALIGN (record_type) = MAX (BITS_PER_UNIT, TYPE_ALIGN (record_type));
+
+ if (!had_size_unit)
+ TYPE_SIZE_UNIT (record_type) = size_zero_node;
+
+ if (!had_size)
+ TYPE_SIZE (record_type) = bitsize_zero_node;
+
+ /* For all-repped records with a size specified, lay the QUAL_UNION_TYPE
+ out just like a UNION_TYPE, since the size will be fixed. */
+ else if (code == QUAL_UNION_TYPE)
+ code = UNION_TYPE;
+ }
+ else
+ {
+ /* Ensure there isn't a size already set. There can be in an error
+ case where there is a rep clause but all fields have errors and
+ no longer have a position. */
+ TYPE_SIZE (record_type) = 0;
+
+ /* Ensure we use the traditional GCC layout for bitfields when we need
+ to pack the record type or have a representation clause. The other
+ possible layout (Microsoft C compiler), if available, would prevent
+ efficient packing in almost all cases. */
+#ifdef TARGET_MS_BITFIELD_LAYOUT
+ if (TARGET_MS_BITFIELD_LAYOUT && TYPE_PACKED (record_type))
+ decl_attributes (&record_type,
+ tree_cons (get_identifier ("gcc_struct"),
+ NULL_TREE, NULL_TREE),
+ ATTR_FLAG_TYPE_IN_PLACE);
+#endif
+
+ layout_type (record_type);
+ }
+
+ /* At this point, the position and size of each field is known. It was
+ either set before entry by a rep clause, or by laying out the type above.
+
+ We now run a pass over the fields (in reverse order for QUAL_UNION_TYPEs)
+ to compute the Ada size; the GCC size and alignment (for rep'ed records
+ that are not padding types); and the mode (for rep'ed records). We also
+ clear the DECL_BIT_FIELD indication for the cases we know have not been
+ handled yet, and adjust DECL_NONADDRESSABLE_P accordingly. */
+
+ if (code == QUAL_UNION_TYPE)
+ field_list = nreverse (field_list);
+
+ for (field = field_list; field; field = DECL_CHAIN (field))
+ {
+ tree type = TREE_TYPE (field);
+ tree pos = bit_position (field);
+ tree this_size = DECL_SIZE (field);
+ tree this_ada_size;
+
+ if (RECORD_OR_UNION_TYPE_P (type)
+ && !TYPE_FAT_POINTER_P (type)
+ && !TYPE_CONTAINS_TEMPLATE_P (type)
+ && TYPE_ADA_SIZE (type))
+ this_ada_size = TYPE_ADA_SIZE (type);
+ else
+ this_ada_size = this_size;
+
+ /* Clear DECL_BIT_FIELD for the cases layout_decl does not handle. */
+ if (DECL_BIT_FIELD (field)
+ && operand_equal_p (this_size, TYPE_SIZE (type), 0))
+ {
+ unsigned int align = TYPE_ALIGN (type);
+
+ /* In the general case, type alignment is required. */
+ if (value_factor_p (pos, align))
+ {
+ /* The enclosing record type must be sufficiently aligned.
+ Otherwise, if no alignment was specified for it and it
+ has been laid out already, bump its alignment to the
+ desired one if this is compatible with its size. */
+ if (TYPE_ALIGN (record_type) >= align)
+ {
+ DECL_ALIGN (field) = MAX (DECL_ALIGN (field), align);
+ DECL_BIT_FIELD (field) = 0;
+ }
+ else if (!had_align
+ && rep_level == 0
+ && value_factor_p (TYPE_SIZE (record_type), align))
+ {
+ TYPE_ALIGN (record_type) = align;
+ DECL_ALIGN (field) = MAX (DECL_ALIGN (field), align);
+ DECL_BIT_FIELD (field) = 0;
+ }
+ }
+
+ /* In the non-strict alignment case, only byte alignment is required. */
+ if (!STRICT_ALIGNMENT
+ && DECL_BIT_FIELD (field)
+ && value_factor_p (pos, BITS_PER_UNIT))
+ DECL_BIT_FIELD (field) = 0;
+ }
+
+ /* If we still have DECL_BIT_FIELD set at this point, we know that the
+ field is technically not addressable. Except that it can actually
+ be addressed if it is BLKmode and happens to be properly aligned. */
+ if (DECL_BIT_FIELD (field)
+ && !(DECL_MODE (field) == BLKmode
+ && value_factor_p (pos, BITS_PER_UNIT)))
+ DECL_NONADDRESSABLE_P (field) = 1;
+
+ /* A type must be as aligned as its most aligned field that is not
+ a bit-field. But this is already enforced by layout_type. */
+ if (rep_level > 0 && !DECL_BIT_FIELD (field))
+ TYPE_ALIGN (record_type)
+ = MAX (TYPE_ALIGN (record_type), DECL_ALIGN (field));
+
+ switch (code)
+ {
+ case UNION_TYPE:
+ ada_size = size_binop (MAX_EXPR, ada_size, this_ada_size);
+ size = size_binop (MAX_EXPR, size, this_size);
+ break;
+
+ case QUAL_UNION_TYPE:
+ ada_size
+ = fold_build3 (COND_EXPR, bitsizetype, DECL_QUALIFIER (field),
+ this_ada_size, ada_size);
+ size = fold_build3 (COND_EXPR, bitsizetype, DECL_QUALIFIER (field),
+ this_size, size);
+ break;
+
+ case RECORD_TYPE:
+ /* Since we know here that all fields are sorted in order of
+ increasing bit position, the size of the record is one
+ higher than the ending bit of the last field processed
+ unless we have a rep clause, since in that case we might
+ have a field outside a QUAL_UNION_TYPE that has a higher ending
+ position. So use a MAX in that case. Also, if this field is a
+ QUAL_UNION_TYPE, we need to take into account the previous size in
+ the case of empty variants. */
+ ada_size
+ = merge_sizes (ada_size, pos, this_ada_size,
+ TREE_CODE (type) == QUAL_UNION_TYPE, rep_level > 0);
+ size
+ = merge_sizes (size, pos, this_size,
+ TREE_CODE (type) == QUAL_UNION_TYPE, rep_level > 0);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if (code == QUAL_UNION_TYPE)
+ nreverse (field_list);
+
+ if (rep_level < 2)
+ {
+ /* If this is a padding record, we never want to make the size smaller
+ than what was specified in it, if any. */
+ if (TYPE_IS_PADDING_P (record_type) && TYPE_SIZE (record_type))
+ size = TYPE_SIZE (record_type);
+
+ /* Now set any of the values we've just computed that apply. */
+ if (!TYPE_FAT_POINTER_P (record_type)
+ && !TYPE_CONTAINS_TEMPLATE_P (record_type))
+ SET_TYPE_ADA_SIZE (record_type, ada_size);
+
+ if (rep_level > 0)
+ {
+ tree size_unit = had_size_unit
+ ? TYPE_SIZE_UNIT (record_type)
+ : convert (sizetype,
+ size_binop (CEIL_DIV_EXPR, size,
+ bitsize_unit_node));
+ unsigned int align = TYPE_ALIGN (record_type);
+
+ TYPE_SIZE (record_type) = variable_size (round_up (size, align));
+ TYPE_SIZE_UNIT (record_type)
+ = variable_size (round_up (size_unit, align / BITS_PER_UNIT));
+
+ compute_record_mode (record_type);
+ }
+ }
+
+ if (debug_info_p)
+ rest_of_record_type_compilation (record_type);
+}
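+
+#if 0
+/* Illustrative sketch, not part of the original sources: the size
+   computation that the QUAL_UNION_TYPE arm above builds as a tree of
+   COND_EXPRs, restated on scalars with hypothetical names.  Variants are
+   tested in declaration order and the first one whose qualifier holds
+   supplies the size.  */
+static unsigned long
+qual_union_size (const unsigned long *sizes, const int *qualifiers, int n)
+{
+  unsigned long size = 0;
+  int i;
+
+  /* Building from the last variant back mirrors the reversed field list
+     walked above and yields q0 ? s0 : (q1 ? s1 : ...).  */
+  for (i = n - 1; i >= 0; i--)
+    size = qualifiers[i] ? sizes[i] : size;
+  return size;
+}
+#endif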
+
+/* Append PARALLEL_TYPE on the chain of parallel types of TYPE. */
+
+void
+add_parallel_type (tree type, tree parallel_type)
+{
+ tree decl = TYPE_STUB_DECL (type);
+
+ while (DECL_PARALLEL_TYPE (decl))
+ decl = TYPE_STUB_DECL (DECL_PARALLEL_TYPE (decl));
+
+ SET_DECL_PARALLEL_TYPE (decl, parallel_type);
+}
+
+/* Return true if TYPE has a parallel type. */
+
+static bool
+has_parallel_type (tree type)
+{
+ tree decl = TYPE_STUB_DECL (type);
+
+ return DECL_PARALLEL_TYPE (decl) != NULL_TREE;
+}
+
+/* Wrap up compilation of RECORD_TYPE, i.e. output all the debug information
+ associated with it. It need not be invoked directly in most cases since
+ finish_record_type takes care of doing so, but this can be necessary if
+ a parallel type is to be attached to the record type. */
+
+void
+rest_of_record_type_compilation (tree record_type)
+{
+ bool var_size = false;
+ tree field;
+
+ /* If this is a padded type, the bulk of the debug info has already been
+ generated for the field's type. */
+ if (TYPE_IS_PADDING_P (record_type))
+ return;
+
+ /* If the type already has a parallel type (XVS type), then we're done. */
+ if (has_parallel_type (record_type))
+ return;
+
+ for (field = TYPE_FIELDS (record_type); field; field = DECL_CHAIN (field))
+ {
+ /* We need to make an XVE/XVU record if any field has variable size,
+ whether or not the record does. For example, if we have a union,
+ it may be that all fields, rounded up to the alignment, have the
+ same size, in which case we'll use that size. But the debug
+ output routines (except Dwarf2) won't be able to output the fields,
+ so we need to make the special record. */
+ if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
+ /* If a field has a non-constant qualifier, the record will have
+ variable size too. */
+ || (TREE_CODE (record_type) == QUAL_UNION_TYPE
+ && TREE_CODE (DECL_QUALIFIER (field)) != INTEGER_CST))
+ {
+ var_size = true;
+ break;
+ }
+ }
+
+ /* If this record type is of variable size, make a parallel record type that
+ will tell the debugger how the former is laid out (see exp_dbug.ads). */
+ if (var_size)
+ {
+ tree new_record_type
+ = make_node (TREE_CODE (record_type) == QUAL_UNION_TYPE
+ ? UNION_TYPE : TREE_CODE (record_type));
+ tree orig_name = TYPE_NAME (record_type), new_name;
+ tree last_pos = bitsize_zero_node;
+ tree old_field, prev_old_field = NULL_TREE;
+
+ if (TREE_CODE (orig_name) == TYPE_DECL)
+ orig_name = DECL_NAME (orig_name);
+
+ new_name
+ = concat_name (orig_name, TREE_CODE (record_type) == QUAL_UNION_TYPE
+ ? "XVU" : "XVE");
+ TYPE_NAME (new_record_type) = new_name;
+ TYPE_ALIGN (new_record_type) = BIGGEST_ALIGNMENT;
+ TYPE_STUB_DECL (new_record_type)
+ = create_type_stub_decl (new_name, new_record_type);
+ DECL_IGNORED_P (TYPE_STUB_DECL (new_record_type))
+ = DECL_IGNORED_P (TYPE_STUB_DECL (record_type));
+ TYPE_SIZE (new_record_type) = size_int (TYPE_ALIGN (record_type));
+ TYPE_SIZE_UNIT (new_record_type)
+ = size_int (TYPE_ALIGN (record_type) / BITS_PER_UNIT);
+
+ /* Now scan all the fields, replacing each field with a new field
+ corresponding to the new encoding. */
+ for (old_field = TYPE_FIELDS (record_type); old_field;
+ old_field = DECL_CHAIN (old_field))
+ {
+ tree field_type = TREE_TYPE (old_field);
+ tree field_name = DECL_NAME (old_field);
+ tree curpos = bit_position (old_field);
+ tree pos, new_field;
+ bool var = false;
+ unsigned int align = 0;
+
+ /* We're going to do some pattern matching below so remove as many
+ conversions as possible. */
+ curpos = remove_conversions (curpos, true);
+
+ /* See how the position was modified from the last position.
+
+ There are two basic cases we support: a value was added
+ to the last position or the last position was rounded to
+ a boundary and then something was added. Check for the
+ first case first. If not, see if there is any evidence
+ of rounding. If so, round the last position and retry.
+
+ If this is a union, the position can be taken as zero. */
+ if (TREE_CODE (new_record_type) == UNION_TYPE)
+ pos = bitsize_zero_node;
+ else
+ pos = compute_related_constant (curpos, last_pos);
+
+ if (!pos
+ && TREE_CODE (curpos) == MULT_EXPR
+ && tree_fits_uhwi_p (TREE_OPERAND (curpos, 1)))
+ {
+ tree offset = TREE_OPERAND (curpos, 0);
+ align = tree_to_uhwi (TREE_OPERAND (curpos, 1));
+ align = scale_by_factor_of (offset, align);
+ last_pos = round_up (last_pos, align);
+ pos = compute_related_constant (curpos, last_pos);
+ }
+ else if (!pos
+ && TREE_CODE (curpos) == PLUS_EXPR
+ && tree_fits_uhwi_p (TREE_OPERAND (curpos, 1))
+ && TREE_CODE (TREE_OPERAND (curpos, 0)) == MULT_EXPR
+ && tree_fits_uhwi_p
+ (TREE_OPERAND (TREE_OPERAND (curpos, 0), 1)))
+ {
+ tree offset = TREE_OPERAND (TREE_OPERAND (curpos, 0), 0);
+ unsigned HOST_WIDE_INT addend
+ = tree_to_uhwi (TREE_OPERAND (curpos, 1));
+ align
+ = tree_to_uhwi (TREE_OPERAND (TREE_OPERAND (curpos, 0), 1));
+ align = scale_by_factor_of (offset, align);
+ align = MIN (align, addend & -addend);
+ last_pos = round_up (last_pos, align);
+ pos = compute_related_constant (curpos, last_pos);
+ }
+ else if (potential_alignment_gap (prev_old_field, old_field, pos))
+ {
+ align = TYPE_ALIGN (field_type);
+ last_pos = round_up (last_pos, align);
+ pos = compute_related_constant (curpos, last_pos);
+ }
+
+ /* If we can't compute a position, set it to zero.
+
+ ??? We really should abort here, but it's too much work
+ to get this correct for all cases. */
+ if (!pos)
+ pos = bitsize_zero_node;
+
+ /* See if this type is variable-sized and make a pointer type
+ and indicate the indirection if so. Beware that the debug
+ back-end may adjust the position computed above according
+ to the alignment of the field type, i.e. the pointer type
+ in this case, if we don't preventively counter that. */
+ if (TREE_CODE (DECL_SIZE (old_field)) != INTEGER_CST)
+ {
+ field_type = build_pointer_type (field_type);
+ if (align != 0 && TYPE_ALIGN (field_type) > align)
+ {
+ field_type = copy_node (field_type);
+ TYPE_ALIGN (field_type) = align;
+ }
+ var = true;
+ }
+
+ /* Make a new field name, if necessary. */
+ if (var || align != 0)
+ {
+ char suffix[16];
+
+ if (align != 0)
+ sprintf (suffix, "XV%c%u", var ? 'L' : 'A',
+ align / BITS_PER_UNIT);
+ else
+ strcpy (suffix, "XVL");
+
+ field_name = concat_name (field_name, suffix);
+ }
+
+ new_field
+ = create_field_decl (field_name, field_type, new_record_type,
+ DECL_SIZE (old_field), pos, 0, 0);
+ DECL_CHAIN (new_field) = TYPE_FIELDS (new_record_type);
+ TYPE_FIELDS (new_record_type) = new_field;
+
+ /* If old_field is a QUAL_UNION_TYPE, take its size as being
+ zero. The only time it's not the last field of the record
+ is when there are other components at fixed positions after
+ it (meaning there was a rep clause for every field) and we
+ want to be able to encode them. */
+ last_pos = size_binop (PLUS_EXPR, bit_position (old_field),
+ (TREE_CODE (TREE_TYPE (old_field))
+ == QUAL_UNION_TYPE)
+ ? bitsize_zero_node
+ : DECL_SIZE (old_field));
+ prev_old_field = old_field;
+ }
+
+ TYPE_FIELDS (new_record_type) = nreverse (TYPE_FIELDS (new_record_type));
+
+ add_parallel_type (record_type, new_record_type);
+ }
+}
+
+/* Utility function of above to merge LAST_SIZE, the previous size of a record
+ with FIRST_BIT and SIZE that describe a field. SPECIAL is true if this
+ represents a QUAL_UNION_TYPE in which case we must look for COND_EXPRs and
+ replace a value of zero with the old size. If HAS_REP is true, we take the
+ MAX of the end position of this field with LAST_SIZE. In all other cases,
+ we use FIRST_BIT plus SIZE. Return an expression for the size. */
+
+static tree
+merge_sizes (tree last_size, tree first_bit, tree size, bool special,
+ bool has_rep)
+{
+ tree type = TREE_TYPE (last_size);
+ tree new_size;
+
+ if (!special || TREE_CODE (size) != COND_EXPR)
+ {
+ new_size = size_binop (PLUS_EXPR, first_bit, size);
+ if (has_rep)
+ new_size = size_binop (MAX_EXPR, last_size, new_size);
+ }
+
+ else
+ new_size = fold_build3 (COND_EXPR, type, TREE_OPERAND (size, 0),
+ integer_zerop (TREE_OPERAND (size, 1))
+ ? last_size : merge_sizes (last_size, first_bit,
+ TREE_OPERAND (size, 1),
+ 1, has_rep),
+ integer_zerop (TREE_OPERAND (size, 2))
+ ? last_size : merge_sizes (last_size, first_bit,
+ TREE_OPERAND (size, 2),
+ 1, has_rep));
+
+ /* We don't need any NON_LVALUE_EXPRs and they can confuse us (especially
+ when fed through substitute_in_expr) into thinking that a constant
+ size is not constant. */
+ while (TREE_CODE (new_size) == NON_LVALUE_EXPR)
+ new_size = TREE_OPERAND (new_size, 0);
+
+ return new_size;
+}
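+
+#if 0
+/* Illustrative sketch, not part of the original sources: the
+   non-COND_EXPR arm of merge_sizes above on scalars, with hypothetical
+   names.  With a rep clause fields may be laid out out of order, so the
+   running size is the maximum of the previous size and this field's
+   ending bit; without one, fields are in increasing position and the
+   ending bit suffices.  */
+static unsigned long
+merged_size (unsigned long last_size, unsigned long first_bit,
+             unsigned long size, int has_rep)
+{
+  unsigned long end = first_bit + size;
+  return has_rep && last_size > end ? last_size : end;
+}
+#endif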
+
+/* Utility function of above to see if OP0 and OP1, both of SIZETYPE, are
+ related by the addition of a constant. Return that constant if so. */
+
+static tree
+compute_related_constant (tree op0, tree op1)
+{
+ tree op0_var, op1_var;
+ tree op0_con = split_plus (op0, &op0_var);
+ tree op1_con = split_plus (op1, &op1_var);
+ tree result = size_binop (MINUS_EXPR, op0_con, op1_con);
+
+ if (operand_equal_p (op0_var, op1_var, 0))
+ return result;
+ else if (operand_equal_p (op0, size_binop (PLUS_EXPR, op1_var, result), 0))
+ return result;
+ else
+ return 0;
+}
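+
+#if 0
+/* Illustrative sketch, not part of the original sources: the identity
+   compute_related_constant above relies on, with hypothetical names.  If
+   two positions share the same variable part v, their difference is the
+   difference of their constant parts, independent of v.  */
+static long
+related_constant (long v, long c0, long c1)
+{
+  long op0 = v + c0;
+  long op1 = v + c1;
+  return op0 - op1;  /* always c0 - c1 */
+}
+#endif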
+
+/* Utility function of above to split a tree IN which may be a sum, into a
+ constant part, which is returned, and a variable part, which is stored
+ in *PVAR. *PVAR may be bitsize_zero_node. All operations must be of
+ bitsizetype. */
+
+static tree
+split_plus (tree in, tree *pvar)
+{
+ /* Strip conversions in order to ease the tree traversal and maximize the
+ potential for constant or plus/minus discovery. We need to be careful
+ to always return and set *pvar to bitsizetype trees, but it's worth
+ the effort. */
+ in = remove_conversions (in, false);
+
+ *pvar = convert (bitsizetype, in);
+
+ if (TREE_CODE (in) == INTEGER_CST)
+ {
+ *pvar = bitsize_zero_node;
+ return convert (bitsizetype, in);
+ }
+ else if (TREE_CODE (in) == PLUS_EXPR || TREE_CODE (in) == MINUS_EXPR)
+ {
+ tree lhs_var, rhs_var;
+ tree lhs_con = split_plus (TREE_OPERAND (in, 0), &lhs_var);
+ tree rhs_con = split_plus (TREE_OPERAND (in, 1), &rhs_var);
+
+ if (lhs_var == TREE_OPERAND (in, 0)
+ && rhs_var == TREE_OPERAND (in, 1))
+ return bitsize_zero_node;
+
+ *pvar = size_binop (TREE_CODE (in), lhs_var, rhs_var);
+ return size_binop (TREE_CODE (in), lhs_con, rhs_con);
+ }
+ else
+ return bitsize_zero_node;
+}
+
+/* Return a FUNCTION_TYPE node. RETURN_TYPE is the type returned by the
+ subprogram. If it is VOID_TYPE, then we are dealing with a procedure,
+ otherwise we are dealing with a function. PARAM_DECL_LIST is a list of
+ PARM_DECL nodes that are the subprogram parameters. CICO_LIST is the
+ copy-in/copy-out list to be stored into the TYPE_CICO_LIST field.
+ RETURN_UNCONSTRAINED_P is true if the function returns an unconstrained
+ object. RETURN_BY_DIRECT_REF_P is true if the function returns by direct
+ reference. RETURN_BY_INVISI_REF_P is true if the function returns by
+ invisible reference. */
+
+tree
+create_subprog_type (tree return_type, tree param_decl_list, tree cico_list,
+ bool return_unconstrained_p, bool return_by_direct_ref_p,
+ bool return_by_invisi_ref_p)
+{
+ /* A list of the data type nodes of the subprogram formal parameters.
+ This list is generated by traversing the input list of PARM_DECL
+ nodes. */
+ vec<tree, va_gc> *param_type_list = NULL;
+ tree t, type;
+
+ for (t = param_decl_list; t; t = DECL_CHAIN (t))
+ vec_safe_push (param_type_list, TREE_TYPE (t));
+
+ type = build_function_type_vec (return_type, param_type_list);
+
+ /* TYPE may have been shared since GCC hashes types. If it has a different
+ CICO_LIST, make a copy. Likewise for the various flags. */
+ if (!fntype_same_flags_p (type, cico_list, return_unconstrained_p,
+ return_by_direct_ref_p, return_by_invisi_ref_p))
+ {
+ type = copy_type (type);
+ TYPE_CI_CO_LIST (type) = cico_list;
+ TYPE_RETURN_UNCONSTRAINED_P (type) = return_unconstrained_p;
+ TYPE_RETURN_BY_DIRECT_REF_P (type) = return_by_direct_ref_p;
+ TREE_ADDRESSABLE (type) = return_by_invisi_ref_p;
+ }
+
+ return type;
+}
+
+/* Return a copy of TYPE but safe to modify in any way. */
+
+tree
+copy_type (tree type)
+{
+ tree new_type = copy_node (type);
+
+ /* Unshare the language-specific data. */
+ if (TYPE_LANG_SPECIFIC (type))
+ {
+ TYPE_LANG_SPECIFIC (new_type) = NULL;
+ SET_TYPE_LANG_SPECIFIC (new_type, GET_TYPE_LANG_SPECIFIC (type));
+ }
+
+ /* And the contents of the language-specific slot if needed. */
+ if ((INTEGRAL_TYPE_P (type) || TREE_CODE (type) == REAL_TYPE)
+ && TYPE_RM_VALUES (type))
+ {
+ TYPE_RM_VALUES (new_type) = NULL_TREE;
+ SET_TYPE_RM_SIZE (new_type, TYPE_RM_SIZE (type));
+ SET_TYPE_RM_MIN_VALUE (new_type, TYPE_RM_MIN_VALUE (type));
+ SET_TYPE_RM_MAX_VALUE (new_type, TYPE_RM_MAX_VALUE (type));
+ }
+
+ /* copy_node clears this field instead of copying it, because it is
+ aliased with TREE_CHAIN. */
+ TYPE_STUB_DECL (new_type) = TYPE_STUB_DECL (type);
+
+ TYPE_POINTER_TO (new_type) = 0;
+ TYPE_REFERENCE_TO (new_type) = 0;
+ TYPE_MAIN_VARIANT (new_type) = new_type;
+ TYPE_NEXT_VARIANT (new_type) = 0;
+
+ return new_type;
+}
+
+/* Return a subtype of sizetype with range MIN to MAX and whose
+ TYPE_INDEX_TYPE is INDEX. GNAT_NODE is used for the position
+ of the associated TYPE_DECL. */
+
+tree
+create_index_type (tree min, tree max, tree index, Node_Id gnat_node)
+{
+ /* First build a type for the desired range. */
+ tree type = build_nonshared_range_type (sizetype, min, max);
+
+ /* Then set the index type. */
+ SET_TYPE_INDEX_TYPE (type, index);
+ create_type_decl (NULL_TREE, type, true, false, gnat_node);
+
+ return type;
+}
+
+/* Return a subtype of TYPE with range MIN to MAX. If TYPE is NULL,
+ sizetype is used. */
+
+tree
+create_range_type (tree type, tree min, tree max)
+{
+ tree range_type;
+
+ if (type == NULL_TREE)
+ type = sizetype;
+
+ /* First build a type with the base range. */
+ range_type = build_nonshared_range_type (type, TYPE_MIN_VALUE (type),
+ TYPE_MAX_VALUE (type));
+
+ /* Then set the actual range. */
+ SET_TYPE_RM_MIN_VALUE (range_type, convert (type, min));
+ SET_TYPE_RM_MAX_VALUE (range_type, convert (type, max));
+
+ return range_type;
+}
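+
+#if 0
+/* Illustrative sketch, not part of the original sources: the two pairs
+   of bounds kept by the range types built above, as a hypothetical
+   struct.  The GCC bounds stay those of the base type so the optimizers
+   make no unwarranted range assumptions, while the Ada (RM) bounds carry
+   the declared range used for constraint checks.  */
+struct range_type_example
+{
+  long gcc_min, gcc_max;  /* base-type range, e.g. full machine range */
+  long rm_min, rm_max;    /* declared range, e.g. 1 .. 10 */
+};
+#endif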
+
+/* Return a TYPE_DECL node suitable for the TYPE_STUB_DECL field of a type.
+ TYPE_NAME gives the name of the type and TYPE is a ..._TYPE node giving
+ its data type. */
+
+tree
+create_type_stub_decl (tree type_name, tree type)
+{
+ /* Using a named TYPE_DECL ensures that a type name marker is emitted in
+ STABS while setting DECL_ARTIFICIAL ensures that no DW_TAG_typedef is
+ emitted in DWARF. */
+ tree type_decl = build_decl (input_location, TYPE_DECL, type_name, type);
+ DECL_ARTIFICIAL (type_decl) = 1;
+ TYPE_ARTIFICIAL (type) = 1;
+ return type_decl;
+}
+
+/* Return a TYPE_DECL node. TYPE_NAME gives the name of the type and TYPE
+ is a ..._TYPE node giving its data type. ARTIFICIAL_P is true if this
+ is a declaration that was generated by the compiler. DEBUG_INFO_P is
+ true if we need to write debug information about this type. GNAT_NODE
+ is used for the position of the decl. */
+
+tree
+create_type_decl (tree type_name, tree type, bool artificial_p,
+ bool debug_info_p, Node_Id gnat_node)
+{
+ enum tree_code code = TREE_CODE (type);
+ bool named = TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL;
+ tree type_decl;
+
+ /* Only the builtin TYPE_STUB_DECL should be used for dummy types. */
+ gcc_assert (!TYPE_IS_DUMMY_P (type));
+
+ /* If the type hasn't been named yet, we're naming it; preserve an existing
+ TYPE_STUB_DECL that has been attached to it for some purpose. */
+ if (!named && TYPE_STUB_DECL (type))
+ {
+ type_decl = TYPE_STUB_DECL (type);
+ DECL_NAME (type_decl) = type_name;
+ }
+ else
+ type_decl = build_decl (input_location, TYPE_DECL, type_name, type);
+
+ DECL_ARTIFICIAL (type_decl) = artificial_p;
+ TYPE_ARTIFICIAL (type) = artificial_p;
+
+ /* Add this decl to the current binding level. */
+ gnat_pushdecl (type_decl, gnat_node);
+
+ /* If we're naming the type, equate the TYPE_STUB_DECL to the name.
+ This causes the name to be also viewed as a "tag" by the debug
+ back-end, with the advantage that no DW_TAG_typedef is emitted
+ for artificial "tagged" types in DWARF. */
+ if (!named)
+ TYPE_STUB_DECL (type) = type_decl;
+
+ /* Do not generate debug info for UNCONSTRAINED_ARRAY_TYPE that the
+ back-end doesn't support, and for others if we don't need to. */
+ if (code == UNCONSTRAINED_ARRAY_TYPE || !debug_info_p)
+ DECL_IGNORED_P (type_decl) = 1;
+
+ return type_decl;
+}
+
+/* Return a VAR_DECL or CONST_DECL node.
+
+ VAR_NAME gives the name of the variable. ASM_NAME is its assembler name
+ (if provided). TYPE is its data type (a GCC ..._TYPE node). VAR_INIT is
+ the GCC tree for an optional initial expression; NULL_TREE if none.
+
+ CONST_FLAG is true if this variable is constant, in which case we might
+ return a CONST_DECL node unless CONST_DECL_ALLOWED_P is false.
+
+ PUBLIC_FLAG is true if this is for a reference to a public entity or for a
+ definition to be made visible outside of the current compilation unit, for
+ instance variable definitions in a package specification.
+
+ EXTERN_FLAG is true when processing an external variable declaration (as
+ opposed to a definition: no storage is to be allocated for the variable).
+
+ STATIC_FLAG is only relevant when not at top level. In that case
+ it indicates whether to always allocate storage to the variable.
+
+ GNAT_NODE is used for the position of the decl. */
+
+tree
+create_var_decl_1 (tree var_name, tree asm_name, tree type, tree var_init,
+ bool const_flag, bool public_flag, bool extern_flag,
+ bool static_flag, bool const_decl_allowed_p,
+ struct attrib *attr_list, Node_Id gnat_node)
+{
+ /* Whether the initializer is a constant initializer. At the global level
+ or for an external object or an object to be allocated in static memory,
+ we check that it is a valid constant expression for use in initializing
+ a static variable; otherwise, we only check that it is constant. */
+ bool init_const
+ = (var_init != 0
+ && gnat_types_compatible_p (type, TREE_TYPE (var_init))
+ && (global_bindings_p () || extern_flag || static_flag
+ ? initializer_constant_valid_p (var_init, TREE_TYPE (var_init)) != 0
+ : TREE_CONSTANT (var_init)));
+
+ /* Whether we will make TREE_CONSTANT the DECL we produce here, in which
+ case the initializer may be used in lieu of the DECL node (as done in
+ Identifier_to_gnu). This is useful to prevent the need of elaboration
+ code when an identifier for which such a decl is made is in turn used as
+ an initializer. We used to rely on CONST vs VAR_DECL for this purpose,
+ but extra constraints apply to this choice (see below) and are not
+ relevant to the distinction we wish to make. */
+ bool constant_p = const_flag && init_const;
+
+ /* The actual DECL node. CONST_DECL was initially intended for enumerals
+ and may be used for scalars in general but not for aggregates. */
+ tree var_decl
+ = build_decl (input_location,
+ (constant_p && const_decl_allowed_p
+ && !AGGREGATE_TYPE_P (type)) ? CONST_DECL : VAR_DECL,
+ var_name, type);
+
+ /* If this is external, throw away any initializations (they will be done
+ elsewhere) unless this is a constant for which we would like to remain
+ able to get the initializer. If we are defining a global here, leave a
+ constant initialization and save any variable elaborations for the
+ elaboration routine. If we are just annotating types, throw away the
+ initialization if it isn't a constant. */
+ if ((extern_flag && !constant_p)
+ || (type_annotate_only && var_init && !TREE_CONSTANT (var_init)))
+ var_init = NULL_TREE;
+
+ /* At the global level, an initializer requiring code to be generated
+ produces elaboration statements. Check that such statements are allowed,
+ that is, not violating a No_Elaboration_Code restriction. */
+ if (global_bindings_p () && var_init != 0 && !init_const)
+ Check_Elaboration_Code_Allowed (gnat_node);
+
+ DECL_INITIAL (var_decl) = var_init;
+ TREE_READONLY (var_decl) = const_flag;
+ DECL_EXTERNAL (var_decl) = extern_flag;
+ TREE_PUBLIC (var_decl) = public_flag || extern_flag;
+ TREE_CONSTANT (var_decl) = constant_p;
+ TREE_THIS_VOLATILE (var_decl) = TREE_SIDE_EFFECTS (var_decl)
+ = TYPE_VOLATILE (type);
+
+ /* Ada doesn't feature Fortran-like COMMON variables so we shouldn't
+ try to fiddle with DECL_COMMON. However, on platforms that don't
+ support global BSS sections, uninitialized global variables would
+ go in DATA instead, thus increasing the size of the executable. */
+ if (!flag_no_common
+ && TREE_CODE (var_decl) == VAR_DECL
+ && TREE_PUBLIC (var_decl)
+ && !have_global_bss_p ())
+ DECL_COMMON (var_decl) = 1;
+
+ /* At the global binding level, we need to allocate static storage for the
+ variable if it isn't external. Otherwise, we allocate automatic storage
+ unless requested not to. */
+ TREE_STATIC (var_decl)
+ = !extern_flag && (static_flag || global_bindings_p ());
+
+ /* For an external constant whose initializer is not absolute, do not emit
+ debug info. In DWARF this would mean a global relocation in a read-only
+ section which runs afoul of the PE-COFF run-time relocation mechanism. */
+ if (extern_flag
+ && constant_p
+ && var_init
+ && initializer_constant_valid_p (var_init, TREE_TYPE (var_init))
+ != null_pointer_node)
+ DECL_IGNORED_P (var_decl) = 1;
+
+ if (TREE_SIDE_EFFECTS (var_decl))
+ TREE_ADDRESSABLE (var_decl) = 1;
+
+ /* ??? Some attributes cannot be applied to CONST_DECLs. */
+ if (TREE_CODE (var_decl) == VAR_DECL)
+ process_attributes (&var_decl, &attr_list, true, gnat_node);
+
+ /* Add this decl to the current binding level. */
+ gnat_pushdecl (var_decl, gnat_node);
+
+ if (TREE_CODE (var_decl) == VAR_DECL)
+ {
+ if (asm_name)
+ SET_DECL_ASSEMBLER_NAME (var_decl, asm_name);
+
+ if (global_bindings_p ())
+ rest_of_decl_compilation (var_decl, true, 0);
+ }
+
+ return var_decl;
+}
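+
+#if 0
+/* Illustrative sketch, not part of the original sources: the choice of
+   declaration node made above, as a standalone predicate with
+   hypothetical names.  A CONST_DECL is only produced for a constant
+   object with a constant initializer, when the caller allows it and the
+   type is not an aggregate.  */
+static int
+use_const_decl_p (int const_flag, int init_const,
+                  int const_decl_allowed_p, int is_aggregate)
+{
+  return const_flag && init_const && const_decl_allowed_p && !is_aggregate;
+}
+#endif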
+
+/* Return true if TYPE, an aggregate type, contains (or is) an array. */
+
+static bool
+aggregate_type_contains_array_p (tree type)
+{
+ switch (TREE_CODE (type))
+ {
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ tree field;
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
+ if (AGGREGATE_TYPE_P (TREE_TYPE (field))
+ && aggregate_type_contains_array_p (TREE_TYPE (field)))
+ return true;
+ return false;
+ }
+
+ case ARRAY_TYPE:
+ return true;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return a FIELD_DECL node. FIELD_NAME is the field's name, FIELD_TYPE is
+ its type and RECORD_TYPE is the type of the enclosing record. If SIZE is
+ nonzero, it is the specified size of the field. If POS is nonzero, it is
+ the bit position. PACKED is 1 if the enclosing record is packed, -1 if it
+ has Component_Alignment of Storage_Unit. If ADDRESSABLE is nonzero, it
+ means we are allowed to take the address of the field; if it is negative,
+ we should not make a bitfield, which is used by make_aligning_type. */
+
+tree
+create_field_decl (tree field_name, tree field_type, tree record_type,
+ tree size, tree pos, int packed, int addressable)
+{
+ tree field_decl = build_decl (input_location,
+ FIELD_DECL, field_name, field_type);
+
+ DECL_CONTEXT (field_decl) = record_type;
+ TREE_READONLY (field_decl) = TYPE_READONLY (field_type);
+
+ /* If FIELD_TYPE is BLKmode, we must ensure this is aligned to at least a
+ byte boundary since GCC cannot handle less-aligned BLKmode bitfields.
+ Likewise for an aggregate without specified position that contains an
+ array, because in this case slices of variable length of this array
+ must be handled by GCC and variable-sized objects need to be aligned
+ to at least a byte boundary. */
+ if (packed && (TYPE_MODE (field_type) == BLKmode
+ || (!pos
+ && AGGREGATE_TYPE_P (field_type)
+ && aggregate_type_contains_array_p (field_type))))
+ DECL_ALIGN (field_decl) = BITS_PER_UNIT;
+
+  /* If a size is specified, use it.  Otherwise, if the record type is packed,
+ compute a size to use, which may differ from the object's natural size.
+ We always set a size in this case to trigger the checks for bitfield
+ creation below, which is typically required when no position has been
+ specified. */
+ if (size)
+ size = convert (bitsizetype, size);
+ else if (packed == 1)
+ {
+ size = rm_size (field_type);
+ if (TYPE_MODE (field_type) == BLKmode)
+ size = round_up (size, BITS_PER_UNIT);
+ }
+
+  /* If ADDRESSABLE allows it, we make a bitfield when a size is specified,
+     for either of two reasons: the size differs from the natural size, or
+     the alignment is insufficient.  There are a number of ways the latter
+     can be true.
+
+ We never make a bitfield if the type of the field has a nonconstant size,
+ because no such entity requiring bitfield operations should reach here.
+
+ We do *preventively* make a bitfield when there might be the need for it
+ but we don't have all the necessary information to decide, as is the case
+     for a field with no specified position in a packed record.
+
+ We also don't look at STRICT_ALIGNMENT here, and rely on later processing
+ in layout_decl or finish_record_type to clear the bit_field indication if
+ it is in fact not needed. */
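+  /* E.g. an 8-bit field type given a 3-bit size by a representation clause
+     differs from its natural size, so DECL_BIT_FIELD is set below provided
+     ADDRESSABLE allows it.  */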
+ if (addressable >= 0
+ && size
+ && TREE_CODE (size) == INTEGER_CST
+ && TREE_CODE (TYPE_SIZE (field_type)) == INTEGER_CST
+ && (!tree_int_cst_equal (size, TYPE_SIZE (field_type))
+ || (pos && !value_factor_p (pos, TYPE_ALIGN (field_type)))
+ || packed
+ || (TYPE_ALIGN (record_type) != 0
+ && TYPE_ALIGN (record_type) < TYPE_ALIGN (field_type))))
+ {
+ DECL_BIT_FIELD (field_decl) = 1;
+ DECL_SIZE (field_decl) = size;
+ if (!packed && !pos)
+ {
+ if (TYPE_ALIGN (record_type) != 0
+ && TYPE_ALIGN (record_type) < TYPE_ALIGN (field_type))
+ DECL_ALIGN (field_decl) = TYPE_ALIGN (record_type);
+ else
+ DECL_ALIGN (field_decl) = TYPE_ALIGN (field_type);
+ }
+ }
+
+ DECL_PACKED (field_decl) = pos ? DECL_BIT_FIELD (field_decl) : packed;
+
+ /* Bump the alignment if need be, either for bitfield/packing purposes or
+ to satisfy the type requirements if no such consideration applies. When
+ we get the alignment from the type, indicate if this is from an explicit
+ user request, which prevents stor-layout from lowering it later on. */
+ {
+ unsigned int bit_align
+ = (DECL_BIT_FIELD (field_decl) ? 1
+ : packed && TYPE_MODE (field_type) != BLKmode ? BITS_PER_UNIT : 0);
+
+ if (bit_align > DECL_ALIGN (field_decl))
+ DECL_ALIGN (field_decl) = bit_align;
+ else if (!bit_align && TYPE_ALIGN (field_type) > DECL_ALIGN (field_decl))
+ {
+ DECL_ALIGN (field_decl) = TYPE_ALIGN (field_type);
+ DECL_USER_ALIGN (field_decl) = TYPE_USER_ALIGN (field_type);
+ }
+ }
+
+ if (pos)
+ {
+ /* We need to pass in the alignment the DECL is known to have.
+ This is the lowest-order bit set in POS, but no more than
+ the alignment of the record, if one is specified. Note
+ that an alignment of 0 is taken as infinite. */
+ unsigned int known_align;
+
+ if (tree_fits_uhwi_p (pos))
+ known_align = tree_to_uhwi (pos) & - tree_to_uhwi (pos);
+ else
+ known_align = BITS_PER_UNIT;
+
+ if (TYPE_ALIGN (record_type)
+ && (known_align == 0 || known_align > TYPE_ALIGN (record_type)))
+ known_align = TYPE_ALIGN (record_type);
+
+ layout_decl (field_decl, known_align);
+ SET_DECL_OFFSET_ALIGN (field_decl,
+ tree_fits_uhwi_p (pos) ? BIGGEST_ALIGNMENT
+ : BITS_PER_UNIT);
+ pos_from_bit (&DECL_FIELD_OFFSET (field_decl),
+ &DECL_FIELD_BIT_OFFSET (field_decl),
+ DECL_OFFSET_ALIGN (field_decl), pos);
+ }
+
+ /* In addition to what our caller says, claim the field is addressable if we
+ know that its type is not suitable.
+
+ The field may also be "technically" nonaddressable, meaning that even if
+ we attempt to take the field's address we will actually get the address
+ of a copy. This is the case for true bitfields, but the DECL_BIT_FIELD
+ value we have at this point is not accurate enough, so we don't account
+ for this here and let finish_record_type decide. */
+ if (!addressable && !type_for_nonaliased_component_p (field_type))
+ addressable = 1;
+
+ DECL_NONADDRESSABLE_P (field_decl) = !addressable;
+
+ return field_decl;
+}
+
+/* Return a PARM_DECL node. PARAM_NAME is the name of the parameter and
+ PARAM_TYPE is its type. READONLY is true if the parameter is readonly
+ (either an In parameter or an address of a pass-by-ref parameter). */
+
+tree
+create_param_decl (tree param_name, tree param_type, bool readonly)
+{
+ tree param_decl = build_decl (input_location,
+ PARM_DECL, param_name, param_type);
+
+ /* Honor TARGET_PROMOTE_PROTOTYPES like the C compiler, as not doing so
+ can lead to various ABI violations. */
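+  /* A biased type needs care here: e.g. a subtype with range 100 .. 227
+     held in 7 bits with biased representation stores VALUE - 100, so the
+     widened type built below must keep the bias for the parameter to be
+     decoded correctly.  */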
+ if (targetm.calls.promote_prototypes (NULL_TREE)
+ && INTEGRAL_TYPE_P (param_type)
+ && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node))
+ {
+ /* We have to be careful about biased types here. Make a subtype
+ of integer_type_node with the proper biasing. */
+ if (TREE_CODE (param_type) == INTEGER_TYPE
+ && TYPE_BIASED_REPRESENTATION_P (param_type))
+ {
+ tree subtype
+ = make_unsigned_type (TYPE_PRECISION (integer_type_node));
+ TREE_TYPE (subtype) = integer_type_node;
+ TYPE_BIASED_REPRESENTATION_P (subtype) = 1;
+ SET_TYPE_RM_MIN_VALUE (subtype, TYPE_MIN_VALUE (param_type));
+ SET_TYPE_RM_MAX_VALUE (subtype, TYPE_MAX_VALUE (param_type));
+ param_type = subtype;
+ }
+ else
+ param_type = integer_type_node;
+ }
+
+ DECL_ARG_TYPE (param_decl) = param_type;
+ TREE_READONLY (param_decl) = readonly;
+ return param_decl;
+}
+
+/* Process the attributes in ATTR_LIST for NODE, which is either a DECL or
+ a TYPE. If IN_PLACE is true, the tree pointed to by NODE should not be
+ changed. GNAT_NODE is used for the position of error messages. */
+
+void
+process_attributes (tree *node, struct attrib **attr_list, bool in_place,
+ Node_Id gnat_node)
+{
+ struct attrib *attr;
+
+ for (attr = *attr_list; attr; attr = attr->next)
+ switch (attr->type)
+ {
+ case ATTR_MACHINE_ATTRIBUTE:
+ Sloc_to_locus (Sloc (gnat_node), &input_location);
+ decl_attributes (node, tree_cons (attr->name, attr->args, NULL_TREE),
+ in_place ? ATTR_FLAG_TYPE_IN_PLACE : 0);
+ break;
+
+ case ATTR_LINK_ALIAS:
+ if (!DECL_EXTERNAL (*node))
+ {
+ TREE_STATIC (*node) = 1;
+ assemble_alias (*node, attr->name);
+ }
+ break;
+
+ case ATTR_WEAK_EXTERNAL:
+ if (SUPPORTS_WEAK)
+ declare_weak (*node);
+ else
+ post_error ("?weak declarations not supported on this target",
+ attr->error_point);
+ break;
+
+ case ATTR_LINK_SECTION:
+ if (targetm_common.have_named_sections)
+ {
+ DECL_SECTION_NAME (*node)
+ = build_string (IDENTIFIER_LENGTH (attr->name),
+ IDENTIFIER_POINTER (attr->name));
+ DECL_COMMON (*node) = 0;
+ }
+ else
+ post_error ("?section attributes are not supported for this target",
+ attr->error_point);
+ break;
+
+ case ATTR_LINK_CONSTRUCTOR:
+ DECL_STATIC_CONSTRUCTOR (*node) = 1;
+ TREE_USED (*node) = 1;
+ break;
+
+ case ATTR_LINK_DESTRUCTOR:
+ DECL_STATIC_DESTRUCTOR (*node) = 1;
+ TREE_USED (*node) = 1;
+ break;
+
+ case ATTR_THREAD_LOCAL_STORAGE:
+ DECL_TLS_MODEL (*node) = decl_default_tls_model (*node);
+ DECL_COMMON (*node) = 0;
+ break;
+ }
+
+ *attr_list = NULL;
+}
+
+/* Record DECL as a global renaming pointer. */
+
+void
+record_global_renaming_pointer (tree decl)
+{
+ gcc_assert (!DECL_LOOP_PARM_P (decl) && DECL_RENAMED_OBJECT (decl));
+ vec_safe_push (global_renaming_pointers, decl);
+}
+
+/* Invalidate the global renaming pointers. */
+
+void
+invalidate_global_renaming_pointers (void)
+{
+ unsigned int i;
+ tree iter;
+
+ if (global_renaming_pointers == NULL)
+ return;
+
+ FOR_EACH_VEC_ELT (*global_renaming_pointers, i, iter)
+ SET_DECL_RENAMED_OBJECT (iter, NULL_TREE);
+
+ vec_free (global_renaming_pointers);
+}
+
+/* Return true if VALUE is known to be a multiple of FACTOR, which must be
+ a power of 2. */
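+/* E.g. 24 is a multiple of 8, as is 8 * N for any N (via the MULT_EXPR
+   case), so true is returned for both; 20 is not, so false is returned.  */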
+
+bool
+value_factor_p (tree value, HOST_WIDE_INT factor)
+{
+ if (tree_fits_uhwi_p (value))
+ return tree_to_uhwi (value) % factor == 0;
+
+ if (TREE_CODE (value) == MULT_EXPR)
+ return (value_factor_p (TREE_OPERAND (value, 0), factor)
+ || value_factor_p (TREE_OPERAND (value, 1), factor));
+
+ return false;
+}
+
+/* Return VALUE scaled by the biggest power-of-2 factor of EXPR. */
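+/* E.g. if EXPR is X & -8, the mask has 3 trailing zero bits, so the result
+   is VALUE * 8.  */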
+
+static unsigned int
+scale_by_factor_of (tree expr, unsigned int value)
+{
+ expr = remove_conversions (expr, true);
+
+ /* An expression which is a bitwise AND with a mask has a power-of-2 factor
+ corresponding to the number of trailing zeros of the mask. */
+ if (TREE_CODE (expr) == BIT_AND_EXPR
+ && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST)
+ {
+ unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (TREE_OPERAND (expr, 1));
+ unsigned int i = 0;
+
+ while ((mask & 1) == 0 && i < HOST_BITS_PER_WIDE_INT)
+ {
+ mask >>= 1;
+ value *= 2;
+ i++;
+ }
+ }
+
+ return value;
+}
+
+/* Given two consecutive field decls PREV_FIELD and CURR_FIELD, return true
+ unless we can prove these 2 fields are laid out in such a way that no gap
+ exist between the end of PREV_FIELD and the beginning of CURR_FIELD. OFFSET
+ is the distance in bits between the end of PREV_FIELD and the starting
+ position of CURR_FIELD. It is ignored if null. */
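+/* E.g. with a null OFFSET, an 8-bit PREV_FIELD at constant bit position 0
+   followed by a CURR_FIELD whose DECL_ALIGN is 32 gives (0 + 8) % 32 != 0,
+   so a potential gap is reported.  */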
+
+static bool
+potential_alignment_gap (tree prev_field, tree curr_field, tree offset)
+{
+  /* If this is the first field of the record, there cannot be any gap.  */
+ if (!prev_field)
+ return false;
+
+ /* If the previous field is a union type, then return false: The only
+ time when such a field is not the last field of the record is when
+ there are other components at fixed positions after it (meaning there
+ was a rep clause for every field), in which case we don't want the
+ alignment constraint to override them. */
+ if (TREE_CODE (TREE_TYPE (prev_field)) == QUAL_UNION_TYPE)
+ return false;
+
+ /* If the distance between the end of prev_field and the beginning of
+ curr_field is constant, then there is a gap if the value of this
+     constant is nonzero.  */
+ if (offset && tree_fits_uhwi_p (offset))
+ return !integer_zerop (offset);
+
+ /* If the size and position of the previous field are constant,
+ then check the sum of this size and position. There will be a gap
+     iff it is not a multiple of the current field alignment.  */
+ if (tree_fits_uhwi_p (DECL_SIZE (prev_field))
+ && tree_fits_uhwi_p (bit_position (prev_field)))
+ return ((tree_to_uhwi (bit_position (prev_field))
+ + tree_to_uhwi (DECL_SIZE (prev_field)))
+ % DECL_ALIGN (curr_field) != 0);
+
+ /* If both the position and size of the previous field are multiples
+ of the current field alignment, there cannot be any gap. */
+ if (value_factor_p (bit_position (prev_field), DECL_ALIGN (curr_field))
+ && value_factor_p (DECL_SIZE (prev_field), DECL_ALIGN (curr_field)))
+ return false;
+
+  /* Fallback: return that there may be a potential gap.  */
+ return true;
+}
+
+/* Return a LABEL_DECL with LABEL_NAME. GNAT_NODE is used for the position
+ of the decl. */
+
+tree
+create_label_decl (tree label_name, Node_Id gnat_node)
+{
+ tree label_decl
+ = build_decl (input_location, LABEL_DECL, label_name, void_type_node);
+
+ DECL_MODE (label_decl) = VOIDmode;
+
+ /* Add this decl to the current binding level. */
+ gnat_pushdecl (label_decl, gnat_node);
+
+ return label_decl;
+}
+
+/* Return a FUNCTION_DECL node. SUBPROG_NAME is the name of the subprogram,
+ ASM_NAME is its assembler name, SUBPROG_TYPE is its type (a FUNCTION_TYPE
+ node), PARAM_DECL_LIST is the list of the subprogram arguments (a list of
+ PARM_DECL nodes chained through the DECL_CHAIN field).
+
+ INLINE_STATUS, PUBLIC_FLAG, EXTERN_FLAG, ARTIFICIAL_FLAG and ATTR_LIST are
+ used to set the appropriate fields in the FUNCTION_DECL. GNAT_NODE is
+ used for the position of the decl. */
+
+tree
+create_subprog_decl (tree subprog_name, tree asm_name, tree subprog_type,
+ tree param_decl_list, enum inline_status_t inline_status,
+ bool public_flag, bool extern_flag, bool artificial_flag,
+ struct attrib *attr_list, Node_Id gnat_node)
+{
+ tree subprog_decl = build_decl (input_location, FUNCTION_DECL, subprog_name,
+ subprog_type);
+ tree result_decl = build_decl (input_location, RESULT_DECL, NULL_TREE,
+ TREE_TYPE (subprog_type));
+ DECL_ARGUMENTS (subprog_decl) = param_decl_list;
+
+ /* If this is a non-inline function nested inside an inlined external
+ function, we cannot honor both requests without cloning the nested
+ function in the current unit since it is private to the other unit.
+ We could inline the nested function as well but it's probably better
+ to err on the side of too little inlining. */
+ if (inline_status != is_enabled
+ && !public_flag
+ && current_function_decl
+ && DECL_DECLARED_INLINE_P (current_function_decl)
+ && DECL_EXTERNAL (current_function_decl))
+ DECL_DECLARED_INLINE_P (current_function_decl) = 0;
+
+ DECL_ARTIFICIAL (subprog_decl) = artificial_flag;
+ DECL_EXTERNAL (subprog_decl) = extern_flag;
+
+ switch (inline_status)
+ {
+ case is_suppressed:
+ DECL_UNINLINABLE (subprog_decl) = 1;
+ break;
+
+ case is_disabled:
+ break;
+
+ case is_enabled:
+ DECL_DECLARED_INLINE_P (subprog_decl) = 1;
+ DECL_NO_INLINE_WARNING_P (subprog_decl) = artificial_flag;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ TREE_PUBLIC (subprog_decl) = public_flag;
+ TREE_READONLY (subprog_decl) = TYPE_READONLY (subprog_type);
+ TREE_THIS_VOLATILE (subprog_decl) = TYPE_VOLATILE (subprog_type);
+ TREE_SIDE_EFFECTS (subprog_decl) = TYPE_VOLATILE (subprog_type);
+
+ DECL_ARTIFICIAL (result_decl) = 1;
+ DECL_IGNORED_P (result_decl) = 1;
+ DECL_BY_REFERENCE (result_decl) = TREE_ADDRESSABLE (subprog_type);
+ DECL_RESULT (subprog_decl) = result_decl;
+
+ if (asm_name)
+ {
+ SET_DECL_ASSEMBLER_NAME (subprog_decl, asm_name);
+
+ /* The expand_main_function circuitry expects "main_identifier_node" to
+ designate the DECL_NAME of the 'main' entry point, in turn expected
+ to be declared as the "main" function literally by default. Ada
+ program entry points are typically declared with a different name
+ within the binder generated file, exported as 'main' to satisfy the
+ system expectations. Force main_identifier_node in this case. */
+ if (asm_name == main_identifier_node)
+ DECL_NAME (subprog_decl) = main_identifier_node;
+ }
+
+ process_attributes (&subprog_decl, &attr_list, true, gnat_node);
+
+ /* Add this decl to the current binding level. */
+ gnat_pushdecl (subprog_decl, gnat_node);
+
+ /* Output the assembler code and/or RTL for the declaration. */
+ rest_of_decl_compilation (subprog_decl, global_bindings_p (), 0);
+
+ return subprog_decl;
+}
+
+/* Set up the framework for generating code for SUBPROG_DECL, a subprogram
+ body. This routine needs to be invoked before processing the declarations
+ appearing in the subprogram. */
+
+void
+begin_subprog_body (tree subprog_decl)
+{
+ tree param_decl;
+
+ announce_function (subprog_decl);
+
+ /* This function is being defined. */
+ TREE_STATIC (subprog_decl) = 1;
+
+ current_function_decl = subprog_decl;
+
+ /* Enter a new binding level and show that all the parameters belong to
+ this function. */
+ gnat_pushlevel ();
+
+ for (param_decl = DECL_ARGUMENTS (subprog_decl); param_decl;
+ param_decl = DECL_CHAIN (param_decl))
+ DECL_CONTEXT (param_decl) = subprog_decl;
+
+ make_decl_rtl (subprog_decl);
+}
+
+/* Finish translating the current subprogram and set its BODY. */
+
+void
+end_subprog_body (tree body)
+{
+ tree fndecl = current_function_decl;
+
+ /* Attach the BLOCK for this level to the function and pop the level. */
+ BLOCK_SUPERCONTEXT (current_binding_level->block) = fndecl;
+ DECL_INITIAL (fndecl) = current_binding_level->block;
+ gnat_poplevel ();
+
+ /* Mark the RESULT_DECL as being in this subprogram. */
+ DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
+
+ /* The body should be a BIND_EXPR whose BLOCK is the top-level one. */
+ if (TREE_CODE (body) == BIND_EXPR)
+ {
+ BLOCK_SUPERCONTEXT (BIND_EXPR_BLOCK (body)) = fndecl;
+ DECL_INITIAL (fndecl) = BIND_EXPR_BLOCK (body);
+ }
+
+ DECL_SAVED_TREE (fndecl) = body;
+
+ current_function_decl = decl_function_context (fndecl);
+}
+
+/* Wrap up compilation of SUBPROG_DECL, a subprogram body. */
+
+void
+rest_of_subprog_body_compilation (tree subprog_decl)
+{
+ /* We cannot track the location of errors past this point. */
+ error_gnat_node = Empty;
+
+ /* If we're only annotating types, don't actually compile this function. */
+ if (type_annotate_only)
+ return;
+
+ /* Dump functions before gimplification. */
+ dump_function (TDI_original, subprog_decl);
+
+ if (!decl_function_context (subprog_decl))
+ cgraph_finalize_function (subprog_decl, false);
+ else
+ /* Register this function with cgraph just far enough to get it
+ added to our parent's nested function list. */
+ (void) cgraph_get_create_node (subprog_decl);
+}
+
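+/* Record DECL, a builtin function decl, in the current binding level and
+   return it.  */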
+tree
+gnat_builtin_function (tree decl)
+{
+ gnat_pushdecl (decl, Empty);
+ return decl;
+}
+
+/* Return an integer type with the number of bits of precision given by
+ PRECISION. UNSIGNEDP is nonzero if the type is unsigned; otherwise
+ it is a signed type. */
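+/* E.g. gnat_type_for_size (8, 1) yields an 8-bit unsigned type which, if
+   freshly created here, is named UNSIGNED_8.  */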
+
+tree
+gnat_type_for_size (unsigned precision, int unsignedp)
+{
+ tree t;
+ char type_name[20];
+
+ if (precision <= 2 * MAX_BITS_PER_WORD
+ && signed_and_unsigned_types[precision][unsignedp])
+ return signed_and_unsigned_types[precision][unsignedp];
+
+ if (unsignedp)
+ t = make_unsigned_type (precision);
+ else
+ t = make_signed_type (precision);
+
+ if (precision <= 2 * MAX_BITS_PER_WORD)
+ signed_and_unsigned_types[precision][unsignedp] = t;
+
+ if (!TYPE_NAME (t))
+ {
+ sprintf (type_name, "%sSIGNED_%u", unsignedp ? "UN" : "", precision);
+ TYPE_NAME (t) = get_identifier (type_name);
+ }
+
+ return t;
+}
+
+/* Likewise for floating-point types. */
+
+static tree
+float_type_for_precision (int precision, enum machine_mode mode)
+{
+ tree t;
+ char type_name[20];
+
+ if (float_types[(int) mode])
+ return float_types[(int) mode];
+
+ float_types[(int) mode] = t = make_node (REAL_TYPE);
+ TYPE_PRECISION (t) = precision;
+ layout_type (t);
+
+ gcc_assert (TYPE_MODE (t) == mode);
+ if (!TYPE_NAME (t))
+ {
+ sprintf (type_name, "FLOAT_%d", precision);
+ TYPE_NAME (t) = get_identifier (type_name);
+ }
+
+ return t;
+}
+
+/* Return a data type that has machine mode MODE.  If UNSIGNEDP is nonzero,
+   an unsigned type is returned; otherwise a signed type.  */
+
+tree
+gnat_type_for_mode (enum machine_mode mode, int unsignedp)
+{
+ if (mode == BLKmode)
+ return NULL_TREE;
+
+ if (mode == VOIDmode)
+ return void_type_node;
+
+ if (COMPLEX_MODE_P (mode))
+ return NULL_TREE;
+
+ if (SCALAR_FLOAT_MODE_P (mode))
+ return float_type_for_precision (GET_MODE_PRECISION (mode), mode);
+
+ if (SCALAR_INT_MODE_P (mode))
+ return gnat_type_for_size (GET_MODE_BITSIZE (mode), unsignedp);
+
+ if (VECTOR_MODE_P (mode))
+ {
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ tree inner_type = gnat_type_for_mode (inner_mode, unsignedp);
+ if (inner_type)
+ return build_vector_type_for_mode (inner_type, mode);
+ }
+
+ return NULL_TREE;
+}
+
+/* Return the unsigned version of TYPE_NODE, a scalar type.  */
+
+tree
+gnat_unsigned_type (tree type_node)
+{
+ tree type = gnat_type_for_size (TYPE_PRECISION (type_node), 1);
+
+ if (TREE_CODE (type_node) == INTEGER_TYPE && TYPE_MODULAR_P (type_node))
+ {
+ type = copy_node (type);
+ TREE_TYPE (type) = type_node;
+ }
+ else if (TREE_TYPE (type_node)
+ && TREE_CODE (TREE_TYPE (type_node)) == INTEGER_TYPE
+ && TYPE_MODULAR_P (TREE_TYPE (type_node)))
+ {
+ type = copy_node (type);
+ TREE_TYPE (type) = TREE_TYPE (type_node);
+ }
+
+ return type;
+}
+
+/* Return the signed version of TYPE_NODE, a scalar type.  */
+
+tree
+gnat_signed_type (tree type_node)
+{
+ tree type = gnat_type_for_size (TYPE_PRECISION (type_node), 0);
+
+ if (TREE_CODE (type_node) == INTEGER_TYPE && TYPE_MODULAR_P (type_node))
+ {
+ type = copy_node (type);
+ TREE_TYPE (type) = type_node;
+ }
+ else if (TREE_TYPE (type_node)
+ && TREE_CODE (TREE_TYPE (type_node)) == INTEGER_TYPE
+ && TYPE_MODULAR_P (TREE_TYPE (type_node)))
+ {
+ type = copy_node (type);
+ TREE_TYPE (type) = TREE_TYPE (type_node);
+ }
+
+ return type;
+}
+
+/* Return 1 if the types T1 and T2 are compatible, i.e. if they can be
+ transparently converted to each other. */
+
+int
+gnat_types_compatible_p (tree t1, tree t2)
+{
+ enum tree_code code;
+
+ /* This is the default criterion. */
+ if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
+ return 1;
+
+ /* We only check structural equivalence here. */
+ if ((code = TREE_CODE (t1)) != TREE_CODE (t2))
+ return 0;
+
+ /* Vector types are also compatible if they have the same number of subparts
+ and the same form of (scalar) element type. */
+ if (code == VECTOR_TYPE
+ && TYPE_VECTOR_SUBPARTS (t1) == TYPE_VECTOR_SUBPARTS (t2)
+ && TREE_CODE (TREE_TYPE (t1)) == TREE_CODE (TREE_TYPE (t2))
+ && TYPE_PRECISION (TREE_TYPE (t1)) == TYPE_PRECISION (TREE_TYPE (t2)))
+ return 1;
+
+ /* Array types are also compatible if they are constrained and have the same
+ domain(s) and the same component type. */
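+  /* E.g. two distinct constrained array types with equal integer bounds and
+     the same component type are compatible, even though their main variants
+     differ.  */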
+ if (code == ARRAY_TYPE
+ && (TYPE_DOMAIN (t1) == TYPE_DOMAIN (t2)
+ || (TYPE_DOMAIN (t1)
+ && TYPE_DOMAIN (t2)
+ && tree_int_cst_equal (TYPE_MIN_VALUE (TYPE_DOMAIN (t1)),
+ TYPE_MIN_VALUE (TYPE_DOMAIN (t2)))
+ && tree_int_cst_equal (TYPE_MAX_VALUE (TYPE_DOMAIN (t1)),
+ TYPE_MAX_VALUE (TYPE_DOMAIN (t2)))))
+ && (TREE_TYPE (t1) == TREE_TYPE (t2)
+ || (TREE_CODE (TREE_TYPE (t1)) == ARRAY_TYPE
+ && gnat_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)))))
+ return 1;
+
+ return 0;
+}
+
+/* Return true if EXPR is a useless type conversion. */
+
+bool
+gnat_useless_type_conversion (tree expr)
+{
+ if (CONVERT_EXPR_P (expr)
+ || TREE_CODE (expr) == VIEW_CONVERT_EXPR
+ || TREE_CODE (expr) == NON_LVALUE_EXPR)
+ return gnat_types_compatible_p (TREE_TYPE (expr),
+ TREE_TYPE (TREE_OPERAND (expr, 0)));
+
+ return false;
+}
+
+/* Return true if T, a FUNCTION_TYPE, has the specified list of flags. */
+
+bool
+fntype_same_flags_p (const_tree t, tree cico_list, bool return_unconstrained_p,
+ bool return_by_direct_ref_p, bool return_by_invisi_ref_p)
+{
+ return TYPE_CI_CO_LIST (t) == cico_list
+ && TYPE_RETURN_UNCONSTRAINED_P (t) == return_unconstrained_p
+ && TYPE_RETURN_BY_DIRECT_REF_P (t) == return_by_direct_ref_p
+ && TREE_ADDRESSABLE (t) == return_by_invisi_ref_p;
+}
+
+/* EXP is an expression for the size of an object. If this size contains
+ discriminant references, replace them with the maximum (if MAX_P) or
+ minimum (if !MAX_P) possible value of the discriminant. */
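+/* E.g. if EXP is 8 + 8 * D for a discriminant D with subtype range 0 .. 100,
+   the result is 808 if MAX_P and 8 if !MAX_P.  */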
+
+tree
+max_size (tree exp, bool max_p)
+{
+ enum tree_code code = TREE_CODE (exp);
+ tree type = TREE_TYPE (exp);
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case tcc_declaration:
+ case tcc_constant:
+ return exp;
+
+ case tcc_vl_exp:
+ if (code == CALL_EXPR)
+ {
+ tree t, *argarray;
+ int n, i;
+
+ t = maybe_inline_call_in_expr (exp);
+ if (t)
+ return max_size (t, max_p);
+
+ n = call_expr_nargs (exp);
+ gcc_assert (n > 0);
+ argarray = XALLOCAVEC (tree, n);
+ for (i = 0; i < n; i++)
+ argarray[i] = max_size (CALL_EXPR_ARG (exp, i), max_p);
+ return build_call_array (type, CALL_EXPR_FN (exp), n, argarray);
+ }
+ break;
+
+ case tcc_reference:
+ /* If this contains a PLACEHOLDER_EXPR, it is the thing we want to
+ modify. Otherwise, we treat it like a variable. */
+ if (!CONTAINS_PLACEHOLDER_P (exp))
+ return exp;
+
+ type = TREE_TYPE (TREE_OPERAND (exp, 1));
+ return
+ max_size (max_p ? TYPE_MAX_VALUE (type) : TYPE_MIN_VALUE (type), true);
+
+ case tcc_comparison:
+ return max_p ? size_one_node : size_zero_node;
+
+ case tcc_unary:
+ if (code == NON_LVALUE_EXPR)
+ return max_size (TREE_OPERAND (exp, 0), max_p);
+
+ return fold_build1 (code, type,
+ max_size (TREE_OPERAND (exp, 0),
+ code == NEGATE_EXPR ? !max_p : max_p));
+
+ case tcc_binary:
+ {
+ tree lhs = max_size (TREE_OPERAND (exp, 0), max_p);
+ tree rhs = max_size (TREE_OPERAND (exp, 1),
+ code == MINUS_EXPR ? !max_p : max_p);
+
+ /* Special-case wanting the maximum value of a MIN_EXPR.
+ In that case, if one side overflows, return the other. */
+ if (max_p && code == MIN_EXPR)
+ {
+ if (TREE_CODE (rhs) == INTEGER_CST && TREE_OVERFLOW (rhs))
+ return lhs;
+
+ if (TREE_CODE (lhs) == INTEGER_CST && TREE_OVERFLOW (lhs))
+ return rhs;
+ }
+
+ /* Likewise, handle a MINUS_EXPR or PLUS_EXPR with the LHS
+ overflowing and the RHS a variable. */
+ if ((code == MINUS_EXPR || code == PLUS_EXPR)
+ && TREE_CODE (lhs) == INTEGER_CST
+ && TREE_OVERFLOW (lhs)
+ && !TREE_CONSTANT (rhs))
+ return lhs;
+
+ return size_binop (code, lhs, rhs);
+ }
+
+ case tcc_expression:
+ switch (TREE_CODE_LENGTH (code))
+ {
+ case 1:
+ if (code == SAVE_EXPR)
+ return exp;
+
+ return fold_build1 (code, type,
+ max_size (TREE_OPERAND (exp, 0), max_p));
+
+ case 2:
+ if (code == COMPOUND_EXPR)
+ return max_size (TREE_OPERAND (exp, 1), max_p);
+
+ return fold_build2 (code, type,
+ max_size (TREE_OPERAND (exp, 0), max_p),
+ max_size (TREE_OPERAND (exp, 1), max_p));
+
+ case 3:
+ if (code == COND_EXPR)
+ return fold_build2 (max_p ? MAX_EXPR : MIN_EXPR, type,
+ max_size (TREE_OPERAND (exp, 1), max_p),
+ max_size (TREE_OPERAND (exp, 2), max_p));
+
+ default:
+ break;
+ }
+
+ /* Other tree classes cannot happen. */
+ default:
+ break;
+ }
+
+ gcc_unreachable ();
+}
+
+/* Build a template of type TEMPLATE_TYPE from the array bounds of ARRAY_TYPE.
+ EXPR is an expression that we can use to locate any PLACEHOLDER_EXPRs.
+ Return a constructor for the template. */
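+/* The template is a record of bound pairs, one pair per array dimension:
+   { low_1, high_1, low_2, high_2, ... }.  This is why the loop below steps
+   through the field list two FIELD_DECLs at a time.  */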
+
+tree
+build_template (tree template_type, tree array_type, tree expr)
+{
+ vec<constructor_elt, va_gc> *template_elts = NULL;
+ tree bound_list = NULL_TREE;
+ tree field;
+
+ while (TREE_CODE (array_type) == RECORD_TYPE
+ && (TYPE_PADDING_P (array_type)
+ || TYPE_JUSTIFIED_MODULAR_P (array_type)))
+ array_type = TREE_TYPE (TYPE_FIELDS (array_type));
+
+ if (TREE_CODE (array_type) == ARRAY_TYPE
+ || (TREE_CODE (array_type) == INTEGER_TYPE
+ && TYPE_HAS_ACTUAL_BOUNDS_P (array_type)))
+ bound_list = TYPE_ACTUAL_BOUNDS (array_type);
+
+ /* First make the list for a CONSTRUCTOR for the template. Go down the
+ field list of the template instead of the type chain because this
+ array might be an Ada array of arrays and we can't tell where the
+ nested arrays stop being the underlying object. */
+
+ for (field = TYPE_FIELDS (template_type); field;
+ (bound_list
+ ? (bound_list = TREE_CHAIN (bound_list))
+ : (array_type = TREE_TYPE (array_type))),
+ field = DECL_CHAIN (DECL_CHAIN (field)))
+ {
+ tree bounds, min, max;
+
+ /* If we have a bound list, get the bounds from there. Likewise
+ for an ARRAY_TYPE. Otherwise, if expr is a PARM_DECL with
+ DECL_BY_COMPONENT_PTR_P, use the bounds of the field in the template.
+ This will give us a maximum range. */
+ if (bound_list)
+ bounds = TREE_VALUE (bound_list);
+ else if (TREE_CODE (array_type) == ARRAY_TYPE)
+ bounds = TYPE_INDEX_TYPE (TYPE_DOMAIN (array_type));
+ else if (expr && TREE_CODE (expr) == PARM_DECL
+ && DECL_BY_COMPONENT_PTR_P (expr))
+ bounds = TREE_TYPE (field);
+ else
+ gcc_unreachable ();
+
+ min = convert (TREE_TYPE (field), TYPE_MIN_VALUE (bounds));
+ max = convert (TREE_TYPE (DECL_CHAIN (field)), TYPE_MAX_VALUE (bounds));
+
+ /* If either MIN or MAX involve a PLACEHOLDER_EXPR, we must
+ substitute it from OBJECT. */
+ min = SUBSTITUTE_PLACEHOLDER_IN_EXPR (min, expr);
+ max = SUBSTITUTE_PLACEHOLDER_IN_EXPR (max, expr);
+
+ CONSTRUCTOR_APPEND_ELT (template_elts, field, min);
+ CONSTRUCTOR_APPEND_ELT (template_elts, DECL_CHAIN (field), max);
+ }
+
+ return gnat_build_constructor (template_type, template_elts);
+}
+
+/* Helper routine to make a descriptor field. FIELD_LIST is the list of decls
+ being built; the new decl is chained on to the front of the list. */
+
+static tree
+make_descriptor_field (const char *name, tree type, tree rec_type,
+ tree initial, tree field_list)
+{
+ tree field
+ = create_field_decl (get_identifier (name), type, rec_type, NULL_TREE,
+ NULL_TREE, 0, 0);
+
+ DECL_INITIAL (field) = initial;
+ DECL_CHAIN (field) = field_list;
+ return field;
+}
+
+/* Build a 32-bit VMS descriptor from a Mechanism_Type, which must specify a
+ descriptor type, and the GCC type of an object. Each FIELD_DECL in the
+ type contains in its DECL_INITIAL the expression to use when a constructor
+ is made for the type. GNAT_ENTITY is an entity used to print out an error
+ message if the mechanism cannot be applied to an object of that type and
+ also for the name. */
+
+tree
+build_vms_descriptor32 (tree type, Mechanism_Type mech, Entity_Id gnat_entity)
+{
+ tree record_type = make_node (RECORD_TYPE);
+ tree pointer32_type, pointer64_type;
+ tree field_list = NULL_TREE;
+ int klass, ndim, i, dtype = 0;
+ tree inner_type, tem;
+ tree *idx_arr;
+
+ /* If TYPE is an unconstrained array, use the underlying array type. */
+ if (TREE_CODE (type) == UNCONSTRAINED_ARRAY_TYPE)
+ type = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (type))));
+
+ /* If this is an array, compute the number of dimensions in the array,
+ get the index types, and point to the inner type. */
+ if (TREE_CODE (type) != ARRAY_TYPE)
+ ndim = 0;
+ else
+ for (ndim = 1, inner_type = type;
+ TREE_CODE (TREE_TYPE (inner_type)) == ARRAY_TYPE
+ && TYPE_MULTI_ARRAY_P (TREE_TYPE (inner_type));
+ ndim++, inner_type = TREE_TYPE (inner_type))
+ ;
+
+ idx_arr = XALLOCAVEC (tree, ndim);
+
+ if (mech != By_Descriptor_NCA && mech != By_Short_Descriptor_NCA
+ && TREE_CODE (type) == ARRAY_TYPE && TYPE_CONVENTION_FORTRAN_P (type))
+ for (i = ndim - 1, inner_type = type;
+ i >= 0;
+ i--, inner_type = TREE_TYPE (inner_type))
+ idx_arr[i] = TYPE_DOMAIN (inner_type);
+ else
+ for (i = 0, inner_type = type;
+ i < ndim;
+ i++, inner_type = TREE_TYPE (inner_type))
+ idx_arr[i] = TYPE_DOMAIN (inner_type);
+
+ /* Now get the DTYPE value. */
+ switch (TREE_CODE (type))
+ {
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ if (TYPE_VAX_FLOATING_POINT_P (type))
+ switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type)))
+ {
+ case 6:
+ dtype = 10;
+ break;
+ case 9:
+ dtype = 11;
+ break;
+ case 15:
+ dtype = 27;
+ break;
+ }
+ else
+ switch (GET_MODE_BITSIZE (TYPE_MODE (type)))
+ {
+ case 8:
+ dtype = TYPE_UNSIGNED (type) ? 2 : 6;
+ break;
+ case 16:
+ dtype = TYPE_UNSIGNED (type) ? 3 : 7;
+ break;
+ case 32:
+ dtype = TYPE_UNSIGNED (type) ? 4 : 8;
+ break;
+ case 64:
+ dtype = TYPE_UNSIGNED (type) ? 5 : 9;
+ break;
+ case 128:
+ dtype = TYPE_UNSIGNED (type) ? 25 : 26;
+ break;
+ }
+ break;
+
+ case REAL_TYPE:
+ dtype = GET_MODE_BITSIZE (TYPE_MODE (type)) == 32 ? 52 : 53;
+ break;
+
+ case COMPLEX_TYPE:
+ if (TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
+ && TYPE_VAX_FLOATING_POINT_P (type))
+ switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type)))
+ {
+ case 6:
+ dtype = 12;
+ break;
+ case 9:
+ dtype = 13;
+ break;
+ case 15:
+ dtype = 29;
+ }
+ else
+	dtype = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (type))) == 32 ? 54 : 55;
+ break;
+
+ case ARRAY_TYPE:
+ dtype = 14;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Get the CLASS value. */
+ switch (mech)
+ {
+ case By_Descriptor_A:
+ case By_Short_Descriptor_A:
+ klass = 4;
+ break;
+ case By_Descriptor_NCA:
+ case By_Short_Descriptor_NCA:
+ klass = 10;
+ break;
+ case By_Descriptor_SB:
+ case By_Short_Descriptor_SB:
+ klass = 15;
+ break;
+ case By_Descriptor:
+ case By_Short_Descriptor:
+ case By_Descriptor_S:
+ case By_Short_Descriptor_S:
+ default:
+ klass = 1;
+ break;
+ }
+
+ /* Make the type for a descriptor for VMS. The first four fields are the
+ same for all types. */
+ field_list
+ = make_descriptor_field ("LENGTH", gnat_type_for_size (16, 1), record_type,
+ size_in_bytes ((mech == By_Descriptor_A
+ || mech == By_Short_Descriptor_A)
+ ? inner_type : type),
+ field_list);
+ field_list
+ = make_descriptor_field ("DTYPE", gnat_type_for_size (8, 1), record_type,
+ size_int (dtype), field_list);
+ field_list
+ = make_descriptor_field ("CLASS", gnat_type_for_size (8, 1), record_type,
+ size_int (klass), field_list);
+
+ pointer32_type = build_pointer_type_for_mode (type, SImode, false);
+ pointer64_type = build_pointer_type_for_mode (type, DImode, false);
+
+ /* Ensure that only 32-bit pointers are passed in 32-bit descriptors. Note
+ that we cannot build a template call to the CE routine as it would get a
+ wrong source location; instead we use a second placeholder for it. */
+ tem = build_unary_op (ADDR_EXPR, pointer64_type,
+ build0 (PLACEHOLDER_EXPR, type));
+ tem = build3 (COND_EXPR, pointer32_type,
+ Pmode != SImode
+ ? build_binary_op (GE_EXPR, boolean_type_node, tem,
+ build_int_cstu (pointer64_type, 0x80000000))
+ : boolean_false_node,
+ build0 (PLACEHOLDER_EXPR, void_type_node),
+ convert (pointer32_type, tem));
+
+ field_list
+ = make_descriptor_field ("POINTER", pointer32_type, record_type, tem,
+ field_list);
+
+ switch (mech)
+ {
+ case By_Descriptor:
+ case By_Short_Descriptor:
+ case By_Descriptor_S:
+ case By_Short_Descriptor_S:
+ break;
+
+ case By_Descriptor_SB:
+ case By_Short_Descriptor_SB:
+ field_list
+ = make_descriptor_field ("SB_L1", gnat_type_for_size (32, 1),
+ record_type,
+ (TREE_CODE (type) == ARRAY_TYPE
+ ? TYPE_MIN_VALUE (TYPE_DOMAIN (type))
+ : size_zero_node),
+ field_list);
+ field_list
+ = make_descriptor_field ("SB_U1", gnat_type_for_size (32, 1),
+ record_type,
+ (TREE_CODE (type) == ARRAY_TYPE
+ ? TYPE_MAX_VALUE (TYPE_DOMAIN (type))
+ : size_zero_node),
+ field_list);
+ break;
+
+ case By_Descriptor_A:
+ case By_Short_Descriptor_A:
+ case By_Descriptor_NCA:
+ case By_Short_Descriptor_NCA:
+ field_list
+ = make_descriptor_field ("SCALE", gnat_type_for_size (8, 1),
+ record_type, size_zero_node, field_list);
+
+ field_list
+ = make_descriptor_field ("DIGITS", gnat_type_for_size (8, 1),
+ record_type, size_zero_node, field_list);
+
+ field_list
+ = make_descriptor_field ("AFLAGS", gnat_type_for_size (8, 1),
+ record_type,
+ size_int ((mech == By_Descriptor_NCA
+ || mech == By_Short_Descriptor_NCA)
+ ? 0
+ /* Set FL_COLUMN, FL_COEFF, and
+ FL_BOUNDS. */
+ : (TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_CONVENTION_FORTRAN_P
+ (type)
+ ? 224 : 192)),
+ field_list);
+
+ field_list
+ = make_descriptor_field ("DIMCT", gnat_type_for_size (8, 1),
+ record_type, size_int (ndim), field_list);
+
+ field_list
+ = make_descriptor_field ("ARSIZE", gnat_type_for_size (32, 1),
+ record_type, size_in_bytes (type),
+ field_list);
+
+ /* Now build a pointer to the 0,0,0... element. */
+ tem = build0 (PLACEHOLDER_EXPR, type);
+ for (i = 0, inner_type = type; i < ndim;
+ i++, inner_type = TREE_TYPE (inner_type))
+ tem = build4 (ARRAY_REF, TREE_TYPE (inner_type), tem,
+ convert (TYPE_DOMAIN (inner_type), size_zero_node),
+ NULL_TREE, NULL_TREE);
+
+ field_list
+ = make_descriptor_field ("A0", pointer32_type, record_type,
+ build1 (ADDR_EXPR, pointer32_type, tem),
+ field_list);
+
+ /* Next come the addressing coefficients. */
+ tem = size_one_node;
+ for (i = 0; i < ndim; i++)
+ {
+ char fname[3];
+ tree idx_length
+ = size_binop (MULT_EXPR, tem,
+ size_binop (PLUS_EXPR,
+ size_binop (MINUS_EXPR,
+ TYPE_MAX_VALUE (idx_arr[i]),
+ TYPE_MIN_VALUE (idx_arr[i])),
+ size_int (1)));
+
+	  fname[0] = ((mech == By_Descriptor_NCA
+		       || mech == By_Short_Descriptor_NCA) ? 'S' : 'M');
+ fname[1] = '0' + i, fname[2] = 0;
+ field_list
+ = make_descriptor_field (fname, gnat_type_for_size (32, 1),
+ record_type, idx_length, field_list);
+
+ if (mech == By_Descriptor_NCA || mech == By_Short_Descriptor_NCA)
+ tem = idx_length;
+ }
+
+ /* Finally here are the bounds. */
+ for (i = 0; i < ndim; i++)
+ {
+ char fname[3];
+
+ fname[0] = 'L', fname[1] = '0' + i, fname[2] = 0;
+ field_list
+ = make_descriptor_field (fname, gnat_type_for_size (32, 1),
+ record_type, TYPE_MIN_VALUE (idx_arr[i]),
+ field_list);
+
+ fname[0] = 'U';
+ field_list
+ = make_descriptor_field (fname, gnat_type_for_size (32, 1),
+ record_type, TYPE_MAX_VALUE (idx_arr[i]),
+ field_list);
+ }
+ break;
+
+ default:
+ post_error ("unsupported descriptor type for &", gnat_entity);
+ }
+
+ TYPE_NAME (record_type) = create_concat_name (gnat_entity, "DESC");
+ finish_record_type (record_type, nreverse (field_list), 0, false);
+ return record_type;
+}
+
+/* Build a 64-bit VMS descriptor from a Mechanism_Type, which must specify a
+ descriptor type, and the GCC type of an object. Each FIELD_DECL in the
+ type contains in its DECL_INITIAL the expression to use when a constructor
+ is made for the type. GNAT_ENTITY is an entity used to print out an error
+ message if the mechanism cannot be applied to an object of that type and
+ also for the name. */
+
+tree
+build_vms_descriptor (tree type, Mechanism_Type mech, Entity_Id gnat_entity)
+{
+ tree record_type = make_node (RECORD_TYPE);
+ tree pointer64_type;
+ tree field_list = NULL_TREE;
+ int klass, ndim, i, dtype = 0;
+ tree inner_type, tem;
+ tree *idx_arr;
+
+ /* If TYPE is an unconstrained array, use the underlying array type. */
+ if (TREE_CODE (type) == UNCONSTRAINED_ARRAY_TYPE)
+ type = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (type))));
+
+ /* If this is an array, compute the number of dimensions in the array,
+ get the index types, and point to the inner type. */
+ if (TREE_CODE (type) != ARRAY_TYPE)
+ ndim = 0;
+ else
+ for (ndim = 1, inner_type = type;
+ TREE_CODE (TREE_TYPE (inner_type)) == ARRAY_TYPE
+ && TYPE_MULTI_ARRAY_P (TREE_TYPE (inner_type));
+ ndim++, inner_type = TREE_TYPE (inner_type))
+ ;
+
+ idx_arr = XALLOCAVEC (tree, ndim);
+
+ if (mech != By_Descriptor_NCA
+ && TREE_CODE (type) == ARRAY_TYPE && TYPE_CONVENTION_FORTRAN_P (type))
+ for (i = ndim - 1, inner_type = type;
+ i >= 0;
+ i--, inner_type = TREE_TYPE (inner_type))
+ idx_arr[i] = TYPE_DOMAIN (inner_type);
+ else
+ for (i = 0, inner_type = type;
+ i < ndim;
+ i++, inner_type = TREE_TYPE (inner_type))
+ idx_arr[i] = TYPE_DOMAIN (inner_type);
+
+ /* Now get the DTYPE value. */
+ switch (TREE_CODE (type))
+ {
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ if (TYPE_VAX_FLOATING_POINT_P (type))
+ switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type)))
+ {
+ case 6:
+ dtype = 10;
+ break;
+ case 9:
+ dtype = 11;
+ break;
+ case 15:
+ dtype = 27;
+ break;
+ }
+ else
+ switch (GET_MODE_BITSIZE (TYPE_MODE (type)))
+ {
+ case 8:
+ dtype = TYPE_UNSIGNED (type) ? 2 : 6;
+ break;
+ case 16:
+ dtype = TYPE_UNSIGNED (type) ? 3 : 7;
+ break;
+ case 32:
+ dtype = TYPE_UNSIGNED (type) ? 4 : 8;
+ break;
+ case 64:
+ dtype = TYPE_UNSIGNED (type) ? 5 : 9;
+ break;
+ case 128:
+ dtype = TYPE_UNSIGNED (type) ? 25 : 26;
+ break;
+ }
+ break;
+
+ case REAL_TYPE:
+ dtype = GET_MODE_BITSIZE (TYPE_MODE (type)) == 32 ? 52 : 53;
+ break;
+
+ case COMPLEX_TYPE:
+ if (TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
+ && TYPE_VAX_FLOATING_POINT_P (type))
+ switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type)))
+ {
+ case 6:
+ dtype = 12;
+ break;
+ case 9:
+ dtype = 13;
+ break;
+ case 15:
+ dtype = 29;
+ }
+ else
+	dtype = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (type))) == 32 ? 54 : 55;
+ break;
+
+ case ARRAY_TYPE:
+ dtype = 14;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Get the CLASS value. */
+ switch (mech)
+ {
+ case By_Descriptor_A:
+ klass = 4;
+ break;
+ case By_Descriptor_NCA:
+ klass = 10;
+ break;
+ case By_Descriptor_SB:
+ klass = 15;
+ break;
+ case By_Descriptor:
+ case By_Descriptor_S:
+ default:
+ klass = 1;
+ break;
+ }
+
+ /* Make the type for a 64-bit descriptor for VMS. The first six fields
+ are the same for all types. */
+ field_list
+ = make_descriptor_field ("MBO", gnat_type_for_size (16, 1),
+ record_type, size_int (1), field_list);
+ field_list
+ = make_descriptor_field ("DTYPE", gnat_type_for_size (8, 1),
+ record_type, size_int (dtype), field_list);
+ field_list
+ = make_descriptor_field ("CLASS", gnat_type_for_size (8, 1),
+ record_type, size_int (klass), field_list);
+ field_list
+ = make_descriptor_field ("MBMO", gnat_type_for_size (32, 1),
+ record_type, size_int (-1), field_list);
+ field_list
+ = make_descriptor_field ("LENGTH", gnat_type_for_size (64, 1),
+ record_type,
+ size_in_bytes (mech == By_Descriptor_A
+ ? inner_type : type),
+ field_list);
+
+ pointer64_type = build_pointer_type_for_mode (type, DImode, false);
+
+ field_list
+ = make_descriptor_field ("POINTER", pointer64_type, record_type,
+ build_unary_op (ADDR_EXPR, pointer64_type,
+ build0 (PLACEHOLDER_EXPR, type)),
+ field_list);
+
+ switch (mech)
+ {
+ case By_Descriptor:
+ case By_Descriptor_S:
+ break;
+
+ case By_Descriptor_SB:
+ field_list
+ = make_descriptor_field ("SB_L1", gnat_type_for_size (64, 1),
+ record_type,
+ (TREE_CODE (type) == ARRAY_TYPE
+ ? TYPE_MIN_VALUE (TYPE_DOMAIN (type))
+ : size_zero_node),
+ field_list);
+ field_list
+ = make_descriptor_field ("SB_U1", gnat_type_for_size (64, 1),
+ record_type,
+ (TREE_CODE (type) == ARRAY_TYPE
+ ? TYPE_MAX_VALUE (TYPE_DOMAIN (type))
+ : size_zero_node),
+ field_list);
+ break;
+
+ case By_Descriptor_A:
+ case By_Descriptor_NCA:
+ field_list
+ = make_descriptor_field ("SCALE", gnat_type_for_size (8, 1),
+ record_type, size_zero_node, field_list);
+
+ field_list
+ = make_descriptor_field ("DIGITS", gnat_type_for_size (8, 1),
+ record_type, size_zero_node, field_list);
+
+ dtype = (mech == By_Descriptor_NCA
+ ? 0
+ /* Set FL_COLUMN, FL_COEFF, and
+ FL_BOUNDS. */
+ : (TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_CONVENTION_FORTRAN_P (type)
+ ? 224 : 192));
+ field_list
+ = make_descriptor_field ("AFLAGS", gnat_type_for_size (8, 1),
+ record_type, size_int (dtype),
+ field_list);
+
+ field_list
+ = make_descriptor_field ("DIMCT", gnat_type_for_size (8, 1),
+ record_type, size_int (ndim), field_list);
+
+ field_list
+ = make_descriptor_field ("MBZ", gnat_type_for_size (32, 1),
+ record_type, size_int (0), field_list);
+ field_list
+ = make_descriptor_field ("ARSIZE", gnat_type_for_size (64, 1),
+ record_type, size_in_bytes (type),
+ field_list);
+
+ /* Now build a pointer to the 0,0,0... element. */
+ tem = build0 (PLACEHOLDER_EXPR, type);
+ for (i = 0, inner_type = type; i < ndim;
+ i++, inner_type = TREE_TYPE (inner_type))
+ tem = build4 (ARRAY_REF, TREE_TYPE (inner_type), tem,
+ convert (TYPE_DOMAIN (inner_type), size_zero_node),
+ NULL_TREE, NULL_TREE);
+
+ field_list
+ = make_descriptor_field ("A0", pointer64_type, record_type,
+ build1 (ADDR_EXPR, pointer64_type, tem),
+ field_list);
+
+ /* Next come the addressing coefficients. */
+ tem = size_one_node;
+ for (i = 0; i < ndim; i++)
+ {
+ char fname[3];
+ tree idx_length
+ = size_binop (MULT_EXPR, tem,
+ size_binop (PLUS_EXPR,
+ size_binop (MINUS_EXPR,
+ TYPE_MAX_VALUE (idx_arr[i]),
+ TYPE_MIN_VALUE (idx_arr[i])),
+ size_int (1)));
+
+ fname[0] = (mech == By_Descriptor_NCA ? 'S' : 'M');
+ fname[1] = '0' + i, fname[2] = 0;
+ field_list
+ = make_descriptor_field (fname, gnat_type_for_size (64, 1),
+ record_type, idx_length, field_list);
+
+ if (mech == By_Descriptor_NCA)
+ tem = idx_length;
+ }
+
+ /* Finally here are the bounds. */
+ for (i = 0; i < ndim; i++)
+ {
+ char fname[3];
+
+ fname[0] = 'L', fname[1] = '0' + i, fname[2] = 0;
+ field_list
+ = make_descriptor_field (fname, gnat_type_for_size (64, 1),
+ record_type,
+ TYPE_MIN_VALUE (idx_arr[i]), field_list);
+
+ fname[0] = 'U';
+ field_list
+ = make_descriptor_field (fname, gnat_type_for_size (64, 1),
+ record_type,
+ TYPE_MAX_VALUE (idx_arr[i]), field_list);
+ }
+ break;
+
+ default:
+ post_error ("unsupported descriptor type for &", gnat_entity);
+ }
+
+ TYPE_NAME (record_type) = create_concat_name (gnat_entity, "DESC64");
+ finish_record_type (record_type, nreverse (field_list), 0, false);
+ return record_type;
+}
+
+/* Fill in a VMS descriptor of GNU_TYPE for GNU_EXPR and return the result.
+ GNAT_ACTUAL is the actual parameter for which the descriptor is built. */
+
+tree
+fill_vms_descriptor (tree gnu_type, tree gnu_expr, Node_Id gnat_actual)
+{
+ vec<constructor_elt, va_gc> *v = NULL;
+ tree field;
+
+ gnu_expr = maybe_unconstrained_array (gnu_expr);
+ gnu_expr = gnat_protect_expr (gnu_expr);
+ gnat_mark_addressable (gnu_expr);
+
+ /* We may need to substitute both GNU_EXPR and a CALL_EXPR to the raise CE
+ routine in case we have a 32-bit descriptor. */
+ gnu_expr = build2 (COMPOUND_EXPR, void_type_node,
+ build_call_raise (CE_Range_Check_Failed, gnat_actual,
+ N_Raise_Constraint_Error),
+ gnu_expr);
+
+ for (field = TYPE_FIELDS (gnu_type); field; field = DECL_CHAIN (field))
+ {
+ tree value
+ = convert (TREE_TYPE (field),
+ SUBSTITUTE_PLACEHOLDER_IN_EXPR (DECL_INITIAL (field),
+ gnu_expr));
+ CONSTRUCTOR_APPEND_ELT (v, field, value);
+ }
+
+ return gnat_build_constructor (gnu_type, v);
+}
+
+/* Convert GNU_EXPR, a pointer to a 64-bit VMS descriptor, to GNU_TYPE, a
+ regular pointer or fat pointer type. GNAT_SUBPROG is the subprogram to
+ which the VMS descriptor is passed. */
+
+static tree
+convert_vms_descriptor64 (tree gnu_type, tree gnu_expr, Entity_Id gnat_subprog)
+{
+ tree desc_type = TREE_TYPE (TREE_TYPE (gnu_expr));
+ tree desc = build1 (INDIRECT_REF, desc_type, gnu_expr);
+ /* The CLASS field is the 3rd field in the descriptor. */
+ tree klass = DECL_CHAIN (DECL_CHAIN (TYPE_FIELDS (desc_type)));
+ /* The POINTER field is the 6th field in the descriptor. */
+ tree pointer = DECL_CHAIN (DECL_CHAIN (DECL_CHAIN (klass)));
+
+ /* Retrieve the value of the POINTER field. */
+ tree gnu_expr64
+ = build3 (COMPONENT_REF, TREE_TYPE (pointer), desc, pointer, NULL_TREE);
+
+ if (POINTER_TYPE_P (gnu_type))
+ return convert (gnu_type, gnu_expr64);
+
+ else if (TYPE_IS_FAT_POINTER_P (gnu_type))
+ {
+ tree p_array_type = TREE_TYPE (TYPE_FIELDS (gnu_type));
+ tree p_bounds_type = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_type)));
+ tree template_type = TREE_TYPE (p_bounds_type);
+ tree min_field = TYPE_FIELDS (template_type);
+ tree max_field = DECL_CHAIN (TYPE_FIELDS (template_type));
+ tree template_tree, template_addr, aflags, dimct, t, u;
+ /* See the head comment of build_vms_descriptor. */
+ int iklass = TREE_INT_CST_LOW (DECL_INITIAL (klass));
+ tree lfield, ufield;
+ vec<constructor_elt, va_gc> *v;
+
+ /* Convert POINTER to the pointer-to-array type. */
+ gnu_expr64 = convert (p_array_type, gnu_expr64);
+
+ switch (iklass)
+ {
+ case 1: /* Class S */
+ case 15: /* Class SB */
+	  /* Build {1, LENGTH} template; the 64-bit LENGTH is the 5th field. */
+ vec_alloc (v, 2);
+ t = DECL_CHAIN (DECL_CHAIN (klass));
+ t = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+ CONSTRUCTOR_APPEND_ELT (v, min_field,
+ convert (TREE_TYPE (min_field),
+ integer_one_node));
+ CONSTRUCTOR_APPEND_ELT (v, max_field,
+ convert (TREE_TYPE (max_field), t));
+ template_tree = gnat_build_constructor (template_type, v);
+ template_addr = build_unary_op (ADDR_EXPR, NULL_TREE, template_tree);
+
+ /* For class S, we are done. */
+ if (iklass == 1)
+ break;
+
+	  /* Test that we really have an SB descriptor, like DEC Ada. */
+ t = build3 (COMPONENT_REF, TREE_TYPE (klass), desc, klass, NULL);
+ u = convert (TREE_TYPE (klass), DECL_INITIAL (klass));
+ u = build_binary_op (EQ_EXPR, boolean_type_node, t, u);
+ /* If so, there is already a template in the descriptor and
+ it is located right after the POINTER field. The fields are
+	     64 bits wide, so they must be repacked.  */
+ t = DECL_CHAIN (pointer);
+ lfield = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+ lfield = convert (TREE_TYPE (TYPE_FIELDS (template_type)), lfield);
+
+ t = DECL_CHAIN (t);
+ ufield = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+ ufield = convert
+ (TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (template_type))), ufield);
+
+ /* Build the template in the form of a constructor. */
+ vec_alloc (v, 2);
+ CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (template_type), lfield);
+ CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (template_type)),
+ ufield);
+ template_tree = gnat_build_constructor (template_type, v);
+
+ /* Otherwise use the {1, LENGTH} template we build above. */
+ template_addr = build3 (COND_EXPR, p_bounds_type, u,
+ build_unary_op (ADDR_EXPR, p_bounds_type,
+ template_tree),
+ template_addr);
+ break;
+
+ case 4: /* Class A */
+ /* The AFLAGS field is the 3rd field after the pointer in the
+ descriptor. */
+ t = DECL_CHAIN (DECL_CHAIN (DECL_CHAIN (pointer)));
+ aflags = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+ /* The DIMCT field is the next field in the descriptor after
+ aflags. */
+ t = DECL_CHAIN (t);
+ dimct = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+	  /* Raise CONSTRAINT_ERROR if there is more than one dimension, or
+	     if FL_COEFF or FL_BOUNDS is not set. */
+ u = build_int_cst (TREE_TYPE (aflags), 192);
+ u = build_binary_op (TRUTH_OR_EXPR, boolean_type_node,
+ build_binary_op (NE_EXPR, boolean_type_node,
+ dimct,
+ convert (TREE_TYPE (dimct),
+ size_one_node)),
+ build_binary_op (NE_EXPR, boolean_type_node,
+ build2 (BIT_AND_EXPR,
+ TREE_TYPE (aflags),
+ aflags, u),
+ u));
+	  /* There is already a template in the descriptor and it is located
+	     in block 3.  The fields are 64 bits wide, so they must be
+	     repacked.  */
+ t = DECL_CHAIN (DECL_CHAIN (DECL_CHAIN (DECL_CHAIN (DECL_CHAIN
+ (t)))));
+ lfield = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+ lfield = convert (TREE_TYPE (TYPE_FIELDS (template_type)), lfield);
+
+ t = DECL_CHAIN (t);
+ ufield = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+ ufield = convert
+ (TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (template_type))), ufield);
+
+ /* Build the template in the form of a constructor. */
+ vec_alloc (v, 2);
+ CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (template_type), lfield);
+ CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (template_type)),
+ ufield);
+ template_tree = gnat_build_constructor (template_type, v);
+ template_tree = build3 (COND_EXPR, template_type, u,
+ build_call_raise (CE_Length_Check_Failed, Empty,
+ N_Raise_Constraint_Error),
+ template_tree);
+ template_addr
+ = build_unary_op (ADDR_EXPR, p_bounds_type, template_tree);
+ break;
+
+ case 10: /* Class NCA */
+ default:
+ post_error ("unsupported descriptor type for &", gnat_subprog);
+ template_addr = integer_zero_node;
+ break;
+ }
+
+ /* Build the fat pointer in the form of a constructor. */
+ vec_alloc (v, 2);
+ CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (gnu_type), gnu_expr64);
+ CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (gnu_type)),
+ template_addr);
+ return gnat_build_constructor (gnu_type, v);
+ }
+
+ else
+ gcc_unreachable ();
+}
+
+/* Convert GNU_EXPR, a pointer to a 32-bit VMS descriptor, to GNU_TYPE, a
+ regular pointer or fat pointer type. GNAT_SUBPROG is the subprogram to
+ which the VMS descriptor is passed. */
+
+static tree
+convert_vms_descriptor32 (tree gnu_type, tree gnu_expr, Entity_Id gnat_subprog)
+{
+ tree desc_type = TREE_TYPE (TREE_TYPE (gnu_expr));
+ tree desc = build1 (INDIRECT_REF, desc_type, gnu_expr);
+ /* The CLASS field is the 3rd field in the descriptor. */
+ tree klass = DECL_CHAIN (DECL_CHAIN (TYPE_FIELDS (desc_type)));
+ /* The POINTER field is the 4th field in the descriptor. */
+ tree pointer = DECL_CHAIN (klass);
+
+ /* Retrieve the value of the POINTER field. */
+ tree gnu_expr32
+ = build3 (COMPONENT_REF, TREE_TYPE (pointer), desc, pointer, NULL_TREE);
+
+ if (POINTER_TYPE_P (gnu_type))
+ return convert (gnu_type, gnu_expr32);
+
+ else if (TYPE_IS_FAT_POINTER_P (gnu_type))
+ {
+ tree p_array_type = TREE_TYPE (TYPE_FIELDS (gnu_type));
+ tree p_bounds_type = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_type)));
+ tree template_type = TREE_TYPE (p_bounds_type);
+ tree min_field = TYPE_FIELDS (template_type);
+ tree max_field = DECL_CHAIN (TYPE_FIELDS (template_type));
+ tree template_tree, template_addr, aflags, dimct, t, u;
+ /* See the head comment of build_vms_descriptor. */
+ int iklass = TREE_INT_CST_LOW (DECL_INITIAL (klass));
+ vec<constructor_elt, va_gc> *v;
+
+ /* Convert POINTER to the pointer-to-array type. */
+ gnu_expr32 = convert (p_array_type, gnu_expr32);
+
+ switch (iklass)
+ {
+ case 1: /* Class S */
+ case 15: /* Class SB */
+ /* Build {1, LENGTH} template; LENGTH is the 1st field. */
+ vec_alloc (v, 2);
+ t = TYPE_FIELDS (desc_type);
+ t = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+ CONSTRUCTOR_APPEND_ELT (v, min_field,
+ convert (TREE_TYPE (min_field),
+ integer_one_node));
+ CONSTRUCTOR_APPEND_ELT (v, max_field,
+ convert (TREE_TYPE (max_field), t));
+ template_tree = gnat_build_constructor (template_type, v);
+ template_addr = build_unary_op (ADDR_EXPR, NULL_TREE, template_tree);
+
+ /* For class S, we are done. */
+ if (iklass == 1)
+ break;
+
+	  /* Test that we really have an SB descriptor, like DEC Ada. */
+ t = build3 (COMPONENT_REF, TREE_TYPE (klass), desc, klass, NULL);
+ u = convert (TREE_TYPE (klass), DECL_INITIAL (klass));
+ u = build_binary_op (EQ_EXPR, boolean_type_node, t, u);
+ /* If so, there is already a template in the descriptor and
+ it is located right after the POINTER field. */
+ t = DECL_CHAIN (pointer);
+ template_tree
+ = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+ /* Otherwise use the {1, LENGTH} template we build above. */
+ template_addr = build3 (COND_EXPR, p_bounds_type, u,
+ build_unary_op (ADDR_EXPR, p_bounds_type,
+ template_tree),
+ template_addr);
+ break;
+
+ case 4: /* Class A */
+ /* The AFLAGS field is the 7th field in the descriptor. */
+ t = DECL_CHAIN (DECL_CHAIN (DECL_CHAIN (pointer)));
+ aflags = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+ /* The DIMCT field is the 8th field in the descriptor. */
+ t = DECL_CHAIN (t);
+ dimct = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+	  /* Raise CONSTRAINT_ERROR if there is more than one dimension, or
+	     if FL_COEFF or FL_BOUNDS is not set. */
+ u = build_int_cst (TREE_TYPE (aflags), 192);
+ u = build_binary_op (TRUTH_OR_EXPR, boolean_type_node,
+ build_binary_op (NE_EXPR, boolean_type_node,
+ dimct,
+ convert (TREE_TYPE (dimct),
+ size_one_node)),
+ build_binary_op (NE_EXPR, boolean_type_node,
+ build2 (BIT_AND_EXPR,
+ TREE_TYPE (aflags),
+ aflags, u),
+ u));
+ /* There is already a template in the descriptor and it is
+ located at the start of block 3 (12th field). */
+ t = DECL_CHAIN (DECL_CHAIN (DECL_CHAIN (DECL_CHAIN (t))));
+ template_tree
+ = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
+ template_tree = build3 (COND_EXPR, TREE_TYPE (t), u,
+ build_call_raise (CE_Length_Check_Failed, Empty,
+ N_Raise_Constraint_Error),
+ template_tree);
+ template_addr
+ = build_unary_op (ADDR_EXPR, p_bounds_type, template_tree);
+ break;
+
+ case 10: /* Class NCA */
+ default:
+ post_error ("unsupported descriptor type for &", gnat_subprog);
+ template_addr = integer_zero_node;
+ break;
+ }
+
+ /* Build the fat pointer in the form of a constructor. */
+ vec_alloc (v, 2);
+ CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (gnu_type), gnu_expr32);
+ CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (gnu_type)),
+ template_addr);
+
+ return gnat_build_constructor (gnu_type, v);
+ }
+
+ else
+ gcc_unreachable ();
+}
+
+/* Convert GNU_EXPR, a pointer to a VMS descriptor, to GNU_TYPE, a regular
+ pointer or fat pointer type. GNU_EXPR_ALT_TYPE is the alternate (32-bit)
+ pointer type of GNU_EXPR. GNAT_SUBPROG is the subprogram to which the
+ descriptor is passed. */
+
+tree
+convert_vms_descriptor (tree gnu_type, tree gnu_expr, tree gnu_expr_alt_type,
+ Entity_Id gnat_subprog)
+{
+ tree desc_type = TREE_TYPE (TREE_TYPE (gnu_expr));
+ tree desc = build1 (INDIRECT_REF, desc_type, gnu_expr);
+ tree mbo = TYPE_FIELDS (desc_type);
+ const char *mbostr = IDENTIFIER_POINTER (DECL_NAME (mbo));
+ tree mbmo = DECL_CHAIN (DECL_CHAIN (DECL_CHAIN (mbo)));
+ tree is64bit, gnu_expr32, gnu_expr64;
+
+  /* If the field name is not MBO, the descriptor is 32-bit and there is no
+     alternate.  Otherwise the primary descriptor is 64-bit and the
+     alternate is 32-bit.  */
+ if (strcmp (mbostr, "MBO") != 0)
+    return convert_vms_descriptor32 (gnu_type, gnu_expr, gnat_subprog);
+
+ /* Build the test for 64-bit descriptor. */
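+  /* In a 64-bit descriptor MBO reads 1 and MBMO reads -1 by construction,
+     whereas a 32-bit descriptor has other fields at those offsets, so this
+     pair of comparisons discriminates the two layouts at run time.  */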
+ mbo = build3 (COMPONENT_REF, TREE_TYPE (mbo), desc, mbo, NULL_TREE);
+ mbmo = build3 (COMPONENT_REF, TREE_TYPE (mbmo), desc, mbmo, NULL_TREE);
+ is64bit
+ = build_binary_op (TRUTH_ANDIF_EXPR, boolean_type_node,
+ build_binary_op (EQ_EXPR, boolean_type_node,
+ convert (integer_type_node, mbo),
+ integer_one_node),
+ build_binary_op (EQ_EXPR, boolean_type_node,
+ convert (integer_type_node, mbmo),
+ integer_minus_one_node));
+
+ /* Build the 2 possible end results. */
+ gnu_expr64 = convert_vms_descriptor64 (gnu_type, gnu_expr, gnat_subprog);
+ gnu_expr = fold_convert (gnu_expr_alt_type, gnu_expr);
+ gnu_expr32 = convert_vms_descriptor32 (gnu_type, gnu_expr, gnat_subprog);
+ return build3 (COND_EXPR, gnu_type, is64bit, gnu_expr64, gnu_expr32);
+}
+
+/* Build a type to be used to represent an aliased object whose nominal type
+ is an unconstrained array. This consists of a RECORD_TYPE containing a
+ field of TEMPLATE_TYPE and a field of OBJECT_TYPE, which is an ARRAY_TYPE.
+ If ARRAY_TYPE is that of an unconstrained array, this is used to represent
+ an arbitrary unconstrained object. Use NAME as the name of the record.
+ DEBUG_INFO_P is true if we need to write debug information for the type. */
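+
+/* In sketch form, the layout built below is
+
+     RECORD_TYPE <NAME>
+       FIELD_DECL BOUNDS : TEMPLATE_TYPE   (the bounds template)
+       FIELD_DECL ARRAY  : OBJECT_TYPE     (the array data)  */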
+
+tree
+build_unc_object_type (tree template_type, tree object_type, tree name,
+ bool debug_info_p)
+{
+ tree type = make_node (RECORD_TYPE);
+ tree template_field
+ = create_field_decl (get_identifier ("BOUNDS"), template_type, type,
+ NULL_TREE, NULL_TREE, 0, 1);
+ tree array_field
+ = create_field_decl (get_identifier ("ARRAY"), object_type, type,
+ NULL_TREE, NULL_TREE, 0, 1);
+
+ TYPE_NAME (type) = name;
+ TYPE_CONTAINS_TEMPLATE_P (type) = 1;
+ DECL_CHAIN (template_field) = array_field;
+ finish_record_type (type, template_field, 0, true);
+
+ /* Declare it now since it will never be declared otherwise. This is
+ necessary to ensure that its subtrees are properly marked. */
+ create_type_decl (name, type, true, debug_info_p, Empty);
+
+ return type;
+}
+
+/* Same, taking a thin or fat pointer type instead of a template type. */
+
+tree
+build_unc_object_type_from_ptr (tree thin_fat_ptr_type, tree object_type,
+ tree name, bool debug_info_p)
+{
+ tree template_type;
+
+ gcc_assert (TYPE_IS_FAT_OR_THIN_POINTER_P (thin_fat_ptr_type));
+
+ template_type
+ = (TYPE_IS_FAT_POINTER_P (thin_fat_ptr_type)
+ ? TREE_TYPE (TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (thin_fat_ptr_type))))
+ : TREE_TYPE (TYPE_FIELDS (TREE_TYPE (thin_fat_ptr_type))));
+
+ return
+ build_unc_object_type (template_type, object_type, name, debug_info_p);
+}
+
+/* Update anything previously pointing to OLD_TYPE to point to NEW_TYPE.
+ In the normal case this is just two adjustments, but we have more to
+ do if NEW_TYPE is an UNCONSTRAINED_ARRAY_TYPE. */
+
+void
+update_pointer_to (tree old_type, tree new_type)
+{
+ tree ptr = TYPE_POINTER_TO (old_type);
+ tree ref = TYPE_REFERENCE_TO (old_type);
+ tree t;
+
+ /* If this is the main variant, process all the other variants first. */
+ if (TYPE_MAIN_VARIANT (old_type) == old_type)
+ for (t = TYPE_NEXT_VARIANT (old_type); t; t = TYPE_NEXT_VARIANT (t))
+ update_pointer_to (t, new_type);
+
+ /* If no pointers and no references, we are done. */
+ if (!ptr && !ref)
+ return;
+
+ /* Merge the old type qualifiers in the new type.
+
+ Each old variant has qualifiers for specific reasons, and the new
+ designated type as well. Each set of qualifiers represents useful
+ information grabbed at some point, and merging the two simply unifies
+ these inputs into the final type description.
+
+ Consider for instance a volatile type frozen after an access to constant
+ type designating it; after the designated type's freeze, we get here with
+ a volatile NEW_TYPE and a dummy OLD_TYPE with a readonly variant, created
+ when the access type was processed. We will make a volatile and readonly
+ designated type, because that's what it really is.
+
+ We might also get here for a non-dummy OLD_TYPE variant with different
+ qualifiers than those of NEW_TYPE, for instance in some cases of pointers
+ to private record type elaboration (see the comments around the call to
+ this routine in gnat_to_gnu_entity <E_Access_Type>). We have to merge
+ the qualifiers in those cases too, to avoid accidentally discarding the
+ initial set, and will often end up with OLD_TYPE == NEW_TYPE then. */
+ new_type
+ = build_qualified_type (new_type,
+ TYPE_QUALS (old_type) | TYPE_QUALS (new_type));
+
+ /* If old type and new type are identical, there is nothing to do. */
+ if (old_type == new_type)
+ return;
+
+ /* Otherwise, first handle the simple case. */
+ if (TREE_CODE (new_type) != UNCONSTRAINED_ARRAY_TYPE)
+ {
+ tree new_ptr, new_ref;
+
+ /* If pointer or reference already points to new type, nothing to do.
+ This can happen as update_pointer_to can be invoked multiple times
+	 on the same pair of types because of the type variants. */
+ if ((ptr && TREE_TYPE (ptr) == new_type)
+ || (ref && TREE_TYPE (ref) == new_type))
+ return;
+
+ /* Chain PTR and its variants at the end. */
+ new_ptr = TYPE_POINTER_TO (new_type);
+ if (new_ptr)
+ {
+ while (TYPE_NEXT_PTR_TO (new_ptr))
+ new_ptr = TYPE_NEXT_PTR_TO (new_ptr);
+ TYPE_NEXT_PTR_TO (new_ptr) = ptr;
+ }
+ else
+ TYPE_POINTER_TO (new_type) = ptr;
+
+ /* Now adjust them. */
+ for (; ptr; ptr = TYPE_NEXT_PTR_TO (ptr))
+ for (t = TYPE_MAIN_VARIANT (ptr); t; t = TYPE_NEXT_VARIANT (t))
+ {
+ TREE_TYPE (t) = new_type;
+ if (TYPE_NULL_BOUNDS (t))
+ TREE_TYPE (TREE_OPERAND (TYPE_NULL_BOUNDS (t), 0)) = new_type;
+ }
+
+ /* Chain REF and its variants at the end. */
+ new_ref = TYPE_REFERENCE_TO (new_type);
+ if (new_ref)
+ {
+ while (TYPE_NEXT_REF_TO (new_ref))
+ new_ref = TYPE_NEXT_REF_TO (new_ref);
+ TYPE_NEXT_REF_TO (new_ref) = ref;
+ }
+ else
+ TYPE_REFERENCE_TO (new_type) = ref;
+
+ /* Now adjust them. */
+ for (; ref; ref = TYPE_NEXT_REF_TO (ref))
+ for (t = TYPE_MAIN_VARIANT (ref); t; t = TYPE_NEXT_VARIANT (t))
+ TREE_TYPE (t) = new_type;
+
+ TYPE_POINTER_TO (old_type) = NULL_TREE;
+ TYPE_REFERENCE_TO (old_type) = NULL_TREE;
+ }
+
+ /* Now deal with the unconstrained array case. In this case the pointer
+ is actually a record where both fields are pointers to dummy nodes.
+ Turn them into pointers to the correct types using update_pointer_to.
+ Likewise for the pointer to the object record (thin pointer). */
+ else
+ {
+ tree new_ptr = TYPE_POINTER_TO (new_type);
+
+ gcc_assert (TYPE_IS_FAT_POINTER_P (ptr));
+
+ /* If PTR already points to NEW_TYPE, nothing to do. This can happen
+ since update_pointer_to can be invoked multiple times on the same
+	 pair of types because of the type variants. */
+ if (TYPE_UNCONSTRAINED_ARRAY (ptr) == new_type)
+ return;
+
+ update_pointer_to
+ (TREE_TYPE (TREE_TYPE (TYPE_FIELDS (ptr))),
+ TREE_TYPE (TREE_TYPE (TYPE_FIELDS (new_ptr))));
+
+ update_pointer_to
+ (TREE_TYPE (TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (ptr)))),
+ TREE_TYPE (TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (new_ptr)))));
+
+ update_pointer_to (TYPE_OBJECT_RECORD_TYPE (old_type),
+ TYPE_OBJECT_RECORD_TYPE (new_type));
+
+ TYPE_POINTER_TO (old_type) = NULL_TREE;
+ }
+}
+
+/* Convert EXPR, a pointer to a constrained array, into a pointer to an
+ unconstrained one. This involves making or finding a template. */
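+
+/* In sketch form, a fat pointer of type TYPE is a two-field record
+
+     P_ARRAY  : pointer to the array data
+     P_BOUNDS : pointer to the bounds template
+
+   and the code below fills in both fields.  */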
+
+static tree
+convert_to_fat_pointer (tree type, tree expr)
+{
+ tree template_type = TREE_TYPE (TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (type))));
+ tree p_array_type = TREE_TYPE (TYPE_FIELDS (type));
+ tree etype = TREE_TYPE (expr);
+ tree template_addr;
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 2);
+
+ /* If EXPR is null, make a fat pointer that contains a null pointer to the
+ array (compare_fat_pointers ensures that this is the full discriminant)
+ and a valid pointer to the bounds. This latter property is necessary
+ since the compiler can hoist the load of the bounds done through it. */
+ if (integer_zerop (expr))
+ {
+ tree ptr_template_type = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (type)));
+ tree null_bounds, t;
+
+ if (TYPE_NULL_BOUNDS (ptr_template_type))
+ null_bounds = TYPE_NULL_BOUNDS (ptr_template_type);
+ else
+ {
+ /* The template type can still be dummy at this point so we build an
+ empty constructor. The middle-end will fill it in with zeros. */
+	  t = build_constructor (template_type, NULL);
+ TREE_CONSTANT (t) = TREE_STATIC (t) = 1;
+ null_bounds = build_unary_op (ADDR_EXPR, NULL_TREE, t);
+ SET_TYPE_NULL_BOUNDS (ptr_template_type, null_bounds);
+ }
+
+ CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (type),
+ fold_convert (p_array_type, null_pointer_node));
+ CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (type)), null_bounds);
+ t = build_constructor (type, v);
+ /* Do not set TREE_CONSTANT so as to force T to static memory. */
+ TREE_CONSTANT (t) = 0;
+ TREE_STATIC (t) = 1;
+
+ return t;
+ }
+
+ /* If EXPR is a thin pointer, make template and data from the record. */
+ if (TYPE_IS_THIN_POINTER_P (etype))
+ {
+ tree field = TYPE_FIELDS (TREE_TYPE (etype));
+
+ expr = gnat_protect_expr (expr);
+
+ /* If we have a TYPE_UNCONSTRAINED_ARRAY attached to the RECORD_TYPE,
+ the thin pointer value has been shifted so we shift it back to get
+ the template address. */
+ if (TYPE_UNCONSTRAINED_ARRAY (TREE_TYPE (etype)))
+ {
+ template_addr
+ = build_binary_op (POINTER_PLUS_EXPR, etype, expr,
+ fold_build1 (NEGATE_EXPR, sizetype,
+ byte_position
+ (DECL_CHAIN (field))));
+ template_addr
+ = fold_convert (TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (type))),
+ template_addr);
+ }
+
+ /* Otherwise we explicitly take the address of the fields. */
+ else
+ {
+ expr = build_unary_op (INDIRECT_REF, NULL_TREE, expr);
+ template_addr
+ = build_unary_op (ADDR_EXPR, NULL_TREE,
+ build_component_ref (expr, NULL_TREE, field,
+ false));
+ expr = build_unary_op (ADDR_EXPR, NULL_TREE,
+ build_component_ref (expr, NULL_TREE,
+ DECL_CHAIN (field),
+ false));
+ }
+ }
+
+ /* Otherwise, build the constructor for the template. */
+ else
+ template_addr
+ = build_unary_op (ADDR_EXPR, NULL_TREE,
+ build_template (template_type, TREE_TYPE (etype),
+ expr));
+
+ /* The final result is a constructor for the fat pointer.
+
+ If EXPR is an argument of a foreign convention subprogram, the type it
+ points to is directly the component type. In this case, the expression
+ type may not match the corresponding FIELD_DECL type at this point, so we
+ call "convert" here to fix that up if necessary. This type consistency is
+ required, for instance because it ensures that possible later folding of
+ COMPONENT_REFs against this constructor always yields something of the
+ same type as the initial reference.
+
+ Note that the call to "build_template" above is still fine because it
+ will only refer to the provided TEMPLATE_TYPE in this case. */
+ CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (type), convert (p_array_type, expr));
+ CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (type)), template_addr);
+ return gnat_build_constructor (type, v);
+}
+
+/* Create an expression whose value is that of EXPR,
+ converted to type TYPE. The TREE_TYPE of the value
+ is always TYPE. This function implements all reasonable
+ conversions; callers should filter out those that are
+ not permitted by the language being compiled. */
+
+tree
+convert (tree type, tree expr)
+{
+ tree etype = TREE_TYPE (expr);
+ enum tree_code ecode = TREE_CODE (etype);
+ enum tree_code code = TREE_CODE (type);
+
+ /* If the expression is already of the right type, we are done. */
+ if (etype == type)
+ return expr;
+
+ /* If both input and output have padding and are of variable size, do this
+ as an unchecked conversion. Likewise if one is a mere variant of the
+ other, so we avoid a pointless unpad/repad sequence. */
+ else if (code == RECORD_TYPE && ecode == RECORD_TYPE
+ && TYPE_PADDING_P (type) && TYPE_PADDING_P (etype)
+ && (!TREE_CONSTANT (TYPE_SIZE (type))
+ || !TREE_CONSTANT (TYPE_SIZE (etype))
+ || TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (etype)
+ || TYPE_NAME (TREE_TYPE (TYPE_FIELDS (type)))
+ == TYPE_NAME (TREE_TYPE (TYPE_FIELDS (etype)))))
+ ;
+
+ /* If the output type has padding, convert to the inner type and make a
+ constructor to build the record, unless a variable size is involved. */
+ else if (code == RECORD_TYPE && TYPE_PADDING_P (type))
+ {
+ vec<constructor_elt, va_gc> *v;
+
+ /* If we previously converted from another type and our type is
+ of variable size, remove the conversion to avoid the need for
+ variable-sized temporaries. Likewise for a conversion between
+ original and packable version. */
+ if (TREE_CODE (expr) == VIEW_CONVERT_EXPR
+ && (!TREE_CONSTANT (TYPE_SIZE (type))
+ || (ecode == RECORD_TYPE
+ && TYPE_NAME (etype)
+ == TYPE_NAME (TREE_TYPE (TREE_OPERAND (expr, 0))))))
+ expr = TREE_OPERAND (expr, 0);
+
+ /* If we are just removing the padding from expr, convert the original
+ object if we have variable size in order to avoid the need for some
+ variable-sized temporaries. Likewise if the padding is a variant
+ of the other, so we avoid a pointless unpad/repad sequence. */
+ if (TREE_CODE (expr) == COMPONENT_REF
+ && TYPE_IS_PADDING_P (TREE_TYPE (TREE_OPERAND (expr, 0)))
+ && (!TREE_CONSTANT (TYPE_SIZE (type))
+ || TYPE_MAIN_VARIANT (type)
+ == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (expr, 0)))
+ || (ecode == RECORD_TYPE
+ && TYPE_NAME (etype)
+ == TYPE_NAME (TREE_TYPE (TYPE_FIELDS (type))))))
+ return convert (type, TREE_OPERAND (expr, 0));
+
+ /* If the inner type is of self-referential size and the expression type
+ is a record, do this as an unchecked conversion. But first pad the
+ expression if possible to have the same size on both sides. */
+ if (ecode == RECORD_TYPE
+ && CONTAINS_PLACEHOLDER_P (DECL_SIZE (TYPE_FIELDS (type))))
+ {
+ if (TREE_CODE (TYPE_SIZE (etype)) == INTEGER_CST)
+ expr = convert (maybe_pad_type (etype, TYPE_SIZE (type), 0, Empty,
+ false, false, false, true),
+ expr);
+ return unchecked_convert (type, expr, false);
+ }
+
+ /* If we are converting between array types with variable size, do the
+ final conversion as an unchecked conversion, again to avoid the need
+ for some variable-sized temporaries. If valid, this conversion is
+ very likely purely technical and without real effects. */
+ if (ecode == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (TYPE_FIELDS (type))) == ARRAY_TYPE
+ && !TREE_CONSTANT (TYPE_SIZE (etype))
+ && !TREE_CONSTANT (TYPE_SIZE (type)))
+ return unchecked_convert (type,
+ convert (TREE_TYPE (TYPE_FIELDS (type)),
+ expr),
+ false);
+
+ vec_alloc (v, 1);
+ CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (type),
+ convert (TREE_TYPE (TYPE_FIELDS (type)), expr));
+ return gnat_build_constructor (type, v);
+ }
+
+  /* If the input type has padding, remove it and convert to the output type.
+     The ordering of the conditions is arranged to ensure that the output
+     type is not a padding type here, as it is not clear whether the
+     conversion would always be correct if that were to happen.  */
+ else if (ecode == RECORD_TYPE && TYPE_PADDING_P (etype))
+ {
+ tree unpadded;
+
+ /* If we have just converted to this padded type, just get the
+ inner expression. */
+ if (TREE_CODE (expr) == CONSTRUCTOR
+ && !vec_safe_is_empty (CONSTRUCTOR_ELTS (expr))
+ && (*CONSTRUCTOR_ELTS (expr))[0].index == TYPE_FIELDS (etype))
+ unpadded = (*CONSTRUCTOR_ELTS (expr))[0].value;
+
+ /* Otherwise, build an explicit component reference. */
+ else
+ unpadded
+ = build_component_ref (expr, NULL_TREE, TYPE_FIELDS (etype), false);
+
+ return convert (type, unpadded);
+ }
+
+ /* If the input is a biased type, adjust first. */
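+  /* The representation of a biased type is the offset from its lower bound:
+     e.g. with a range of 100 .. 103 held in 2 bits, a stored value V
+     denotes V + 100, whence the addition of TYPE_MIN_VALUE below.  */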
+ if (ecode == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (etype))
+ return convert (type, fold_build2 (PLUS_EXPR, TREE_TYPE (etype),
+ fold_convert (TREE_TYPE (etype),
+ expr),
+ TYPE_MIN_VALUE (etype)));
+
+  /* If the input is a justified modular type, we need to extract the actual
+     object before converting it to any other type, with the exception of an
+     unconstrained array or of a mere type variant.  It is useful to avoid
+     the extraction and conversion in the type variant case because it could
+     end up replacing a VAR_DECL expr by a constructor and we might be about
+     to take the address of the result.  */
+ if (ecode == RECORD_TYPE && TYPE_JUSTIFIED_MODULAR_P (etype)
+ && code != UNCONSTRAINED_ARRAY_TYPE
+ && TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (etype))
+ return convert (type, build_component_ref (expr, NULL_TREE,
+ TYPE_FIELDS (etype), false));
+
+ /* If converting to a type that contains a template, convert to the data
+ type and then build the template. */
+ if (code == RECORD_TYPE && TYPE_CONTAINS_TEMPLATE_P (type))
+ {
+ tree obj_type = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (type)));
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 2);
+
+ /* If the source already has a template, get a reference to the
+ associated array only, as we are going to rebuild a template
+ for the target type anyway. */
+ expr = maybe_unconstrained_array (expr);
+
+ CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (type),
+ build_template (TREE_TYPE (TYPE_FIELDS (type)),
+ obj_type, NULL_TREE));
+ CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (type)),
+ convert (obj_type, expr));
+ return gnat_build_constructor (type, v);
+ }
+
+ /* There are some cases of expressions that we process specially. */
+ switch (TREE_CODE (expr))
+ {
+ case ERROR_MARK:
+ return expr;
+
+ case NULL_EXPR:
+ /* Just set its type here. For TRANSFORM_EXPR, we will do the actual
+	 conversion in gnat_expand_expr.  NULL_EXPR does not represent
+	 an actual value, so no conversion is needed.  */
+ expr = copy_node (expr);
+ TREE_TYPE (expr) = type;
+ return expr;
+
+ case STRING_CST:
+ /* If we are converting a STRING_CST to another constrained array type,
+ just make a new one in the proper type. */
+ if (code == ecode && AGGREGATE_TYPE_P (etype)
+ && !(TREE_CODE (TYPE_SIZE (etype)) == INTEGER_CST
+ && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST))
+ {
+ expr = copy_node (expr);
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+ break;
+
+ case VECTOR_CST:
+ /* If we are converting a VECTOR_CST to a mere type variant, just make
+ a new one in the proper type. */
+ if (code == ecode && gnat_types_compatible_p (type, etype))
+ {
+ expr = copy_node (expr);
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+
+ case CONSTRUCTOR:
+ /* If we are converting a CONSTRUCTOR to a mere type variant, or to
+ another padding type around the same type, just make a new one in
+ the proper type. */
+ if (code == ecode
+ && (gnat_types_compatible_p (type, etype)
+ || (code == RECORD_TYPE
+ && TYPE_PADDING_P (type) && TYPE_PADDING_P (etype)
+ && TREE_TYPE (TYPE_FIELDS (type))
+ == TREE_TYPE (TYPE_FIELDS (etype)))))
+ {
+ expr = copy_node (expr);
+ TREE_TYPE (expr) = type;
+ CONSTRUCTOR_ELTS (expr) = vec_safe_copy (CONSTRUCTOR_ELTS (expr));
+ return expr;
+ }
+
+ /* Likewise for a conversion between original and packable version, or
+ conversion between types of the same size and with the same list of
+ fields, but we have to work harder to preserve type consistency. */
+ if (code == ecode
+ && code == RECORD_TYPE
+ && (TYPE_NAME (type) == TYPE_NAME (etype)
+ || tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (etype))))
+ {
+ vec<constructor_elt, va_gc> *e = CONSTRUCTOR_ELTS (expr);
+ unsigned HOST_WIDE_INT len = vec_safe_length (e);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, len);
+ tree efield = TYPE_FIELDS (etype), field = TYPE_FIELDS (type);
+ unsigned HOST_WIDE_INT idx;
+ tree index, value;
+
+ /* Whether we need to clear TREE_CONSTANT et al. on the output
+ constructor when we convert in place. */
+ bool clear_constant = false;
+
+	  FOR_EACH_CONSTRUCTOR_ELT (e, idx, index, value)
+ {
+ /* Skip the missing fields in the CONSTRUCTOR. */
+ while (efield && field && !SAME_FIELD_P (efield, index))
+ {
+ efield = DECL_CHAIN (efield);
+ field = DECL_CHAIN (field);
+ }
+ /* The field must be the same. */
+ if (!(efield && field && SAME_FIELD_P (efield, field)))
+ break;
+ constructor_elt elt
+ = {field, convert (TREE_TYPE (field), value)};
+ v->quick_push (elt);
+
+ /* If packing has made this field a bitfield and the input
+ value couldn't be emitted statically any more, we need to
+ clear TREE_CONSTANT on our output. */
+ if (!clear_constant
+ && TREE_CONSTANT (expr)
+ && !CONSTRUCTOR_BITFIELD_P (efield)
+ && CONSTRUCTOR_BITFIELD_P (field)
+ && !initializer_constant_valid_for_bitfield_p (value))
+ clear_constant = true;
+
+ efield = DECL_CHAIN (efield);
+ field = DECL_CHAIN (field);
+ }
+
+ /* If we have been able to match and convert all the input fields
+	     to their output type, convert in place now.  We'll fall back to a
+ view conversion downstream otherwise. */
+ if (idx == len)
+ {
+ expr = copy_node (expr);
+ TREE_TYPE (expr) = type;
+ CONSTRUCTOR_ELTS (expr) = v;
+ if (clear_constant)
+ TREE_CONSTANT (expr) = TREE_STATIC (expr) = 0;
+ return expr;
+ }
+ }
+
+ /* Likewise for a conversion between array type and vector type with a
+ compatible representative array. */
+ else if (code == VECTOR_TYPE
+ && ecode == ARRAY_TYPE
+ && gnat_types_compatible_p (TYPE_REPRESENTATIVE_ARRAY (type),
+ etype))
+ {
+ vec<constructor_elt, va_gc> *e = CONSTRUCTOR_ELTS (expr);
+ unsigned HOST_WIDE_INT len = vec_safe_length (e);
+ vec<constructor_elt, va_gc> *v;
+ unsigned HOST_WIDE_INT ix;
+ tree value;
+
+ /* Build a VECTOR_CST from a *constant* array constructor. */
+ if (TREE_CONSTANT (expr))
+ {
+ bool constant_p = true;
+
+ /* Iterate through elements and check if all constructor
+ elements are *_CSTs. */
+ FOR_EACH_CONSTRUCTOR_VALUE (e, ix, value)
+ if (!CONSTANT_CLASS_P (value))
+ {
+ constant_p = false;
+ break;
+ }
+
+ if (constant_p)
+ return build_vector_from_ctor (type,
+ CONSTRUCTOR_ELTS (expr));
+ }
+
+ /* Otherwise, build a regular vector constructor. */
+ vec_alloc (v, len);
+ FOR_EACH_CONSTRUCTOR_VALUE (e, ix, value)
+ {
+ constructor_elt elt = {NULL_TREE, value};
+ v->quick_push (elt);
+ }
+ expr = copy_node (expr);
+ TREE_TYPE (expr) = type;
+ CONSTRUCTOR_ELTS (expr) = v;
+ return expr;
+ }
+ break;
+
+ case UNCONSTRAINED_ARRAY_REF:
+ /* First retrieve the underlying array. */
+ expr = maybe_unconstrained_array (expr);
+ etype = TREE_TYPE (expr);
+ ecode = TREE_CODE (etype);
+ break;
+
+ case VIEW_CONVERT_EXPR:
+ {
+ /* GCC 4.x is very sensitive to type consistency overall, and view
+ conversions thus are very frequent. Even though just "convert"ing
+ the inner operand to the output type is fine in most cases, it
+ might expose unexpected input/output type mismatches in special
+ circumstances so we avoid such recursive calls when we can. */
+ tree op0 = TREE_OPERAND (expr, 0);
+
+ /* If we are converting back to the original type, we can just
+ lift the input conversion. This is a common occurrence with
+ switches back-and-forth amongst type variants. */
+ if (type == TREE_TYPE (op0))
+ return op0;
+
+ /* Otherwise, if we're converting between two aggregate or vector
+ types, we might be allowed to substitute the VIEW_CONVERT_EXPR
+ target type in place or to just convert the inner expression. */
+ if ((AGGREGATE_TYPE_P (type) && AGGREGATE_TYPE_P (etype))
+ || (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (etype)))
+ {
+ /* If we are converting between mere variants, we can just
+ substitute the VIEW_CONVERT_EXPR in place. */
+ if (gnat_types_compatible_p (type, etype))
+ return build1 (VIEW_CONVERT_EXPR, type, op0);
+
+ /* Otherwise, we may just bypass the input view conversion unless
+ one of the types is a fat pointer, which is handled by
+ specialized code below which relies on exact type matching. */
+ else if (!TYPE_IS_FAT_POINTER_P (type)
+ && !TYPE_IS_FAT_POINTER_P (etype))
+ return convert (type, op0);
+ }
+
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ /* Check for converting to a pointer to an unconstrained array. */
+ if (TYPE_IS_FAT_POINTER_P (type) && !TYPE_IS_FAT_POINTER_P (etype))
+ return convert_to_fat_pointer (type, expr);
+
+ /* If we are converting between two aggregate or vector types that are mere
+ variants, just make a VIEW_CONVERT_EXPR. Likewise when we are converting
+ to a vector type from its representative array type. */
+ else if ((code == ecode
+ && (AGGREGATE_TYPE_P (type) || VECTOR_TYPE_P (type))
+ && gnat_types_compatible_p (type, etype))
+ || (code == VECTOR_TYPE
+ && ecode == ARRAY_TYPE
+ && gnat_types_compatible_p (TYPE_REPRESENTATIVE_ARRAY (type),
+ etype)))
+ return build1 (VIEW_CONVERT_EXPR, type, expr);
+
+ /* If we are converting between tagged types, try to upcast properly. */
+ else if (ecode == RECORD_TYPE && code == RECORD_TYPE
+ && TYPE_ALIGN_OK (etype) && TYPE_ALIGN_OK (type))
+ {
+ tree child_etype = etype;
+ do {
+ tree field = TYPE_FIELDS (child_etype);
+ if (DECL_NAME (field) == parent_name_id && TREE_TYPE (field) == type)
+ return build_component_ref (expr, NULL_TREE, field, false);
+ child_etype = TREE_TYPE (field);
+ } while (TREE_CODE (child_etype) == RECORD_TYPE);
+ }
+
+ /* If we are converting from a smaller form of record type back to it, just
+ make a VIEW_CONVERT_EXPR. But first pad the expression to have the same
+ size on both sides. */
+ else if (ecode == RECORD_TYPE && code == RECORD_TYPE
+ && smaller_form_type_p (etype, type))
+ {
+ expr = convert (maybe_pad_type (etype, TYPE_SIZE (type), 0, Empty,
+ false, false, false, true),
+ expr);
+ return build1 (VIEW_CONVERT_EXPR, type, expr);
+ }
+
+ /* In all other cases of related types, make a NOP_EXPR. */
+ else if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (etype))
+ return fold_convert (type, expr);
+
+ switch (code)
+ {
+ case VOID_TYPE:
+ return fold_build1 (CONVERT_EXPR, type, expr);
+
+ case INTEGER_TYPE:
+ if (TYPE_HAS_ACTUAL_BOUNDS_P (type)
+ && (ecode == ARRAY_TYPE || ecode == UNCONSTRAINED_ARRAY_TYPE
+ || (ecode == RECORD_TYPE && TYPE_CONTAINS_TEMPLATE_P (etype))))
+ return unchecked_convert (type, expr, false);
+ else if (TYPE_BIASED_REPRESENTATION_P (type))
+ return fold_convert (type,
+ fold_build2 (MINUS_EXPR, TREE_TYPE (type),
+ convert (TREE_TYPE (type), expr),
+ TYPE_MIN_VALUE (type)));
+
+ /* ... fall through ... */
+
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ /* If we are converting an additive expression to an integer type
+ with lower precision, be wary of the optimization that can be
+ applied by convert_to_integer. There are 2 problematic cases:
+ - if the first operand was originally of a biased type,
+ because we could be recursively called to convert it
+ to an intermediate type and thus rematerialize the
+ additive operator endlessly,
+ - if the expression contains a placeholder, because an
+ intermediate conversion that changes the sign could
+ be inserted and thus introduce an artificial overflow
+ at compile time when the placeholder is substituted. */
+ if (code == INTEGER_TYPE
+ && ecode == INTEGER_TYPE
+ && TYPE_PRECISION (type) < TYPE_PRECISION (etype)
+ && (TREE_CODE (expr) == PLUS_EXPR || TREE_CODE (expr) == MINUS_EXPR))
+ {
+ tree op0 = get_unwidened (TREE_OPERAND (expr, 0), type);
+
+ if ((TREE_CODE (TREE_TYPE (op0)) == INTEGER_TYPE
+ && TYPE_BIASED_REPRESENTATION_P (TREE_TYPE (op0)))
+ || CONTAINS_PLACEHOLDER_P (expr))
+ return build1 (NOP_EXPR, type, expr);
+ }
+
+ return fold (convert_to_integer (type, expr));
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* If converting between two thin pointers, adjust if needed to account
+ for differing offsets from the base pointer, depending on whether
+ there is a TYPE_UNCONSTRAINED_ARRAY attached to the record type. */
+ if (TYPE_IS_THIN_POINTER_P (etype) && TYPE_IS_THIN_POINTER_P (type))
+ {
+ tree etype_pos
+ = TYPE_UNCONSTRAINED_ARRAY (TREE_TYPE (etype)) != NULL_TREE
+ ? byte_position (DECL_CHAIN (TYPE_FIELDS (TREE_TYPE (etype))))
+ : size_zero_node;
+ tree type_pos
+ = TYPE_UNCONSTRAINED_ARRAY (TREE_TYPE (type)) != NULL_TREE
+ ? byte_position (DECL_CHAIN (TYPE_FIELDS (TREE_TYPE (type))))
+ : size_zero_node;
+ tree byte_diff = size_diffop (type_pos, etype_pos);
+
+ expr = build1 (NOP_EXPR, type, expr);
+ if (integer_zerop (byte_diff))
+ return expr;
+
+ return build_binary_op (POINTER_PLUS_EXPR, type, expr,
+ fold_convert (sizetype, byte_diff));
+ }
+
+ /* If converting fat pointer to normal or thin pointer, get the pointer
+ to the array and then convert it. */
+ if (TYPE_IS_FAT_POINTER_P (etype))
+ expr
+ = build_component_ref (expr, NULL_TREE, TYPE_FIELDS (etype), false);
+
+ return fold (convert_to_pointer (type, expr));
+
+ case REAL_TYPE:
+ return fold (convert_to_real (type, expr));
+
+ case RECORD_TYPE:
+ if (TYPE_JUSTIFIED_MODULAR_P (type) && !AGGREGATE_TYPE_P (etype))
+ {
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 1);
+
+ CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (type),
+ convert (TREE_TYPE (TYPE_FIELDS (type)),
+ expr));
+ return gnat_build_constructor (type, v);
+ }
+
+ /* ... fall through ... */
+
+ case ARRAY_TYPE:
+ /* In these cases, assume the front-end has validated the conversion.
+ If the conversion is valid, it will be a bit-wise conversion, so
+ it can be viewed as an unchecked conversion. */
+ return unchecked_convert (type, expr, false);
+
+ case UNION_TYPE:
+      /* This is either a conversion between a tagged type and some
+	 subtype, which we have to mark as a UNION_TYPE because of
+	 overlapping fields, or a conversion of an Unchecked_Union.  */
+ return unchecked_convert (type, expr, false);
+
+ case UNCONSTRAINED_ARRAY_TYPE:
+ /* If the input is a VECTOR_TYPE, convert to the representative
+ array type first. */
+ if (ecode == VECTOR_TYPE)
+ {
+ expr = convert (TYPE_REPRESENTATIVE_ARRAY (etype), expr);
+ etype = TREE_TYPE (expr);
+ ecode = TREE_CODE (etype);
+ }
+
+ /* If EXPR is a constrained array, take its address, convert it to a
+ fat pointer, and then dereference it. Likewise if EXPR is a
+ record containing both a template and a constrained array.
+ Note that a record representing a justified modular type
+ always represents a packed constrained array. */
+ if (ecode == ARRAY_TYPE
+ || (ecode == INTEGER_TYPE && TYPE_HAS_ACTUAL_BOUNDS_P (etype))
+ || (ecode == RECORD_TYPE && TYPE_CONTAINS_TEMPLATE_P (etype))
+ || (ecode == RECORD_TYPE && TYPE_JUSTIFIED_MODULAR_P (etype)))
+ return
+ build_unary_op
+ (INDIRECT_REF, NULL_TREE,
+ convert_to_fat_pointer (TREE_TYPE (type),
+ build_unary_op (ADDR_EXPR,
+ NULL_TREE, expr)));
+
+ /* Do something very similar for converting one unconstrained
+ array to another. */
+ else if (ecode == UNCONSTRAINED_ARRAY_TYPE)
+ return
+ build_unary_op (INDIRECT_REF, NULL_TREE,
+ convert (TREE_TYPE (type),
+ build_unary_op (ADDR_EXPR,
+ NULL_TREE, expr)));
+ else
+ gcc_unreachable ();
+
+ case COMPLEX_TYPE:
+ return fold (convert_to_complex (type, expr));
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Create an expression whose value is that of EXPR converted to the common
+ index type, which is sizetype. EXPR is supposed to be in the base type
+ of the GNAT index type. Calling it is equivalent to doing
+
+ convert (sizetype, expr)
+
+ but we try to distribute the type conversion with the knowledge that EXPR
+ cannot overflow in its type. This is a best-effort approach and we fall
+ back to the above expression as soon as difficulties are encountered.
+
+ This is necessary to overcome issues that arise when the GNAT base index
+ type and the GCC common index type (sizetype) don't have the same size,
+ which is quite frequent on 64-bit architectures. In this case, and if
+ the GNAT base index type is signed but the iteration type of the loop has
+ been forced to unsigned, the loop scalar evolution engine cannot compute
+ a simple evolution for the general induction variables associated with the
+ array indices, because it will preserve the wrap-around semantics in the
+ unsigned type of their "inner" part. As a result, many loop optimizations
+ are blocked.
+
+ The solution is to use a special (basic) induction variable that is at
+ least as large as sizetype, and to express the aforementioned general
+ induction variables in terms of this induction variable, eliminating
+ the problematic intermediate truncation to the GNAT base index type.
+ This is possible as long as the original expression doesn't overflow
+ and if the middle-end hasn't introduced artificial overflows in the
+   course of the various simplifications it can make to the expression. */
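+
+/* For instance, in a loop such as
+
+     for J in A'Range loop
+       A (J) := 0;
+     end loop;
+
+   the computation of the array index from J is re-expressed in terms of the
+   special induction variable attached to J (see DECL_INDUCTION_VAR below) so
+   that it is carried out directly in sizetype.  */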
+
+tree
+convert_to_index_type (tree expr)
+{
+ enum tree_code code = TREE_CODE (expr);
+ tree type = TREE_TYPE (expr);
+
+ /* If the type is unsigned, overflow is allowed so we cannot be sure that
+ EXPR doesn't overflow. Keep it simple if optimization is disabled. */
+ if (TYPE_UNSIGNED (type) || !optimize)
+ return convert (sizetype, expr);
+
+ switch (code)
+ {
+ case VAR_DECL:
+ /* The main effect of the function: replace a loop parameter with its
+ associated special induction variable. */
+ if (DECL_LOOP_PARM_P (expr) && DECL_INDUCTION_VAR (expr))
+ expr = DECL_INDUCTION_VAR (expr);
+ break;
+
+ CASE_CONVERT:
+ {
+ tree otype = TREE_TYPE (TREE_OPERAND (expr, 0));
+ /* Bail out as soon as we suspect some sort of type frobbing. */
+ if (TYPE_PRECISION (type) != TYPE_PRECISION (otype)
+ || TYPE_UNSIGNED (type) != TYPE_UNSIGNED (otype))
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case NON_LVALUE_EXPR:
+ return fold_build1 (code, sizetype,
+ convert_to_index_type (TREE_OPERAND (expr, 0)));
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case MULT_EXPR:
+ return fold_build2 (code, sizetype,
+ convert_to_index_type (TREE_OPERAND (expr, 0)),
+ convert_to_index_type (TREE_OPERAND (expr, 1)));
+
+ case COMPOUND_EXPR:
+ return fold_build2 (code, sizetype, TREE_OPERAND (expr, 0),
+ convert_to_index_type (TREE_OPERAND (expr, 1)));
+
+ case COND_EXPR:
+ return fold_build3 (code, sizetype, TREE_OPERAND (expr, 0),
+ convert_to_index_type (TREE_OPERAND (expr, 1)),
+ convert_to_index_type (TREE_OPERAND (expr, 2)));
+
+ default:
+ break;
+ }
+
+ return convert (sizetype, expr);
+}
+
+/* Remove all conversions that are done in EXP. This includes converting
+ from a padded type or to a justified modular type. If TRUE_ADDRESS
+ is true, always return the address of the containing object even if
+ the address is not bit-aligned. */
+
+tree
+remove_conversions (tree exp, bool true_address)
+{
+ switch (TREE_CODE (exp))
+ {
+ case CONSTRUCTOR:
+ if (true_address
+ && TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (TREE_TYPE (exp)))
+ return
+ remove_conversions ((*CONSTRUCTOR_ELTS (exp))[0].value, true);
+ break;
+
+ case COMPONENT_REF:
+ if (TYPE_IS_PADDING_P (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ return remove_conversions (TREE_OPERAND (exp, 0), true_address);
+ break;
+
+ CASE_CONVERT:
+ case VIEW_CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ return remove_conversions (TREE_OPERAND (exp, 0), true_address);
+
+ default:
+ break;
+ }
+
+ return exp;
+}
+
+/* If EXP's type is an UNCONSTRAINED_ARRAY_TYPE, return an expression that
+ refers to the underlying array. If it has TYPE_CONTAINS_TEMPLATE_P,
+ likewise return an expression pointing to the underlying array. */
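+
+/* For instance, an UNCONSTRAINED_ARRAY_REF whose operand is a fat pointer
+   FAT is rewritten below as *FAT.P_ARRAY, i.e. a dereference of the first
+   (array pointer) field of the fat pointer.  */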
+
+tree
+maybe_unconstrained_array (tree exp)
+{
+ enum tree_code code = TREE_CODE (exp);
+ tree type = TREE_TYPE (exp);
+
+ switch (TREE_CODE (type))
+ {
+ case UNCONSTRAINED_ARRAY_TYPE:
+ if (code == UNCONSTRAINED_ARRAY_REF)
+ {
+ const bool read_only = TREE_READONLY (exp);
+ const bool no_trap = TREE_THIS_NOTRAP (exp);
+
+ exp = TREE_OPERAND (exp, 0);
+ type = TREE_TYPE (exp);
+
+ if (TREE_CODE (exp) == COND_EXPR)
+ {
+ tree op1
+ = build_unary_op (INDIRECT_REF, NULL_TREE,
+ build_component_ref (TREE_OPERAND (exp, 1),
+ NULL_TREE,
+ TYPE_FIELDS (type),
+ false));
+ tree op2
+ = build_unary_op (INDIRECT_REF, NULL_TREE,
+ build_component_ref (TREE_OPERAND (exp, 2),
+ NULL_TREE,
+ TYPE_FIELDS (type),
+ false));
+
+ exp = build3 (COND_EXPR,
+ TREE_TYPE (TREE_TYPE (TYPE_FIELDS (type))),
+ TREE_OPERAND (exp, 0), op1, op2);
+ }
+ else
+ {
+ exp = build_unary_op (INDIRECT_REF, NULL_TREE,
+ build_component_ref (exp, NULL_TREE,
+ TYPE_FIELDS (type),
+ false));
+ TREE_READONLY (exp) = read_only;
+ TREE_THIS_NOTRAP (exp) = no_trap;
+ }
+ }
+
+ else if (code == NULL_EXPR)
+ exp = build1 (NULL_EXPR,
+ TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (type)))),
+ TREE_OPERAND (exp, 0));
+ break;
+
+ case RECORD_TYPE:
+ /* If this is a padded type and it contains a template, convert to the
+ unpadded type first. */
+ if (TYPE_PADDING_P (type)
+ && TREE_CODE (TREE_TYPE (TYPE_FIELDS (type))) == RECORD_TYPE
+ && TYPE_CONTAINS_TEMPLATE_P (TREE_TYPE (TYPE_FIELDS (type))))
+ {
+ exp = convert (TREE_TYPE (TYPE_FIELDS (type)), exp);
+ type = TREE_TYPE (exp);
+ }
+
+ if (TYPE_CONTAINS_TEMPLATE_P (type))
+ {
+ exp = build_component_ref (exp, NULL_TREE,
+ DECL_CHAIN (TYPE_FIELDS (type)),
+ false);
+ type = TREE_TYPE (exp);
+
+ /* If the array type is padded, convert to the unpadded type. */
+ if (TYPE_IS_PADDING_P (type))
+ exp = convert (TREE_TYPE (TYPE_FIELDS (type)), exp);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return exp;
+}
+
+/* Return true if EXPR is an expression that can be folded as an operand
+ of a VIEW_CONVERT_EXPR. See ada-tree.h for a complete rationale. */
+
+static bool
+can_fold_for_view_convert_p (tree expr)
+{
+ tree t1, t2;
+
+ /* The folder will fold NOP_EXPRs between integral types with the same
+ precision (in the middle-end's sense). We cannot allow it if the
+ types don't have the same precision in the Ada sense as well. */
+ if (TREE_CODE (expr) != NOP_EXPR)
+ return true;
+
+ t1 = TREE_TYPE (expr);
+ t2 = TREE_TYPE (TREE_OPERAND (expr, 0));
+
+ /* Defer to the folder for non-integral conversions. */
+ if (!(INTEGRAL_TYPE_P (t1) && INTEGRAL_TYPE_P (t2)))
+ return true;
+
+ /* Only fold conversions that preserve both precisions. */
+ if (TYPE_PRECISION (t1) == TYPE_PRECISION (t2)
+ && operand_equal_p (rm_size (t1), rm_size (t2), 0))
+ return true;
+
+ return false;
+}
+
+/* Return an expression that does an unchecked conversion of EXPR to TYPE.
+ If NOTRUNC_P is true, truncation operations should be suppressed.
+
+ Special care is required with (source or target) integral types whose
+ precision is not equal to their size, to make sure we fetch or assign
+ the value bits whose location might depend on the endianness, e.g.
+
+ Rmsize : constant := 8;
+ subtype Int is Integer range 0 .. 2 ** Rmsize - 1;
+
+ type Bit_Array is array (1 .. Rmsize) of Boolean;
+ pragma Pack (Bit_Array);
+
+ function To_Bit_Array is new Unchecked_Conversion (Int, Bit_Array);
+
+ Value : Int := 2#1000_0001#;
+ Vbits : Bit_Array := To_Bit_Array (Value);
+
+ we expect the 8 bits at Vbits'Address to always contain Value, while
+ their original location depends on the endianness, at Value'Address
+ on a little-endian architecture but not on a big-endian one. */
+
+tree
+unchecked_convert (tree type, tree expr, bool notrunc_p)
+{
+ tree etype = TREE_TYPE (expr);
+ enum tree_code ecode = TREE_CODE (etype);
+ enum tree_code code = TREE_CODE (type);
+ int c;
+
+ /* If the expression is already of the right type, we are done. */
+ if (etype == type)
+ return expr;
+
+  /* If both types are integral, just do a normal conversion.
+ Likewise for a conversion to an unconstrained array. */
+ if ((((INTEGRAL_TYPE_P (type)
+ && !(code == INTEGER_TYPE && TYPE_VAX_FLOATING_POINT_P (type)))
+ || (POINTER_TYPE_P (type) && !TYPE_IS_THIN_POINTER_P (type))
+ || (code == RECORD_TYPE && TYPE_JUSTIFIED_MODULAR_P (type)))
+ && ((INTEGRAL_TYPE_P (etype)
+ && !(ecode == INTEGER_TYPE && TYPE_VAX_FLOATING_POINT_P (etype)))
+ || (POINTER_TYPE_P (etype) && !TYPE_IS_THIN_POINTER_P (etype))
+ || (ecode == RECORD_TYPE && TYPE_JUSTIFIED_MODULAR_P (etype))))
+ || code == UNCONSTRAINED_ARRAY_TYPE)
+ {
+ if (ecode == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (etype))
+ {
+ tree ntype = copy_type (etype);
+ TYPE_BIASED_REPRESENTATION_P (ntype) = 0;
+ TYPE_MAIN_VARIANT (ntype) = ntype;
+ expr = build1 (NOP_EXPR, ntype, expr);
+ }
+
+ if (code == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (type))
+ {
+ tree rtype = copy_type (type);
+ TYPE_BIASED_REPRESENTATION_P (rtype) = 0;
+ TYPE_MAIN_VARIANT (rtype) = rtype;
+ expr = convert (rtype, expr);
+ expr = build1 (NOP_EXPR, type, expr);
+ }
+ else
+ expr = convert (type, expr);
+ }
+
+ /* If we are converting to an integral type whose precision is not equal
+     to its size, first unchecked convert to a record type that contains a
+     field of the given precision.  Then extract the field.  */
+ else if (INTEGRAL_TYPE_P (type)
+ && TYPE_RM_SIZE (type)
+ && 0 != compare_tree_int (TYPE_RM_SIZE (type),
+ GET_MODE_BITSIZE (TYPE_MODE (type))))
+ {
+ tree rec_type = make_node (RECORD_TYPE);
+ unsigned HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_RM_SIZE (type));
+ tree field_type, field;
+
+ if (TYPE_UNSIGNED (type))
+ field_type = make_unsigned_type (prec);
+ else
+ field_type = make_signed_type (prec);
+ SET_TYPE_RM_SIZE (field_type, TYPE_RM_SIZE (type));
+
+ field = create_field_decl (get_identifier ("OBJ"), field_type, rec_type,
+ NULL_TREE, bitsize_zero_node, 1, 0);
+
+ finish_record_type (rec_type, field, 1, false);
+
+ expr = unchecked_convert (rec_type, expr, notrunc_p);
+ expr = build_component_ref (expr, NULL_TREE, field, false);
+ expr = fold_build1 (NOP_EXPR, type, expr);
+ }
+
+ /* Similarly if we are converting from an integral type whose precision is
+ not equal to its size, first copy into a field of the given precision
+ and unchecked convert the record type. */
+ else if (INTEGRAL_TYPE_P (etype)
+ && TYPE_RM_SIZE (etype)
+ && 0 != compare_tree_int (TYPE_RM_SIZE (etype),
+ GET_MODE_BITSIZE (TYPE_MODE (etype))))
+ {
+ tree rec_type = make_node (RECORD_TYPE);
+ unsigned HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_RM_SIZE (etype));
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 1);
+ tree field_type, field;
+
+ if (TYPE_UNSIGNED (etype))
+ field_type = make_unsigned_type (prec);
+ else
+ field_type = make_signed_type (prec);
+ SET_TYPE_RM_SIZE (field_type, TYPE_RM_SIZE (etype));
+
+ field = create_field_decl (get_identifier ("OBJ"), field_type, rec_type,
+ NULL_TREE, bitsize_zero_node, 1, 0);
+
+ finish_record_type (rec_type, field, 1, false);
+
+ expr = fold_build1 (NOP_EXPR, field_type, expr);
+ CONSTRUCTOR_APPEND_ELT (v, field, expr);
+ expr = gnat_build_constructor (rec_type, v);
+ expr = unchecked_convert (type, expr, notrunc_p);
+ }
+
+ /* If we are converting from a scalar type to a type with a different size,
+ we need to pad to have the same size on both sides.
+
+ ??? We cannot do it unconditionally because unchecked conversions are
+ used liberally by the front-end to implement polymorphism, e.g. in:
+
+ S191s : constant ada__tags__addr_ptr := ada__tags__addr_ptr!(S190s);
+ return p___size__4 (p__object!(S191s.all));
+
+ so we skip all expressions that are references. */
+ else if (!REFERENCE_CLASS_P (expr)
+ && !AGGREGATE_TYPE_P (etype)
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && (c = tree_int_cst_compare (TYPE_SIZE (etype), TYPE_SIZE (type))))
+ {
+ if (c < 0)
+ {
+ expr = convert (maybe_pad_type (etype, TYPE_SIZE (type), 0, Empty,
+ false, false, false, true),
+ expr);
+ expr = unchecked_convert (type, expr, notrunc_p);
+ }
+ else
+ {
+ tree rec_type = maybe_pad_type (type, TYPE_SIZE (etype), 0, Empty,
+ false, false, false, true);
+ expr = unchecked_convert (rec_type, expr, notrunc_p);
+ expr = build_component_ref (expr, NULL_TREE, TYPE_FIELDS (rec_type),
+ false);
+ }
+ }
+
+ /* We have a special case when we are converting between two unconstrained
+ array types. In that case, take the address, convert the fat pointer
+ types, and dereference. */
+ else if (ecode == code && code == UNCONSTRAINED_ARRAY_TYPE)
+ expr = build_unary_op (INDIRECT_REF, NULL_TREE,
+ build1 (VIEW_CONVERT_EXPR, TREE_TYPE (type),
+ build_unary_op (ADDR_EXPR, NULL_TREE,
+ expr)));
+
+ /* Another special case is when we are converting to a vector type from its
+   representative array type; this is a regular conversion.  */
+ else if (code == VECTOR_TYPE
+ && ecode == ARRAY_TYPE
+ && gnat_types_compatible_p (TYPE_REPRESENTATIVE_ARRAY (type),
+ etype))
+ expr = convert (type, expr);
+
+ /* If we are converting a CONSTRUCTOR to a more aligned RECORD_TYPE, bump
+ the alignment of the CONSTRUCTOR to speed up the copy operation. */
+ else if (TREE_CODE (expr) == CONSTRUCTOR
+ && code == RECORD_TYPE
+ && TYPE_ALIGN (etype) < TYPE_ALIGN (type))
+ {
+ expr = convert (maybe_pad_type (etype, NULL_TREE, TYPE_ALIGN (type),
+ Empty, false, false, false, true),
+ expr);
+ return unchecked_convert (type, expr, notrunc_p);
+ }
+
+ /* Otherwise, just build a VIEW_CONVERT_EXPR of the expression. */
+ else
+ {
+ expr = maybe_unconstrained_array (expr);
+ etype = TREE_TYPE (expr);
+ ecode = TREE_CODE (etype);
+ if (can_fold_for_view_convert_p (expr))
+ expr = fold_build1 (VIEW_CONVERT_EXPR, type, expr);
+ else
+ expr = build1 (VIEW_CONVERT_EXPR, type, expr);
+ }
+
+ /* If the result is an integral type whose precision is not equal to its
+ size, sign- or zero-extend the result. We need not do this if the input
+ is an integral type of the same precision and signedness or if the output
+ is a biased type or if both the input and output are unsigned. */
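+  /* For instance, for an 8-bit RM size in a 32-bit mode, the value is
+     shifted left and then right by 24 bits, which extends it according to
+     the signedness of BASE_TYPE.  */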
+ if (!notrunc_p
+ && INTEGRAL_TYPE_P (type) && TYPE_RM_SIZE (type)
+ && !(code == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (type))
+ && 0 != compare_tree_int (TYPE_RM_SIZE (type),
+ GET_MODE_BITSIZE (TYPE_MODE (type)))
+ && !(INTEGRAL_TYPE_P (etype)
+ && TYPE_UNSIGNED (type) == TYPE_UNSIGNED (etype)
+ && operand_equal_p (TYPE_RM_SIZE (type),
+ (TYPE_RM_SIZE (etype) != 0
+ ? TYPE_RM_SIZE (etype) : TYPE_SIZE (etype)),
+ 0))
+ && !(TYPE_UNSIGNED (type) && TYPE_UNSIGNED (etype)))
+ {
+ tree base_type
+ = gnat_type_for_mode (TYPE_MODE (type), TYPE_UNSIGNED (type));
+ tree shift_expr
+ = convert (base_type,
+ size_binop (MINUS_EXPR,
+ bitsize_int
+ (GET_MODE_BITSIZE (TYPE_MODE (type))),
+ TYPE_RM_SIZE (type)));
+ expr
+ = convert (type,
+ build_binary_op (RSHIFT_EXPR, base_type,
+ build_binary_op (LSHIFT_EXPR, base_type,
+ convert (base_type, expr),
+ shift_expr),
+ shift_expr));
+ }
+
+ /* An unchecked conversion should never raise Constraint_Error. The code
+ below assumes that GCC's conversion routines overflow the same way that
+ the underlying hardware does. This is probably true. In the rare case
+ when it is false, we can rely on the fact that such conversions are
+ erroneous anyway. */
+ if (TREE_CODE (expr) == INTEGER_CST)
+ TREE_OVERFLOW (expr) = 0;
+
+  /* If the sizes of the types differ and this is a VIEW_CONVERT_EXPR,
+     show that it is no longer constant.  */
+ if (TREE_CODE (expr) == VIEW_CONVERT_EXPR
+ && !operand_equal_p (TYPE_SIZE_UNIT (type), TYPE_SIZE_UNIT (etype),
+ OEP_ONLY_CONST))
+ TREE_CONSTANT (expr) = 0;
+
+ return expr;
+}
+
+/* Return the appropriate GCC tree code for the specified GNAT_TYPE,
+ the latter being a record type as predicated by Is_Record_Type. */
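+
+/* For instance, given
+
+     type Rec (D : Boolean := False) is record
+       case D is
+         when False => X : Integer;
+         when True  => F : Float;
+       end case;
+     end record;
+     pragma Unchecked_Union (Rec);
+
+   all non-discriminant components are in the variant part, so the type is
+   translated into a UNION_TYPE.  */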
+
+enum tree_code
+tree_code_for_record_type (Entity_Id gnat_type)
+{
+ Node_Id component_list, component;
+
+ /* Return UNION_TYPE if it's an Unchecked_Union whose non-discriminant
+ fields are all in the variant part. Otherwise, return RECORD_TYPE. */
+ if (!Is_Unchecked_Union (gnat_type))
+ return RECORD_TYPE;
+
+ gnat_type = Implementation_Base_Type (gnat_type);
+ component_list
+ = Component_List (Type_Definition (Declaration_Node (gnat_type)));
+
+ for (component = First_Non_Pragma (Component_Items (component_list));
+ Present (component);
+ component = Next_Non_Pragma (component))
+ if (Ekind (Defining_Entity (component)) == E_Component)
+ return RECORD_TYPE;
+
+ return UNION_TYPE;
+}
+
+/* Return true if GNAT_TYPE is a "double" floating-point type, i.e. whose
+ size is equal to 64 bits, or an array of such a type. Set ALIGN_CLAUSE
+ according to the presence of an alignment clause on the type or, if it
+ is an array, on the component type. */
+
+bool
+is_double_float_or_array (Entity_Id gnat_type, bool *align_clause)
+{
+ gnat_type = Underlying_Type (gnat_type);
+
+ *align_clause = Present (Alignment_Clause (gnat_type));
+
+ if (Is_Array_Type (gnat_type))
+ {
+ gnat_type = Underlying_Type (Component_Type (gnat_type));
+ if (Present (Alignment_Clause (gnat_type)))
+ *align_clause = true;
+ }
+
+ if (!Is_Floating_Point_Type (gnat_type))
+ return false;
+
+ if (UI_To_Int (Esize (gnat_type)) != 64)
+ return false;
+
+ return true;
+}
+
+/* Return true if GNAT_TYPE is a "double" or larger scalar type, i.e. whose
+ size is greater or equal to 64 bits, or an array of such a type. Set
+ ALIGN_CLAUSE according to the presence of an alignment clause on the
+ type or, if it is an array, on the component type. */
+
+bool
+is_double_scalar_or_array (Entity_Id gnat_type, bool *align_clause)
+{
+ gnat_type = Underlying_Type (gnat_type);
+
+ *align_clause = Present (Alignment_Clause (gnat_type));
+
+ if (Is_Array_Type (gnat_type))
+ {
+ gnat_type = Underlying_Type (Component_Type (gnat_type));
+ if (Present (Alignment_Clause (gnat_type)))
+ *align_clause = true;
+ }
+
+ if (!Is_Scalar_Type (gnat_type))
+ return false;
+
+ if (UI_To_Int (Esize (gnat_type)) < 64)
+ return false;
+
+ return true;
+}
+
+/* Return true if GNU_TYPE is suitable as the type of a non-aliased
+ component of an aggregate type. */
+
+bool
+type_for_nonaliased_component_p (tree gnu_type)
+{
+ /* If the type is passed by reference, we may have pointers to the
+ component so it cannot be made non-aliased. */
+ if (must_pass_by_ref (gnu_type) || default_pass_by_ref (gnu_type))
+ return false;
+
+ /* We used to say that any component of aggregate type is aliased
+ because the front-end may take 'Reference of it. The front-end
+ has been enhanced in the meantime so as to use a renaming instead
+ in most cases, but the back-end can probably take the address of
+ such a component too so we go for the conservative stance.
+
+ For instance, we might need the address of any array type, even
+ if normally passed by copy, to construct a fat pointer if the
+ component is used as an actual for an unconstrained formal.
+
+ Likewise for record types: even if a specific record subtype is
+ passed by copy, the parent type might be passed by ref (e.g. if
+ it's of variable size) and we might take the address of a child
+ component to pass to a parent formal. We have no way to check
+ for such conditions here. */
+ if (AGGREGATE_TYPE_P (gnu_type))
+ return false;
+
+ return true;
+}
+
+/* Return true if TYPE is a smaller form of ORIG_TYPE. */
+
+bool
+smaller_form_type_p (tree type, tree orig_type)
+{
+ tree size, osize;
+
+ /* We're not interested in variants here. */
+ if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig_type))
+ return false;
+
+ /* Like a variant, a packable version keeps the original TYPE_NAME. */
+ if (TYPE_NAME (type) != TYPE_NAME (orig_type))
+ return false;
+
+ size = TYPE_SIZE (type);
+ osize = TYPE_SIZE (orig_type);
+
+ if (!(TREE_CODE (size) == INTEGER_CST && TREE_CODE (osize) == INTEGER_CST))
+ return false;
+
+ return tree_int_cst_lt (size, osize) != 0;
+}
+
+/* Perform final processing on global variables. */
+
+static GTY (()) tree dummy_global;
+
+void
+gnat_write_global_declarations (void)
+{
+ unsigned int i;
+ tree iter;
+
+ /* If we have declared types as used at the global level, insert them in
+ the global hash table. We use a dummy variable for this purpose. */
+ if (types_used_by_cur_var_decl && !types_used_by_cur_var_decl->is_empty ())
+ {
+ struct varpool_node *node;
+ char *label;
+
+ ASM_FORMAT_PRIVATE_NAME (label, first_global_object_name, 0);
+ dummy_global
+ = build_decl (BUILTINS_LOCATION, VAR_DECL, get_identifier (label),
+ void_type_node);
+ TREE_STATIC (dummy_global) = 1;
+ TREE_ASM_WRITTEN (dummy_global) = 1;
+ node = varpool_node_for_decl (dummy_global);
+ node->force_output = 1;
+
+ while (!types_used_by_cur_var_decl->is_empty ())
+ {
+ tree t = types_used_by_cur_var_decl->pop ();
+ types_used_by_var_decl_insert (t, dummy_global);
+ }
+ }
+
+ /* Output debug information for all global type declarations first. This
+ ensures that global types whose compilation hasn't been finalized yet,
+ for example pointers to Taft amendment types, have their compilation
+ finalized in the right context. */
+ FOR_EACH_VEC_SAFE_ELT (global_decls, i, iter)
+ if (TREE_CODE (iter) == TYPE_DECL)
+ debug_hooks->global_decl (iter);
+
+ /* Proceed to optimize and emit assembly. */
+ finalize_compilation_unit ();
+
+ /* After cgraph has had a chance to emit everything that's going to
+ be emitted, output debug information for the rest of globals. */
+ if (!seen_error ())
+ {
+ timevar_push (TV_SYMOUT);
+ FOR_EACH_VEC_SAFE_ELT (global_decls, i, iter)
+ if (TREE_CODE (iter) != TYPE_DECL)
+ debug_hooks->global_decl (iter);
+ timevar_pop (TV_SYMOUT);
+ }
+}
+
+/* ************************************************************************
+ * * GCC builtins support *
+ * ************************************************************************ */
+
+/* The general scheme is fairly simple:
+
+ For each builtin function/type to be declared, gnat_install_builtins calls
+ internal facilities which eventually get to gnat_push_decl, which in turn
+   tracks the builtin function decls declared this way in the
+   'builtin_decls' global data structure.  When an Intrinsic subprogram
+   declaration is processed, we search this global data structure to
+   retrieve the associated BUILT_IN DECL
+ node. */
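+
+/* For instance, an Ada declaration such as
+
+     function Sin (X : Long_Float) return Long_Float;
+     pragma Import (Intrinsic, Sin, "sin");
+
+   is eventually matched against the builtins recorded here, by looking up
+   the "sin" IDENTIFIER_NODE with builtin_decl_for below.  */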
+
+/* Search the chain of currently available builtin declarations for a node
+ corresponding to function NAME (an IDENTIFIER_NODE). Return the first node
+ found, if any, or NULL_TREE otherwise. */
+tree
+builtin_decl_for (tree name)
+{
+ unsigned i;
+ tree decl;
+
+ FOR_EACH_VEC_SAFE_ELT (builtin_decls, i, decl)
+ if (DECL_NAME (decl) == name)
+ return decl;
+
+ return NULL_TREE;
+}
+
+/* The code below eventually exposes gnat_install_builtins, which declares
+ the builtin types and functions we might need, either internally or as
+ user accessible facilities.
+
+ ??? This is a first implementation shot, still in rough shape. It is
+ heavily inspired from the "C" family implementation, with chunks copied
+ verbatim from there.
+
+ Two obvious TODO candidates are
+ o Use a more efficient name/decl mapping scheme
+ o Devise a middle-end infrastructure to avoid having to copy
+ pieces between front-ends. */
+
+/* ----------------------------------------------------------------------- *
+ * BUILTIN ELEMENTARY TYPES *
+ * ----------------------------------------------------------------------- */
+
+/* Standard data types to be used in builtin argument declarations. */
+
+enum c_tree_index
+{
+ CTI_SIGNED_SIZE_TYPE, /* For format checking only. */
+ CTI_STRING_TYPE,
+ CTI_CONST_STRING_TYPE,
+
+ CTI_MAX
+};
+
+static tree c_global_trees[CTI_MAX];
+
+#define signed_size_type_node c_global_trees[CTI_SIGNED_SIZE_TYPE]
+#define string_type_node c_global_trees[CTI_STRING_TYPE]
+#define const_string_type_node c_global_trees[CTI_CONST_STRING_TYPE]
+
+/* ??? In addition to some attribute handlers, we currently don't support a
+   (small) number of builtin types, which in turn inhibits support for a
+   number of builtin functions. */
+#define wint_type_node void_type_node
+#define intmax_type_node void_type_node
+#define uintmax_type_node void_type_node
+
+/* Build the void_list_node (void_type_node having been created). */
+
+static tree
+build_void_list_node (void)
+{
+ tree t = build_tree_list (NULL_TREE, void_type_node);
+ return t;
+}
+
+/* Used to help initialize the builtin-types.def table. When a type of
+ the correct size doesn't exist, use error_mark_node instead of NULL.
+   The latter results in segfaults even when a decl using the type doesn't
+ get invoked. */
+
+static tree
+builtin_type_for_size (int size, bool unsignedp)
+{
+ tree type = gnat_type_for_size (size, unsignedp);
+ return type ? type : error_mark_node;
+}
+
+/* Build/push the elementary type decls that builtin functions/types
+ will need. */
+
+static void
+install_builtin_elementary_types (void)
+{
+ signed_size_type_node = gnat_signed_type (size_type_node);
+ pid_type_node = integer_type_node;
+ void_list_node = build_void_list_node ();
+
+ string_type_node = build_pointer_type (char_type_node);
+ const_string_type_node
+ = build_pointer_type (build_qualified_type
+ (char_type_node, TYPE_QUAL_CONST));
+}
+
+/* ----------------------------------------------------------------------- *
+ * BUILTIN FUNCTION TYPES *
+ * ----------------------------------------------------------------------- */
+
+/* Now, builtin function types per se. */
+
+enum c_builtin_type
+{
+#define DEF_PRIMITIVE_TYPE(NAME, VALUE) NAME,
+#define DEF_FUNCTION_TYPE_0(NAME, RETURN) NAME,
+#define DEF_FUNCTION_TYPE_1(NAME, RETURN, ARG1) NAME,
+#define DEF_FUNCTION_TYPE_2(NAME, RETURN, ARG1, ARG2) NAME,
+#define DEF_FUNCTION_TYPE_3(NAME, RETURN, ARG1, ARG2, ARG3) NAME,
+#define DEF_FUNCTION_TYPE_4(NAME, RETURN, ARG1, ARG2, ARG3, ARG4) NAME,
+#define DEF_FUNCTION_TYPE_5(NAME, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5) NAME,
+#define DEF_FUNCTION_TYPE_6(NAME, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6) NAME,
+#define DEF_FUNCTION_TYPE_7(NAME, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7) NAME,
+#define DEF_FUNCTION_TYPE_8(NAME, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7, ARG8) NAME,
+#define DEF_FUNCTION_TYPE_VAR_0(NAME, RETURN) NAME,
+#define DEF_FUNCTION_TYPE_VAR_1(NAME, RETURN, ARG1) NAME,
+#define DEF_FUNCTION_TYPE_VAR_2(NAME, RETURN, ARG1, ARG2) NAME,
+#define DEF_FUNCTION_TYPE_VAR_3(NAME, RETURN, ARG1, ARG2, ARG3) NAME,
+#define DEF_FUNCTION_TYPE_VAR_4(NAME, RETURN, ARG1, ARG2, ARG3, ARG4) NAME,
+#define DEF_FUNCTION_TYPE_VAR_5(NAME, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5) \
+ NAME,
+#define DEF_POINTER_TYPE(NAME, TYPE) NAME,
+#include "builtin-types.def"
+#undef DEF_PRIMITIVE_TYPE
+#undef DEF_FUNCTION_TYPE_0
+#undef DEF_FUNCTION_TYPE_1
+#undef DEF_FUNCTION_TYPE_2
+#undef DEF_FUNCTION_TYPE_3
+#undef DEF_FUNCTION_TYPE_4
+#undef DEF_FUNCTION_TYPE_5
+#undef DEF_FUNCTION_TYPE_6
+#undef DEF_FUNCTION_TYPE_7
+#undef DEF_FUNCTION_TYPE_8
+#undef DEF_FUNCTION_TYPE_VAR_0
+#undef DEF_FUNCTION_TYPE_VAR_1
+#undef DEF_FUNCTION_TYPE_VAR_2
+#undef DEF_FUNCTION_TYPE_VAR_3
+#undef DEF_FUNCTION_TYPE_VAR_4
+#undef DEF_FUNCTION_TYPE_VAR_5
+#undef DEF_POINTER_TYPE
+ BT_LAST
+};
+
+typedef enum c_builtin_type builtin_type;
+
+/* A temporary array used in communication with def_fn_type. */
+static GTY(()) tree builtin_types[(int) BT_LAST + 1];
+
+/* A helper function for install_builtin_function_types. Build the function
+ type for DEF with return type RET and N arguments. If VAR is true, then
+ the function should be variadic after those N arguments.
+
+ Takes special care not to ICE if any of the types involved are
+ error_mark_node, which indicates that said type is not in fact available
+ (see builtin_type_for_size); in that case the function type as a whole is
+ set to error_mark_node. */
+
+static void
+def_fn_type (builtin_type def, builtin_type ret, bool var, int n, ...)
+{
+ tree t;
+ tree *args = XALLOCAVEC (tree, n);
+ va_list list;
+ int i;
+
+ va_start (list, n);
+ for (i = 0; i < n; ++i)
+ {
+ builtin_type a = (builtin_type) va_arg (list, int);
+ t = builtin_types[a];
+ if (t == error_mark_node)
+ goto egress;
+ args[i] = t;
+ }
+
+ t = builtin_types[ret];
+ if (t == error_mark_node)
+ goto egress;
+ if (var)
+ t = build_varargs_function_type_array (t, n, args);
+ else
+ t = build_function_type_array (t, n, args);
+
+ egress:
+ builtin_types[def] = t;
+ va_end (list);
+}
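+
+/* As an illustration, a hypothetical builtin-types.def entry such as
+
+ DEF_FUNCTION_TYPE_2 (BT_FN_INT_INT_INT, BT_INT, BT_INT, BT_INT)
+
+ would be expanded by install_builtin_function_types below into
+
+ def_fn_type (BT_FN_INT_INT_INT, BT_INT, 0, 2, BT_INT, BT_INT);
+
+ i.e. a non-variadic function type taking two ints and returning int. */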
+
+/* Build the builtin function types and install them in the builtin_types
+ array for later use in builtin function decls. */
+
+static void
+install_builtin_function_types (void)
+{
+ tree va_list_ref_type_node;
+ tree va_list_arg_type_node;
+
+ if (TREE_CODE (va_list_type_node) == ARRAY_TYPE)
+ {
+ va_list_arg_type_node = va_list_ref_type_node =
+ build_pointer_type (TREE_TYPE (va_list_type_node));
+ }
+ else
+ {
+ va_list_arg_type_node = va_list_type_node;
+ va_list_ref_type_node = build_reference_type (va_list_type_node);
+ }
+
+#define DEF_PRIMITIVE_TYPE(ENUM, VALUE) \
+ builtin_types[ENUM] = VALUE;
+#define DEF_FUNCTION_TYPE_0(ENUM, RETURN) \
+ def_fn_type (ENUM, RETURN, 0, 0);
+#define DEF_FUNCTION_TYPE_1(ENUM, RETURN, ARG1) \
+ def_fn_type (ENUM, RETURN, 0, 1, ARG1);
+#define DEF_FUNCTION_TYPE_2(ENUM, RETURN, ARG1, ARG2) \
+ def_fn_type (ENUM, RETURN, 0, 2, ARG1, ARG2);
+#define DEF_FUNCTION_TYPE_3(ENUM, RETURN, ARG1, ARG2, ARG3) \
+ def_fn_type (ENUM, RETURN, 0, 3, ARG1, ARG2, ARG3);
+#define DEF_FUNCTION_TYPE_4(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4) \
+ def_fn_type (ENUM, RETURN, 0, 4, ARG1, ARG2, ARG3, ARG4);
+#define DEF_FUNCTION_TYPE_5(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5) \
+ def_fn_type (ENUM, RETURN, 0, 5, ARG1, ARG2, ARG3, ARG4, ARG5);
+#define DEF_FUNCTION_TYPE_6(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5, \
+ ARG6) \
+ def_fn_type (ENUM, RETURN, 0, 6, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
+#define DEF_FUNCTION_TYPE_7(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5, \
+ ARG6, ARG7) \
+ def_fn_type (ENUM, RETURN, 0, 7, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7);
+#define DEF_FUNCTION_TYPE_8(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5, \
+ ARG6, ARG7, ARG8) \
+ def_fn_type (ENUM, RETURN, 0, 8, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, \
+ ARG7, ARG8);
+#define DEF_FUNCTION_TYPE_VAR_0(ENUM, RETURN) \
+ def_fn_type (ENUM, RETURN, 1, 0);
+#define DEF_FUNCTION_TYPE_VAR_1(ENUM, RETURN, ARG1) \
+ def_fn_type (ENUM, RETURN, 1, 1, ARG1);
+#define DEF_FUNCTION_TYPE_VAR_2(ENUM, RETURN, ARG1, ARG2) \
+ def_fn_type (ENUM, RETURN, 1, 2, ARG1, ARG2);
+#define DEF_FUNCTION_TYPE_VAR_3(ENUM, RETURN, ARG1, ARG2, ARG3) \
+ def_fn_type (ENUM, RETURN, 1, 3, ARG1, ARG2, ARG3);
+#define DEF_FUNCTION_TYPE_VAR_4(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4) \
+ def_fn_type (ENUM, RETURN, 1, 4, ARG1, ARG2, ARG3, ARG4);
+#define DEF_FUNCTION_TYPE_VAR_5(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5) \
+ def_fn_type (ENUM, RETURN, 1, 5, ARG1, ARG2, ARG3, ARG4, ARG5);
+#define DEF_POINTER_TYPE(ENUM, TYPE) \
+ builtin_types[(int) ENUM] = build_pointer_type (builtin_types[(int) TYPE]);
+
+#include "builtin-types.def"
+
+#undef DEF_PRIMITIVE_TYPE
+#undef DEF_FUNCTION_TYPE_0
+#undef DEF_FUNCTION_TYPE_1
+#undef DEF_FUNCTION_TYPE_2
+#undef DEF_FUNCTION_TYPE_3
+#undef DEF_FUNCTION_TYPE_4
+#undef DEF_FUNCTION_TYPE_5
+#undef DEF_FUNCTION_TYPE_6
+#undef DEF_FUNCTION_TYPE_7
+#undef DEF_FUNCTION_TYPE_8
+#undef DEF_FUNCTION_TYPE_VAR_0
+#undef DEF_FUNCTION_TYPE_VAR_1
+#undef DEF_FUNCTION_TYPE_VAR_2
+#undef DEF_FUNCTION_TYPE_VAR_3
+#undef DEF_FUNCTION_TYPE_VAR_4
+#undef DEF_FUNCTION_TYPE_VAR_5
+#undef DEF_POINTER_TYPE
+ builtin_types[(int) BT_LAST] = NULL_TREE;
+}
+
+/* ----------------------------------------------------------------------- *
+ * BUILTIN ATTRIBUTES *
+ * ----------------------------------------------------------------------- */
+
+enum built_in_attribute
+{
+#define DEF_ATTR_NULL_TREE(ENUM) ENUM,
+#define DEF_ATTR_INT(ENUM, VALUE) ENUM,
+#define DEF_ATTR_STRING(ENUM, VALUE) ENUM,
+#define DEF_ATTR_IDENT(ENUM, STRING) ENUM,
+#define DEF_ATTR_TREE_LIST(ENUM, PURPOSE, VALUE, CHAIN) ENUM,
+#include "builtin-attrs.def"
+#undef DEF_ATTR_NULL_TREE
+#undef DEF_ATTR_INT
+#undef DEF_ATTR_STRING
+#undef DEF_ATTR_IDENT
+#undef DEF_ATTR_TREE_LIST
+ ATTR_LAST
+};
+
+static GTY(()) tree built_in_attributes[(int) ATTR_LAST];
+
+static void
+install_builtin_attributes (void)
+{
+ /* Fill in the built_in_attributes array. */
+#define DEF_ATTR_NULL_TREE(ENUM) \
+ built_in_attributes[(int) ENUM] = NULL_TREE;
+#define DEF_ATTR_INT(ENUM, VALUE) \
+ built_in_attributes[(int) ENUM] = build_int_cst (NULL_TREE, VALUE);
+#define DEF_ATTR_STRING(ENUM, VALUE) \
+ built_in_attributes[(int) ENUM] = build_string (strlen (VALUE), VALUE);
+#define DEF_ATTR_IDENT(ENUM, STRING) \
+ built_in_attributes[(int) ENUM] = get_identifier (STRING);
+#define DEF_ATTR_TREE_LIST(ENUM, PURPOSE, VALUE, CHAIN) \
+ built_in_attributes[(int) ENUM] \
+ = tree_cons (built_in_attributes[(int) PURPOSE], \
+ built_in_attributes[(int) VALUE], \
+ built_in_attributes[(int) CHAIN]);
+#include "builtin-attrs.def"
+#undef DEF_ATTR_NULL_TREE
+#undef DEF_ATTR_INT
+#undef DEF_ATTR_STRING
+#undef DEF_ATTR_IDENT
+#undef DEF_ATTR_TREE_LIST
+}
+
+/* Handle a "const" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_const_attribute (tree *node, tree ARG_UNUSED (name),
+ tree ARG_UNUSED (args), int ARG_UNUSED (flags),
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) == FUNCTION_DECL)
+ TREE_READONLY (*node) = 1;
+ else
+ *no_add_attrs = true;
+
+ return NULL_TREE;
+}
+
+/* Handle a "nothrow" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_nothrow_attribute (tree *node, tree ARG_UNUSED (name),
+ tree ARG_UNUSED (args), int ARG_UNUSED (flags),
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) == FUNCTION_DECL)
+ TREE_NOTHROW (*node) = 1;
+ else
+ *no_add_attrs = true;
+
+ return NULL_TREE;
+}
+
+/* Handle a "pure" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_pure_attribute (tree *node, tree name, tree ARG_UNUSED (args),
+ int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) == FUNCTION_DECL)
+ DECL_PURE_P (*node) = 1;
+ /* ??? TODO: Support types. */
+ else
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* Handle a "no vops" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_novops_attribute (tree *node, tree ARG_UNUSED (name),
+ tree ARG_UNUSED (args), int ARG_UNUSED (flags),
+ bool *ARG_UNUSED (no_add_attrs))
+{
+ gcc_assert (TREE_CODE (*node) == FUNCTION_DECL);
+ DECL_IS_NOVOPS (*node) = 1;
+ return NULL_TREE;
+}
+
+/* Helper for nonnull attribute handling; fetch the operand number
+ from the attribute argument list. */
+
+static bool
+get_nonnull_operand (tree arg_num_expr, unsigned HOST_WIDE_INT *valp)
+{
+ /* Verify the arg number is a constant. */
+ if (TREE_CODE (arg_num_expr) != INTEGER_CST
+ || TREE_INT_CST_HIGH (arg_num_expr) != 0)
+ return false;
+
+ *valp = TREE_INT_CST_LOW (arg_num_expr);
+ return true;
+}
+
+/* Handle the "nonnull" attribute. */
+static tree
+handle_nonnull_attribute (tree *node, tree ARG_UNUSED (name),
+ tree args, int ARG_UNUSED (flags),
+ bool *no_add_attrs)
+{
+ tree type = *node;
+ unsigned HOST_WIDE_INT attr_arg_num;
+
+ /* If no arguments are specified, all pointer arguments should be
+ non-null. Verify a full prototype is given so that the arguments
+ will have the correct types when we actually check them later. */
+ if (!args)
+ {
+ if (!prototype_p (type))
+ {
+ error ("nonnull attribute without arguments on a non-prototype");
+ *no_add_attrs = true;
+ }
+ return NULL_TREE;
+ }
+
+ /* Argument list specified. Verify that each argument number references
+ a pointer argument. */
+ for (attr_arg_num = 1; args; attr_arg_num++, args = TREE_CHAIN (args))
+ {
+ unsigned HOST_WIDE_INT arg_num = 0, ck_num;
+
+ if (!get_nonnull_operand (TREE_VALUE (args), &arg_num))
+ {
+ error ("nonnull argument has invalid operand number (argument %lu)",
+ (unsigned long) attr_arg_num);
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
+ if (prototype_p (type))
+ {
+ function_args_iterator iter;
+ tree argument;
+
+ function_args_iter_init (&iter, type);
+ for (ck_num = 1; ; ck_num++, function_args_iter_next (&iter))
+ {
+ argument = function_args_iter_cond (&iter);
+ if (!argument || ck_num == arg_num)
+ break;
+ }
+
+ if (!argument
+ || TREE_CODE (argument) == VOID_TYPE)
+ {
+ error ("nonnull argument with out-of-range operand number "
+ "(argument %lu, operand %lu)",
+ (unsigned long) attr_arg_num, (unsigned long) arg_num);
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
+ if (TREE_CODE (argument) != POINTER_TYPE)
+ {
+ error ("nonnull argument references non-pointer operand "
+ "(argument %lu, operand %lu)",
+ (unsigned long) attr_arg_num, (unsigned long) arg_num);
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+ }
+ }
+
+ return NULL_TREE;
+}
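+
+/* By way of example, a hypothetical declaration such as
+
+ extern void *my_memcpy (void *, const void *, size_t)
+ __attribute__ ((nonnull (1, 2)));
+
+ passes the checks above, whereas nonnull (3) on it would be rejected
+ since argument 3 is not of a pointer type. */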
+
+/* Handle a "sentinel" attribute. */
+
+static tree
+handle_sentinel_attribute (tree *node, tree name, tree args,
+ int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+ if (!prototype_p (*node))
+ {
+ warning (OPT_Wattributes,
+ "%qs attribute requires prototypes with named arguments",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+ else
+ {
+ if (!stdarg_p (*node))
+ {
+ warning (OPT_Wattributes,
+ "%qs attribute only applies to variadic functions",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+ }
+
+ if (args)
+ {
+ tree position = TREE_VALUE (args);
+
+ if (TREE_CODE (position) != INTEGER_CST)
+ {
+ warning (0, "requested position is not an integer constant");
+ *no_add_attrs = true;
+ }
+ else
+ {
+ if (tree_int_cst_lt (position, integer_zero_node))
+ {
+ warning (0, "requested position is less than zero");
+ *no_add_attrs = true;
+ }
+ }
+ }
+
+ return NULL_TREE;
+}
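+
+/* For instance, a hypothetical variadic prototype such as
+
+ extern void my_execl (const char *, ...)
+ __attribute__ ((sentinel));
+
+ is accepted by the checks above; applying the attribute to a
+ non-variadic function, or passing a negative position, is diagnosed
+ and the attribute dropped. */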
+
+/* Handle a "noreturn" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_noreturn_attribute (tree *node, tree name, tree ARG_UNUSED (args),
+ int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+ tree type = TREE_TYPE (*node);
+
+ /* See FIXME comment in c_common_attribute_table. */
+ if (TREE_CODE (*node) == FUNCTION_DECL)
+ TREE_THIS_VOLATILE (*node) = 1;
+ else if (TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE)
+ TREE_TYPE (*node)
+ = build_pointer_type
+ (build_type_variant (TREE_TYPE (type),
+ TYPE_READONLY (TREE_TYPE (type)), 1));
+ else
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* Handle a "leaf" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_leaf_attribute (tree *node, tree name,
+ tree ARG_UNUSED (args),
+ int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) != FUNCTION_DECL)
+ {
+ warning (OPT_Wattributes, "%qE attribute ignored", name);
+ *no_add_attrs = true;
+ }
+ if (!TREE_PUBLIC (*node))
+ {
+ warning (OPT_Wattributes, "%qE attribute has no effect", name);
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* Handle a "malloc" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_malloc_attribute (tree *node, tree name, tree ARG_UNUSED (args),
+ int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) == FUNCTION_DECL
+ && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (*node))))
+ DECL_IS_MALLOC (*node) = 1;
+ else
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* Fake handler for attributes we don't properly support. */
+
+tree
+fake_attribute_handler (tree * ARG_UNUSED (node),
+ tree ARG_UNUSED (name),
+ tree ARG_UNUSED (args),
+ int ARG_UNUSED (flags),
+ bool * ARG_UNUSED (no_add_attrs))
+{
+ return NULL_TREE;
+}
+
+/* Handle a "type_generic" attribute. */
+
+static tree
+handle_type_generic_attribute (tree *node, tree ARG_UNUSED (name),
+ tree ARG_UNUSED (args), int ARG_UNUSED (flags),
+ bool * ARG_UNUSED (no_add_attrs))
+{
+ /* Ensure we have a function type. */
+ gcc_assert (TREE_CODE (*node) == FUNCTION_TYPE);
+
+ /* Ensure we have a variadic function. */
+ gcc_assert (!prototype_p (*node) || stdarg_p (*node));
+
+ return NULL_TREE;
+}
+
+/* Handle a "vector_size" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_vector_size_attribute (tree *node, tree name, tree args,
+ int ARG_UNUSED (flags),
+ bool *no_add_attrs)
+{
+ unsigned HOST_WIDE_INT vecsize, nunits;
+ enum machine_mode orig_mode;
+ tree type = *node, new_type, size;
+
+ *no_add_attrs = true;
+
+ size = TREE_VALUE (args);
+
+ if (!tree_fits_uhwi_p (size))
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored",
+ IDENTIFIER_POINTER (name));
+ return NULL_TREE;
+ }
+
+ /* Get the vector size (in bytes). */
+ vecsize = tree_to_uhwi (size);
+
+ /* We need to provide for vector pointers, vector arrays, and
+ functions returning vectors. For example:
+
+ __attribute__((vector_size(16))) short *foo;
+
+ In this case, the mode is SI, but the type being modified is
+ HI, so we need to look further. */
+
+ while (POINTER_TYPE_P (type)
+ || TREE_CODE (type) == FUNCTION_TYPE
+ || TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+
+ /* Get the mode of the type being modified. */
+ orig_mode = TYPE_MODE (type);
+
+ if ((!INTEGRAL_TYPE_P (type)
+ && !SCALAR_FLOAT_TYPE_P (type)
+ && !FIXED_POINT_TYPE_P (type))
+ || (!SCALAR_FLOAT_MODE_P (orig_mode)
+ && GET_MODE_CLASS (orig_mode) != MODE_INT
+ && !ALL_SCALAR_FIXED_POINT_MODE_P (orig_mode))
+ || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
+ || TREE_CODE (type) == BOOLEAN_TYPE)
+ {
+ error ("invalid vector type for attribute %qs",
+ IDENTIFIER_POINTER (name));
+ return NULL_TREE;
+ }
+
+ if (vecsize % tree_to_uhwi (TYPE_SIZE_UNIT (type)))
+ {
+ error ("vector size not an integral multiple of component size");
+ return NULL;
+ }
+
+ if (vecsize == 0)
+ {
+ error ("zero vector size");
+ return NULL;
+ }
+
+ /* Calculate how many units fit in the vector. */
+ nunits = vecsize / tree_to_uhwi (TYPE_SIZE_UNIT (type));
+ if (nunits & (nunits - 1))
+ {
+ error ("number of components of the vector not a power of two");
+ return NULL_TREE;
+ }
+
+ new_type = build_vector_type (type, nunits);
+
+ /* Build back pointers if needed. */
+ *node = reconstruct_complex_type (*node, new_type);
+
+ return NULL_TREE;
+}
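+
+/* A typical use, mirroring the GNU C extension this handler implements:
+
+ typedef int v4si __attribute__ ((vector_size (16)));
+
+ Here the component type int is 4 bytes, so the code above computes
+ nunits = 16 / 4 = 4 and builds a 4-element integer vector type. */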
+
+/* Handle a "vector_type" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_vector_type_attribute (tree *node, tree name, tree ARG_UNUSED (args),
+ int ARG_UNUSED (flags),
+ bool *no_add_attrs)
+{
+ /* Vector representative type and size. */
+ tree rep_type = *node;
+ tree rep_size = TYPE_SIZE_UNIT (rep_type);
+
+ /* Vector size in bytes and number of units. */
+ unsigned HOST_WIDE_INT vec_bytes, vec_units;
+
+ /* Vector element type and mode. */
+ tree elem_type;
+ enum machine_mode elem_mode;
+
+ *no_add_attrs = true;
+
+ if (TREE_CODE (rep_type) != ARRAY_TYPE)
+ {
+ error ("attribute %qs applies to array types only",
+ IDENTIFIER_POINTER (name));
+ return NULL_TREE;
+ }
+
+ /* Silently punt on variable sizes. We can't make vector types for them,
+ we need to ignore them on front-end generated subtypes of unconstrained
+ bases, and this attribute is for binding implementors, not end-users, so
+ we should never get here from legitimate explicit uses. */
+
+ if (!tree_fits_uhwi_p (rep_size))
+ return NULL_TREE;
+
+ /* Get the element type/mode and check this is something we know
+ how to make vectors of. */
+
+ elem_type = TREE_TYPE (rep_type);
+ elem_mode = TYPE_MODE (elem_type);
+
+ if ((!INTEGRAL_TYPE_P (elem_type)
+ && !SCALAR_FLOAT_TYPE_P (elem_type)
+ && !FIXED_POINT_TYPE_P (elem_type))
+ || (!SCALAR_FLOAT_MODE_P (elem_mode)
+ && GET_MODE_CLASS (elem_mode) != MODE_INT
+ && !ALL_SCALAR_FIXED_POINT_MODE_P (elem_mode))
+ || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (elem_type)))
+ {
+ error ("invalid element type for attribute %qs",
+ IDENTIFIER_POINTER (name));
+ return NULL_TREE;
+ }
+
+ /* Sanity check the vector size and element type consistency. */
+
+ vec_bytes = tree_to_uhwi (rep_size);
+
+ if (vec_bytes % tree_to_uhwi (TYPE_SIZE_UNIT (elem_type)))
+ {
+ error ("vector size not an integral multiple of component size");
+ return NULL;
+ }
+
+ if (vec_bytes == 0)
+ {
+ error ("zero vector size");
+ return NULL;
+ }
+
+ vec_units = vec_bytes / tree_to_uhwi (TYPE_SIZE_UNIT (elem_type));
+ if (vec_units & (vec_units - 1))
+ {
+ error ("number of components of the vector not a power of two");
+ return NULL_TREE;
+ }
+
+ /* Build the vector type and replace. */
+
+ *node = build_vector_type (elem_type, vec_units);
+ TYPE_REPRESENTATIVE_ARRAY (*node) = rep_type;
+
+ return NULL_TREE;
+}
+
+/* ----------------------------------------------------------------------- *
+ * BUILTIN FUNCTIONS *
+ * ----------------------------------------------------------------------- */
+
+/* Worker for DEF_BUILTIN. Possibly define a builtin function with one or two
+ names. Does not declare a non-__builtin_ function if flag_no_builtin, or
+ if nonansi_p and flag_no_nonansi_builtin. */
+
+static void
+def_builtin_1 (enum built_in_function fncode,
+ const char *name,
+ enum built_in_class fnclass,
+ tree fntype, tree libtype,
+ bool both_p, bool fallback_p,
+ bool nonansi_p ATTRIBUTE_UNUSED,
+ tree fnattrs, bool implicit_p)
+{
+ tree decl;
+ const char *libname;
+
+ /* Preserve an already installed decl. It most likely was setup in advance
+ (e.g. as part of the internal builtins) for specific reasons. */
+ if (builtin_decl_explicit (fncode) != NULL_TREE)
+ return;
+
+ gcc_assert ((!both_p && !fallback_p)
+ || !strncmp (name, "__builtin_",
+ strlen ("__builtin_")));
+
+ libname = name + strlen ("__builtin_");
+ decl = add_builtin_function (name, fntype, fncode, fnclass,
+ (fallback_p ? libname : NULL),
+ fnattrs);
+ if (both_p)
+ /* ??? This is normally further controlled by command-line options
+ like -fno-builtin, but we don't have them for Ada. */
+ add_builtin_function (libname, libtype, fncode, fnclass,
+ NULL, fnattrs);
+
+ set_builtin_decl (fncode, decl, implicit_p);
+}
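+
+/* For example, for memcpy, which builtins.def declares with BOTH_P and
+ FALLBACK_P set, this installs __builtin_memcpy with "memcpy" as its
+ fallback library name and additionally declares plain memcpy with the
+ library type. */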
+
+static int flag_isoc94 = 0;
+static int flag_isoc99 = 0;
+
+/* Install what the common builtins.def offers. */
+
+static void
+install_builtin_functions (void)
+{
+#define DEF_BUILTIN(ENUM, NAME, CLASS, TYPE, LIBTYPE, BOTH_P, FALLBACK_P, \
+ NONANSI_P, ATTRS, IMPLICIT, COND) \
+ if (NAME && COND) \
+ def_builtin_1 (ENUM, NAME, CLASS, \
+ builtin_types[(int) TYPE], \
+ builtin_types[(int) LIBTYPE], \
+ BOTH_P, FALLBACK_P, NONANSI_P, \
+ built_in_attributes[(int) ATTRS], IMPLICIT);
+#include "builtins.def"
+#undef DEF_BUILTIN
+}
+
+/* ----------------------------------------------------------------------- *
+ * BUILTIN FUNCTIONS *
+ * ----------------------------------------------------------------------- */
+
+/* Install the builtin functions we might need. */
+
+void
+gnat_install_builtins (void)
+{
+ install_builtin_elementary_types ();
+ install_builtin_function_types ();
+ install_builtin_attributes ();
+
+ /* Install builtins used by generic middle-end pieces first. Some of these
+ know about internal specificities and control attributes accordingly, for
+ instance __builtin_alloca vs no-throw and -fstack-check. We will ignore
+ the generic definition from builtins.def. */
+ build_common_builtin_nodes ();
+
+ /* Now, install the target specific builtins, such as the AltiVec family on
+ ppc, and the common set as exposed by builtins.def. */
+ targetm.init_builtins ();
+ install_builtin_functions ();
+}
+
+#include "gt-ada-utils.h"
+#include "gtype-ada.h"
diff --git a/gcc-4.9/gcc/ada/gcc-interface/utils2.c b/gcc-4.9/gcc/ada/gcc-interface/utils2.c
new file mode 100644
index 000000000..dd4151b5b
--- /dev/null
+++ b/gcc-4.9/gcc/ada/gcc-interface/utils2.c
@@ -0,0 +1,2852 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * U T I L S 2 *
+ * *
+ * C Implementation File *
+ * *
+ * Copyright (C) 1992-2014, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License along with GCC; see the file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "varasm.h"
+#include "flags.h"
+#include "toplev.h"
+#include "ggc.h"
+#include "tree-inline.h"
+
+#include "ada.h"
+#include "types.h"
+#include "atree.h"
+#include "elists.h"
+#include "namet.h"
+#include "nlists.h"
+#include "snames.h"
+#include "stringt.h"
+#include "uintp.h"
+#include "fe.h"
+#include "sinfo.h"
+#include "einfo.h"
+#include "ada-tree.h"
+#include "gigi.h"
+
+/* Return the base type of TYPE. */
+
+tree
+get_base_type (tree type)
+{
+ if (TREE_CODE (type) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (type))
+ type = TREE_TYPE (TYPE_FIELDS (type));
+
+ while (TREE_TYPE (type)
+ && (TREE_CODE (type) == INTEGER_TYPE
+ || TREE_CODE (type) == REAL_TYPE))
+ type = TREE_TYPE (type);
+
+ return type;
+}
+
+/* EXP is a GCC tree representing an address. See if we can find how
+ strictly the object at that address is aligned. Return that alignment
+ in bits. If we don't know anything about the alignment, return 0. */
+
+unsigned int
+known_alignment (tree exp)
+{
+ unsigned int this_alignment;
+ unsigned int lhs, rhs;
+
+ switch (TREE_CODE (exp))
+ {
+ CASE_CONVERT:
+ case VIEW_CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ /* Conversions between pointers and integers don't change the alignment
+ of the underlying object. */
+ this_alignment = known_alignment (TREE_OPERAND (exp, 0));
+ break;
+
+ case COMPOUND_EXPR:
+ /* The value of a COMPOUND_EXPR is that of its second operand. */
+ this_alignment = known_alignment (TREE_OPERAND (exp, 1));
+ break;
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ /* If two addresses are added, the alignment of the result is the
+ minimum of the two alignments. */
+ lhs = known_alignment (TREE_OPERAND (exp, 0));
+ rhs = known_alignment (TREE_OPERAND (exp, 1));
+ this_alignment = MIN (lhs, rhs);
+ break;
+
+ case POINTER_PLUS_EXPR:
+ lhs = known_alignment (TREE_OPERAND (exp, 0));
+ rhs = known_alignment (TREE_OPERAND (exp, 1));
+ /* If we don't know the alignment of the offset, we assume that
+ of the base. */
+ if (rhs == 0)
+ this_alignment = lhs;
+ else
+ this_alignment = MIN (lhs, rhs);
+ break;
+
+ case COND_EXPR:
+ /* If there is a choice between two values, use the smallest one. */
+ lhs = known_alignment (TREE_OPERAND (exp, 1));
+ rhs = known_alignment (TREE_OPERAND (exp, 2));
+ this_alignment = MIN (lhs, rhs);
+ break;
+
+ case INTEGER_CST:
+ {
+ unsigned HOST_WIDE_INT c = TREE_INT_CST_LOW (exp);
+ /* C & -C isolates the lowest set bit in the constant, which gives
+ the largest power-of-2 factor of the address; it is originally in
+ bytes, not bits. */
+ this_alignment = MIN (BITS_PER_UNIT * (c & -c), BIGGEST_ALIGNMENT);
+ }
+ break;
+
+ case MULT_EXPR:
+ /* If we know the alignment of just one side, use it. Otherwise,
+ use the product of the alignments. */
+ lhs = known_alignment (TREE_OPERAND (exp, 0));
+ rhs = known_alignment (TREE_OPERAND (exp, 1));
+
+ if (lhs == 0)
+ this_alignment = rhs;
+ else if (rhs == 0)
+ this_alignment = lhs;
+ else
+ this_alignment = MIN (lhs * rhs, BIGGEST_ALIGNMENT);
+ break;
+
+ case BIT_AND_EXPR:
+ /* A bit-and expression is as aligned as the maximum alignment of the
+ operands. We typically get here for a complex lhs and a constant
+ negative power of two on the rhs to force an explicit alignment, so
+ don't bother looking at the lhs. */
+ this_alignment = known_alignment (TREE_OPERAND (exp, 1));
+ break;
+
+ case ADDR_EXPR:
+ this_alignment = expr_align (TREE_OPERAND (exp, 0));
+ break;
+
+ case CALL_EXPR:
+ {
+ tree t = maybe_inline_call_in_expr (exp);
+ if (t)
+ return known_alignment (t);
+ }
+
+ /* Fall through... */
+
+ default:
+ /* For other pointer expressions, we assume that the pointed-to object
+ is at least as aligned as the pointed-to type. Beware that we can
+ have a dummy type here (e.g. a Taft Amendment type), for which the
+ alignment is meaningless and should be ignored. */
+ if (POINTER_TYPE_P (TREE_TYPE (exp))
+ && !TYPE_IS_DUMMY_P (TREE_TYPE (TREE_TYPE (exp))))
+ this_alignment = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (exp)));
+ else
+ this_alignment = 0;
+ break;
+ }
+
+ return this_alignment;
+}
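+
+/* As a concrete instance of the INTEGER_CST case above, a literal address
+ of 24 (binary 11000) gives c & -c == 8, hence a known alignment of
+ 8 * BITS_PER_UNIT, i.e. 64 bits on a typical byte-addressed target. */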
+
+/* We have a comparison or assignment operation on two types, T1 and T2, which
+ are either both array types or both record types. T1 is assumed to be for
+ the left hand side operand, and T2 for the right hand side. Return the
+ type that both operands should be converted to for the operation, if any.
+ Otherwise return zero. */
+
+static tree
+find_common_type (tree t1, tree t2)
+{
+ /* ??? As of today, various constructs lead to here with types of different
+ sizes even when both constants (e.g. tagged types, packable vs regular
+ component types, padded vs unpadded types, ...). While some of these
+ would better be handled upstream (types should be made consistent before
+ calling into build_binary_op), some others are really expected and we
+ have to be careful. */
+
+ /* We must avoid writing more than what the target can hold if this is for
+ an assignment, and the case of tagged types is handled in build_binary_op,
+ so we use the lhs type if it is known to be smaller or of constant size
+ and the rhs type is not, whatever the modes. We also force t1 in case of
+ constant size equality to minimize occurrences of view conversions on the
+ lhs of an assignment, except for the case of record types with a variant
+ part on the lhs but not on the rhs, to make the conversion simpler. */
+ if (TREE_CONSTANT (TYPE_SIZE (t1))
+ && (!TREE_CONSTANT (TYPE_SIZE (t2))
+ || tree_int_cst_lt (TYPE_SIZE (t1), TYPE_SIZE (t2))
+ || (TYPE_SIZE (t1) == TYPE_SIZE (t2)
+ && !(TREE_CODE (t1) == RECORD_TYPE
+ && TREE_CODE (t2) == RECORD_TYPE
+ && get_variant_part (t1) != NULL_TREE
+ && get_variant_part (t2) == NULL_TREE))))
+ return t1;
+
+ /* Otherwise, if the lhs type is non-BLKmode, use it. Note that we know
+ that we will not have any alignment problems since, if we did, the
+ non-BLKmode type could not have been used. */
+ if (TYPE_MODE (t1) != BLKmode)
+ return t1;
+
+ /* If the rhs type is of constant size, use it whatever the modes. At
+ this point it is known to be smaller, or of constant size and the
+ lhs type is not. */
+ if (TREE_CONSTANT (TYPE_SIZE (t2)))
+ return t2;
+
+ /* Otherwise, if the rhs type is non-BLKmode, use it. */
+ if (TYPE_MODE (t2) != BLKmode)
+ return t2;
+
+ /* In this case, both types have variable size and BLKmode. It's
+ probably best to leave the "type mismatch" because changing it
+ could cause a bad self-referential reference. */
+ return NULL_TREE;
+}
+
+/* Return an expression tree representing an equality comparison of A1 and A2,
+ two objects of type ARRAY_TYPE. The result should be of type RESULT_TYPE.
+
+ Two arrays are equal in one of two ways: (1) if both have zero length in
+ some dimension (not necessarily the same dimension) or (2) if the lengths
+ in each dimension are equal and the data is equal. We perform the length
+ tests in as efficient a manner as possible. */
+
+static tree
+compare_arrays (location_t loc, tree result_type, tree a1, tree a2)
+{
+ tree result = convert (result_type, boolean_true_node);
+ tree a1_is_null = convert (result_type, boolean_false_node);
+ tree a2_is_null = convert (result_type, boolean_false_node);
+ tree t1 = TREE_TYPE (a1);
+ tree t2 = TREE_TYPE (a2);
+ bool a1_side_effects_p = TREE_SIDE_EFFECTS (a1);
+ bool a2_side_effects_p = TREE_SIDE_EFFECTS (a2);
+ bool length_zero_p = false;
+
+ /* If either operand has side-effects, they have to be evaluated only once
+ in spite of the multiple references to the operand in the comparison. */
+ if (a1_side_effects_p)
+ a1 = gnat_protect_expr (a1);
+
+ if (a2_side_effects_p)
+ a2 = gnat_protect_expr (a2);
+
+ /* Process each dimension separately and compare the lengths. If any
+ dimension has a length known to be zero, set LENGTH_ZERO_P to true
+ in order to suppress the comparison of the data at the end. */
+ while (TREE_CODE (t1) == ARRAY_TYPE && TREE_CODE (t2) == ARRAY_TYPE)
+ {
+ tree lb1 = TYPE_MIN_VALUE (TYPE_DOMAIN (t1));
+ tree ub1 = TYPE_MAX_VALUE (TYPE_DOMAIN (t1));
+ tree lb2 = TYPE_MIN_VALUE (TYPE_DOMAIN (t2));
+ tree ub2 = TYPE_MAX_VALUE (TYPE_DOMAIN (t2));
+ tree length1 = size_binop (PLUS_EXPR, size_binop (MINUS_EXPR, ub1, lb1),
+ size_one_node);
+ tree length2 = size_binop (PLUS_EXPR, size_binop (MINUS_EXPR, ub2, lb2),
+ size_one_node);
+ tree comparison, this_a1_is_null, this_a2_is_null;
+
+ /* If the length of the first array is a constant, swap our operands
+ unless the length of the second array is the constant zero. */
+ if (TREE_CODE (length1) == INTEGER_CST && !integer_zerop (length2))
+ {
+ tree tem;
+ bool btem;
+
+ tem = a1, a1 = a2, a2 = tem;
+ tem = t1, t1 = t2, t2 = tem;
+ tem = lb1, lb1 = lb2, lb2 = tem;
+ tem = ub1, ub1 = ub2, ub2 = tem;
+ tem = length1, length1 = length2, length2 = tem;
+ tem = a1_is_null, a1_is_null = a2_is_null, a2_is_null = tem;
+ btem = a1_side_effects_p, a1_side_effects_p = a2_side_effects_p,
+ a2_side_effects_p = btem;
+ }
+
+ /* If the length of the second array is the constant zero, we can just
+ use the original stored bounds for the first array and see whether
+ last < first holds. */
+ if (integer_zerop (length2))
+ {
+ length_zero_p = true;
+
+ ub1 = TYPE_MAX_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t1)));
+ lb1 = TYPE_MIN_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t1)));
+
+ comparison = fold_build2_loc (loc, LT_EXPR, result_type, ub1, lb1);
+ comparison = SUBSTITUTE_PLACEHOLDER_IN_EXPR (comparison, a1);
+ if (EXPR_P (comparison))
+ SET_EXPR_LOCATION (comparison, loc);
+
+ this_a1_is_null = comparison;
+ this_a2_is_null = convert (result_type, boolean_true_node);
+ }
+
+ /* Otherwise, if the length is some other constant value, we know that
+ this dimension in the second array cannot be superflat, so we can
+ just use its length computed from the actual stored bounds. */
+ else if (TREE_CODE (length2) == INTEGER_CST)
+ {
+ tree bt;
+
+ ub1 = TYPE_MAX_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t1)));
+ lb1 = TYPE_MIN_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t1)));
+ /* Note that we know that UB2 and LB2 are constant and hence
+ cannot contain a PLACEHOLDER_EXPR. */
+ ub2 = TYPE_MAX_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t2)));
+ lb2 = TYPE_MIN_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t2)));
+ bt = get_base_type (TREE_TYPE (ub1));
+
+ comparison
+ = fold_build2_loc (loc, EQ_EXPR, result_type,
+ build_binary_op (MINUS_EXPR, bt, ub1, lb1),
+ build_binary_op (MINUS_EXPR, bt, ub2, lb2));
+ comparison = SUBSTITUTE_PLACEHOLDER_IN_EXPR (comparison, a1);
+ if (EXPR_P (comparison))
+ SET_EXPR_LOCATION (comparison, loc);
+
+ this_a1_is_null
+ = fold_build2_loc (loc, LT_EXPR, result_type, ub1, lb1);
+
+ this_a2_is_null = convert (result_type, boolean_false_node);
+ }
+
+ /* Otherwise, compare the computed lengths. */
+ else
+ {
+ length1 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (length1, a1);
+ length2 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (length2, a2);
+
+ comparison
+ = fold_build2_loc (loc, EQ_EXPR, result_type, length1, length2);
+
+ /* If the length expression is of the form (cond ? val : 0), assume
+ that cond is equivalent to (length != 0). That's guaranteed by
+ construction of the array types in gnat_to_gnu_entity. */
+ if (TREE_CODE (length1) == COND_EXPR
+ && integer_zerop (TREE_OPERAND (length1, 2)))
+ this_a1_is_null
+ = invert_truthvalue_loc (loc, TREE_OPERAND (length1, 0));
+ else
+ this_a1_is_null = fold_build2_loc (loc, EQ_EXPR, result_type,
+ length1, size_zero_node);
+
+ /* Likewise for the second array. */
+ if (TREE_CODE (length2) == COND_EXPR
+ && integer_zerop (TREE_OPERAND (length2, 2)))
+ this_a2_is_null
+ = invert_truthvalue_loc (loc, TREE_OPERAND (length2, 0));
+ else
+ this_a2_is_null = fold_build2_loc (loc, EQ_EXPR, result_type,
+ length2, size_zero_node);
+ }
+
+ /* Append expressions for this dimension to the final expressions. */
+ result = build_binary_op (TRUTH_ANDIF_EXPR, result_type,
+ result, comparison);
+
+ a1_is_null = build_binary_op (TRUTH_ORIF_EXPR, result_type,
+ this_a1_is_null, a1_is_null);
+
+ a2_is_null = build_binary_op (TRUTH_ORIF_EXPR, result_type,
+ this_a2_is_null, a2_is_null);
+
+ t1 = TREE_TYPE (t1);
+ t2 = TREE_TYPE (t2);
+ }
+
+ /* Unless the length of some dimension is known to be zero, compare the
+ data in the array. */
+ if (!length_zero_p)
+ {
+ tree type = find_common_type (TREE_TYPE (a1), TREE_TYPE (a2));
+ tree comparison;
+
+ if (type)
+ {
+ a1 = convert (type, a1);
+ a2 = convert (type, a2);
+ }
+
+ comparison = fold_build2_loc (loc, EQ_EXPR, result_type, a1, a2);
+
+ result
+ = build_binary_op (TRUTH_ANDIF_EXPR, result_type, result, comparison);
+ }
+
+ /* The result is also true if both sizes are zero. */
+ result = build_binary_op (TRUTH_ORIF_EXPR, result_type,
+ build_binary_op (TRUTH_ANDIF_EXPR, result_type,
+ a1_is_null, a2_is_null),
+ result);
+
+ /* If either operand has side-effects, they have to be evaluated before
+ starting the comparison above since the place they would be otherwise
+ evaluated could be wrong. */
+ if (a1_side_effects_p)
+ result = build2 (COMPOUND_EXPR, result_type, a1, result);
+
+ if (a2_side_effects_p)
+ result = build2 (COMPOUND_EXPR, result_type, a2, result);
+
+ return result;
+}
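+
+/* For a one-dimensional comparison, the expression built above is thus
+ roughly
+
+ (length1 == length2 && a1 == a2) || (a1_is_null && a2_is_null)
+
+ with the length test specialized when either length is constant and
+ COMPOUND_EXPRs wrapped around the whole for side-effects. */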
+
+/* Return an expression tree representing an equality comparison of P1 and P2,
+ two objects of fat pointer type. The result should be of type RESULT_TYPE.
+
+ Two fat pointers are equal in one of two ways: (1) if both have a null
+ pointer to the array or (2) if they contain the same couple of pointers.
+ We perform the comparison in as efficient a manner as possible. */
+
+static tree
+compare_fat_pointers (location_t loc, tree result_type, tree p1, tree p2)
+{
+ tree p1_array, p2_array, p1_bounds, p2_bounds, same_array, same_bounds;
+ tree p1_array_is_null, p2_array_is_null;
+
+ /* If either operand has side-effects, they have to be evaluated only once
+ in spite of the multiple references to the operand in the comparison. */
+ p1 = gnat_protect_expr (p1);
+ p2 = gnat_protect_expr (p2);
+
+ /* The constant folder doesn't fold fat pointer types so we do it here. */
+ if (TREE_CODE (p1) == CONSTRUCTOR)
+ p1_array = (*CONSTRUCTOR_ELTS (p1))[0].value;
+ else
+ p1_array = build_component_ref (p1, NULL_TREE,
+ TYPE_FIELDS (TREE_TYPE (p1)), true);
+
+ p1_array_is_null
+ = fold_build2_loc (loc, EQ_EXPR, result_type, p1_array,
+ fold_convert_loc (loc, TREE_TYPE (p1_array),
+ null_pointer_node));
+
+ if (TREE_CODE (p2) == CONSTRUCTOR)
+ p2_array = (*CONSTRUCTOR_ELTS (p2))[0].value;
+ else
+ p2_array = build_component_ref (p2, NULL_TREE,
+ TYPE_FIELDS (TREE_TYPE (p2)), true);
+
+ p2_array_is_null
+ = fold_build2_loc (loc, EQ_EXPR, result_type, p2_array,
+ fold_convert_loc (loc, TREE_TYPE (p2_array),
+ null_pointer_node));
+
+ /* If one of the pointers to the array is null, just compare the other. */
+ if (integer_zerop (p1_array))
+ return p2_array_is_null;
+ else if (integer_zerop (p2_array))
+ return p1_array_is_null;
+
+ /* Otherwise, do the fully-fledged comparison. */
+ same_array
+ = fold_build2_loc (loc, EQ_EXPR, result_type, p1_array, p2_array);
+
+ if (TREE_CODE (p1) == CONSTRUCTOR)
+ p1_bounds = (*CONSTRUCTOR_ELTS (p1))[1].value;
+ else
+ p1_bounds
+ = build_component_ref (p1, NULL_TREE,
+ DECL_CHAIN (TYPE_FIELDS (TREE_TYPE (p1))), true);
+
+ if (TREE_CODE (p2) == CONSTRUCTOR)
+ p2_bounds = (*CONSTRUCTOR_ELTS (p2))[1].value;
+ else
+ p2_bounds
+ = build_component_ref (p2, NULL_TREE,
+ DECL_CHAIN (TYPE_FIELDS (TREE_TYPE (p2))), true);
+
+ same_bounds
+ = fold_build2_loc (loc, EQ_EXPR, result_type, p1_bounds, p2_bounds);
+
+ /* P1_ARRAY == P2_ARRAY && (P1_ARRAY == NULL || P1_BOUNDS == P2_BOUNDS). */
+ return build_binary_op (TRUTH_ANDIF_EXPR, result_type, same_array,
+ build_binary_op (TRUTH_ORIF_EXPR, result_type,
+ p1_array_is_null, same_bounds));
+}
+
+/* Compute the result of applying OP_CODE to LHS and RHS, where both are of
+ type TYPE. We know that TYPE is a modular type with a nonbinary
+ modulus. */
+
+static tree
+nonbinary_modular_operation (enum tree_code op_code, tree type, tree lhs,
+ tree rhs)
+{
+ tree modulus = TYPE_MODULUS (type);
+ unsigned int needed_precision = tree_floor_log2 (modulus) + 1;
+ unsigned int precision;
+ bool unsignedp = true;
+ tree op_type = type;
+ tree result;
+
+ /* If this is an addition of a constant, convert it to a subtraction
+ of a constant since we can do that faster. */
+ if (op_code == PLUS_EXPR && TREE_CODE (rhs) == INTEGER_CST)
+ {
+ rhs = fold_build2 (MINUS_EXPR, type, modulus, rhs);
+ op_code = MINUS_EXPR;
+ }
+
+ /* For the logical operations, we only need PRECISION bits. For
+ addition and subtraction, we need one more and for multiplication we
+ need twice as many. But we never want to make a size smaller than
+ our size. */
+ if (op_code == PLUS_EXPR || op_code == MINUS_EXPR)
+ needed_precision += 1;
+ else if (op_code == MULT_EXPR)
+ needed_precision *= 2;
+
+ precision = MAX (needed_precision, TYPE_PRECISION (op_type));
+
+ /* Unsigned will do for everything but subtraction. */
+ if (op_code == MINUS_EXPR)
+ unsignedp = false;
+
+ /* If our type is the wrong signedness or isn't wide enough, make a new
+ type and convert both our operands to it. */
+ if (TYPE_PRECISION (op_type) < precision
+ || TYPE_UNSIGNED (op_type) != unsignedp)
+ {
+ /* Copy the node so we ensure it can be modified to make it modular. */
+ op_type = copy_node (gnat_type_for_size (precision, unsignedp));
+ modulus = convert (op_type, modulus);
+ SET_TYPE_MODULUS (op_type, modulus);
+ TYPE_MODULAR_P (op_type) = 1;
+ lhs = convert (op_type, lhs);
+ rhs = convert (op_type, rhs);
+ }
+
+ /* Do the operation, then we'll fix it up. */
+ result = fold_build2 (op_code, op_type, lhs, rhs);
+
+ /* For multiplication, we have no choice but to do a full modulus
+ operation. However, we want to do this in the narrowest
+ possible size. */
+ if (op_code == MULT_EXPR)
+ {
+ tree div_type = copy_node (gnat_type_for_size (needed_precision, 1));
+ modulus = convert (div_type, modulus);
+ SET_TYPE_MODULUS (div_type, modulus);
+ TYPE_MODULAR_P (div_type) = 1;
+ result = convert (op_type,
+ fold_build2 (TRUNC_MOD_EXPR, div_type,
+ convert (div_type, result), modulus));
+ }
+
+ /* For subtraction, add the modulus back if we are negative. */
+ else if (op_code == MINUS_EXPR)
+ {
+ result = gnat_protect_expr (result);
+ result = fold_build3 (COND_EXPR, op_type,
+ fold_build2 (LT_EXPR, boolean_type_node, result,
+ convert (op_type, integer_zero_node)),
+ fold_build2 (PLUS_EXPR, op_type, result, modulus),
+ result);
+ }
+
+ /* For the other operations, subtract the modulus if we are >= it. */
+ else
+ {
+ result = gnat_protect_expr (result);
+ result = fold_build3 (COND_EXPR, op_type,
+ fold_build2 (GE_EXPR, boolean_type_node,
+ result, modulus),
+ fold_build2 (MINUS_EXPR, op_type,
+ result, modulus),
+ result);
+ }
+
+ return convert (type, result);
+}
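+
+/* As a worked example, take a modular type with nonbinary modulus 7 and
+ the addition X + 5. The code above rewrites it as the subtraction
+ X - (7 - 5) = X - 2 in a sufficiently wide signed type, then adds the
+ modulus back whenever the raw result is negative: for X = 1, the raw
+ result -1 becomes -1 + 7 = 6, which is indeed (1 + 5) mod 7. */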
+
+/* This page contains routines that implement the Ada semantics with regard
+ to atomic objects. They are fully piggybacked on the middle-end support
+ for atomic loads and stores.
+
+ *** Memory barriers and volatile objects ***
+
+ We implement the weakened form of the C.6(16) clause that was introduced
+ in Ada 2012 (AI05-117). Earlier forms of this clause wouldn't have been
+ implementable without significant performance hits on modern platforms.
+
+ We also take advantage of the requirements imposed on shared variables by
+ 9.10 (conditions for sequential actions) to have non-erroneous execution
+ and consider that C.6(16) and C.6(17) only prescribe a uniform order of
+ volatile updates with regard to sequential actions, i.e. with regard to
+ reads or updates of atomic objects.
+
+ As such, an update of an atomic object by a task requires that all earlier
+ accesses to volatile objects have completed. Similarly, later accesses to
+ volatile objects cannot be reordered before the update of the atomic object.
+ So, memory barriers both before and after the atomic update are needed.
+
+ For a read of an atomic object, to avoid seeing writes of volatile objects
+ by a task earlier than by the other tasks, a memory barrier is needed before
+ the atomic read. Finally, to avoid reordering later reads or updates of
+ volatile objects to before the atomic read, a barrier is needed after the
+ atomic read.
+
+ So, memory barriers are needed before and after atomic reads and updates.
+ And, in order to simplify the implementation, we use full memory barriers
+ in all cases, i.e. we enforce sequential consistency for atomic accesses. */
+
+/* Return the size of TYPE in bytes, which must be a power of 2 suitable
+ for an atomic access, or 0 if an error has already been diagnosed. */
+
+static unsigned int
+resolve_atomic_size (tree type)
+{
+ unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE_UNIT (type));
+
+ if (size == 1 || size == 2 || size == 4 || size == 8 || size == 16)
+ return size;
+
+ /* We shouldn't reach here without having already detected that the size
+ isn't compatible with an atomic access. */
+ gcc_assert (Serious_Errors_Detected);
+
+ return 0;
+}
+
+/* Build an atomic load for the underlying atomic object in SRC. */
+
+tree
+build_atomic_load (tree src)
+{
+ tree ptr_type
+ = build_pointer_type
+ (build_qualified_type (void_type_node, TYPE_QUAL_VOLATILE));
+ tree mem_model = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST);
+ tree orig_src = src;
+ tree t, addr, val;
+ unsigned int size;
+ int fncode;
+
+ /* Remove conversions to get the address of the underlying object. */
+ src = remove_conversions (src, false);
+ size = resolve_atomic_size (TREE_TYPE (src));
+ if (size == 0)
+ return orig_src;
+
+ fncode = (int) BUILT_IN_ATOMIC_LOAD_N + exact_log2 (size) + 1;
+ t = builtin_decl_implicit ((enum built_in_function) fncode);
+
+ addr = build_unary_op (ADDR_EXPR, ptr_type, src);
+ val = build_call_expr (t, 2, addr, mem_model);
+
+ /* First reinterpret the loaded bits in the original type of the load,
+ then convert to the expected result type. */
+ t = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (src), val);
+ return convert (TREE_TYPE (orig_src), t);
+}
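+
+/* For a properly sized object, say a 4-byte atomic integer, the call built
+ above is essentially
+
+ __atomic_load_4 (&obj, __ATOMIC_SEQ_CST)
+
+ with the loaded bits then view-converted back to the object's type. */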
+
+/* Build an atomic store from SRC to the underlying atomic object in DEST. */
+
+tree
+build_atomic_store (tree dest, tree src)
+{
+ tree ptr_type
+ = build_pointer_type
+ (build_qualified_type (void_type_node, TYPE_QUAL_VOLATILE));
+ tree mem_model = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST);
+ tree orig_dest = dest;
+ tree t, int_type, addr;
+ unsigned int size;
+ int fncode;
+
+ /* Remove conversions to get the address of the underlying object. */
+ dest = remove_conversions (dest, false);
+ size = resolve_atomic_size (TREE_TYPE (dest));
+ if (size == 0)
+ return build_binary_op (MODIFY_EXPR, NULL_TREE, orig_dest, src);
+
+ fncode = (int) BUILT_IN_ATOMIC_STORE_N + exact_log2 (size) + 1;
+ t = builtin_decl_implicit ((enum built_in_function) fncode);
+ int_type = gnat_type_for_size (BITS_PER_UNIT * size, 1);
+
+ /* First convert the bits to be stored to the original type of the store,
+ then reinterpret them in the effective type. But if the original type
+ is a padded type with the same size, convert to the inner type instead,
+ as we don't want to artificially introduce a CONSTRUCTOR here. */
+ if (TYPE_IS_PADDING_P (TREE_TYPE (dest))
+ && TYPE_SIZE (TREE_TYPE (dest))
+ == TYPE_SIZE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (dest)))))
+ src = convert (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (dest))), src);
+ else
+ src = convert (TREE_TYPE (dest), src);
+ src = fold_build1 (VIEW_CONVERT_EXPR, int_type, src);
+ addr = build_unary_op (ADDR_EXPR, ptr_type, dest);
+
+ return build_call_expr (t, 3, addr, src, mem_model);
+}
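+
+/* Symmetrically, storing a 4-byte value this way essentially amounts to
+
+ __atomic_store_4 (&obj, bits, __ATOMIC_SEQ_CST);
+
+ once the source has been reinterpreted in the matching integer type. */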
+
+/* Make a binary operation of kind OP_CODE. RESULT_TYPE is the type
+ desired for the result. Usually the operation is to be performed
+ in that type. For INIT_EXPR and MODIFY_EXPR, RESULT_TYPE must be
+ NULL_TREE. For ARRAY_REF, RESULT_TYPE may be NULL_TREE, in which
+ case the type to be used will be derived from the operands.
+
+ This function is very much unlike the ones for C and C++ since we
+ have already done any type conversion and matching required. All we
+ have to do here is validate the work done by SEM and handle subtypes. */
+
+tree
+build_binary_op (enum tree_code op_code, tree result_type,
+ tree left_operand, tree right_operand)
+{
+ tree left_type = TREE_TYPE (left_operand);
+ tree right_type = TREE_TYPE (right_operand);
+ tree left_base_type = get_base_type (left_type);
+ tree right_base_type = get_base_type (right_type);
+ tree operation_type = result_type;
+ tree best_type = NULL_TREE;
+ tree modulus, result;
+ bool has_side_effects = false;
+
+ if (operation_type
+ && TREE_CODE (operation_type) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (operation_type))
+ operation_type = TREE_TYPE (TYPE_FIELDS (operation_type));
+
+ if (operation_type
+ && TREE_CODE (operation_type) == INTEGER_TYPE
+ && TYPE_EXTRA_SUBTYPE_P (operation_type))
+ operation_type = get_base_type (operation_type);
+
+ modulus = (operation_type
+ && TREE_CODE (operation_type) == INTEGER_TYPE
+ && TYPE_MODULAR_P (operation_type)
+ ? TYPE_MODULUS (operation_type) : NULL_TREE);
+
+ switch (op_code)
+ {
+ case INIT_EXPR:
+ case MODIFY_EXPR:
+#ifdef ENABLE_CHECKING
+ gcc_assert (result_type == NULL_TREE);
+#endif
+ /* If there were integral or pointer conversions on the LHS, remove
+ them; we'll be putting them back below if needed. Likewise for
+ conversions between array and record types, except for justified
+ modular types. But don't do this if the right operand is not
+ BLKmode (for packed arrays) unless we are not changing the mode. */
+ while ((CONVERT_EXPR_P (left_operand)
+ || TREE_CODE (left_operand) == VIEW_CONVERT_EXPR)
+ && (((INTEGRAL_TYPE_P (left_type)
+ || POINTER_TYPE_P (left_type))
+ && (INTEGRAL_TYPE_P (TREE_TYPE
+ (TREE_OPERAND (left_operand, 0)))
+ || POINTER_TYPE_P (TREE_TYPE
+ (TREE_OPERAND (left_operand, 0)))))
+ || (((TREE_CODE (left_type) == RECORD_TYPE
+ && !TYPE_JUSTIFIED_MODULAR_P (left_type))
+ || TREE_CODE (left_type) == ARRAY_TYPE)
+ && ((TREE_CODE (TREE_TYPE
+ (TREE_OPERAND (left_operand, 0)))
+ == RECORD_TYPE)
+ || (TREE_CODE (TREE_TYPE
+ (TREE_OPERAND (left_operand, 0)))
+ == ARRAY_TYPE))
+ && (TYPE_MODE (right_type) == BLKmode
+ || (TYPE_MODE (left_type)
+ == TYPE_MODE (TREE_TYPE
+ (TREE_OPERAND
+ (left_operand, 0))))))))
+ {
+ left_operand = TREE_OPERAND (left_operand, 0);
+ left_type = TREE_TYPE (left_operand);
+ }
+
+ /* If a class-wide type may be involved, force use of the RHS type. */
+ if ((TREE_CODE (right_type) == RECORD_TYPE
+ || TREE_CODE (right_type) == UNION_TYPE)
+ && TYPE_ALIGN_OK (right_type))
+ operation_type = right_type;
+
+ /* If we are copying between padded objects with compatible types, use
+ the padded view of the objects, this is very likely more efficient.
+ Likewise for a padded object that is assigned a constructor, if we
+ can convert the constructor to the inner type, to avoid putting a
+ VIEW_CONVERT_EXPR on the LHS. But don't do so if we wouldn't have
+ actually copied anything. */
+ else if (TYPE_IS_PADDING_P (left_type)
+ && TREE_CONSTANT (TYPE_SIZE (left_type))
+ && ((TREE_CODE (right_operand) == COMPONENT_REF
+ && TYPE_MAIN_VARIANT (left_type)
+ == TYPE_MAIN_VARIANT
+ (TREE_TYPE (TREE_OPERAND (right_operand, 0))))
+ || (TREE_CODE (right_operand) == CONSTRUCTOR
+ && !CONTAINS_PLACEHOLDER_P
+ (DECL_SIZE (TYPE_FIELDS (left_type)))))
+ && !integer_zerop (TYPE_SIZE (right_type)))
+ {
+ /* We make an exception for a BLKmode type padding a non-BLKmode
+ inner type and do the conversion of the LHS right away, since
+ unchecked_convert wouldn't do it properly. */
+ if (TYPE_MODE (left_type) == BLKmode
+ && TYPE_MODE (right_type) != BLKmode
+ && TREE_CODE (right_operand) != CONSTRUCTOR)
+ {
+ operation_type = right_type;
+ left_operand = convert (operation_type, left_operand);
+ left_type = operation_type;
+ }
+ else
+ operation_type = left_type;
+ }
+
+ /* If we have a call to a function that returns an unconstrained type
+ with default discriminant on the RHS, use the RHS type (which is
+ padded) as we cannot compute the size of the actual assignment. */
+ else if (TREE_CODE (right_operand) == CALL_EXPR
+ && TYPE_IS_PADDING_P (right_type)
+ && CONTAINS_PLACEHOLDER_P
+ (TYPE_SIZE (TREE_TYPE (TYPE_FIELDS (right_type)))))
+ operation_type = right_type;
+
+ /* Find the best type to use for copying between aggregate types. */
+ else if (((TREE_CODE (left_type) == ARRAY_TYPE
+ && TREE_CODE (right_type) == ARRAY_TYPE)
+ || (TREE_CODE (left_type) == RECORD_TYPE
+ && TREE_CODE (right_type) == RECORD_TYPE))
+ && (best_type = find_common_type (left_type, right_type)))
+ operation_type = best_type;
+
+ /* Otherwise use the LHS type. */
+ else
+ operation_type = left_type;
+
+ /* Ensure everything on the LHS is valid. If we have a field reference,
+ strip anything that get_inner_reference can handle. Then remove any
+ conversions between types having the same code and mode. And mark
+ VIEW_CONVERT_EXPRs with TREE_ADDRESSABLE. When done, we must have
+ either an INDIRECT_REF, a NULL_EXPR or a DECL node. */
+ result = left_operand;
+ while (true)
+ {
+ tree restype = TREE_TYPE (result);
+
+ if (TREE_CODE (result) == COMPONENT_REF
+ || TREE_CODE (result) == ARRAY_REF
+ || TREE_CODE (result) == ARRAY_RANGE_REF)
+ while (handled_component_p (result))
+ result = TREE_OPERAND (result, 0);
+ else if (TREE_CODE (result) == REALPART_EXPR
+ || TREE_CODE (result) == IMAGPART_EXPR
+ || (CONVERT_EXPR_P (result)
+ && (((TREE_CODE (restype)
+ == TREE_CODE (TREE_TYPE
+ (TREE_OPERAND (result, 0))))
+ && (TYPE_MODE (TREE_TYPE
+ (TREE_OPERAND (result, 0)))
+ == TYPE_MODE (restype)))
+ || TYPE_ALIGN_OK (restype))))
+ result = TREE_OPERAND (result, 0);
+ else if (TREE_CODE (result) == VIEW_CONVERT_EXPR)
+ {
+ TREE_ADDRESSABLE (result) = 1;
+ result = TREE_OPERAND (result, 0);
+ }
+ else
+ break;
+ }
+
+ gcc_assert (TREE_CODE (result) == INDIRECT_REF
+ || TREE_CODE (result) == NULL_EXPR
+ || DECL_P (result));
+
+ /* Convert the right operand to the operation type unless it is
+ either already of the correct type or if the type involves a
+ placeholder, since the RHS may not have the same record type. */
+ if (operation_type != right_type
+ && !CONTAINS_PLACEHOLDER_P (TYPE_SIZE (operation_type)))
+ {
+ right_operand = convert (operation_type, right_operand);
+ right_type = operation_type;
+ }
+
+ /* If the left operand is not of the same type as the operation
+ type, wrap it up in a VIEW_CONVERT_EXPR. */
+ if (left_type != operation_type)
+ left_operand = unchecked_convert (operation_type, left_operand, false);
+
+ has_side_effects = true;
+ modulus = NULL_TREE;
+ break;
+
+ case ARRAY_REF:
+ if (!operation_type)
+ operation_type = TREE_TYPE (left_type);
+
+ /* ... fall through ... */
+
+ case ARRAY_RANGE_REF:
+ /* First look through conversions between type variants. Note that
+ this changes neither the operation type nor the type domain. */
+ if (TREE_CODE (left_operand) == VIEW_CONVERT_EXPR
+ && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (left_operand, 0)))
+ == TYPE_MAIN_VARIANT (left_type))
+ {
+ left_operand = TREE_OPERAND (left_operand, 0);
+ left_type = TREE_TYPE (left_operand);
+ }
+
+ /* For a range, make sure the element type is consistent. */
+ if (op_code == ARRAY_RANGE_REF
+ && TREE_TYPE (operation_type) != TREE_TYPE (left_type))
+ operation_type = build_array_type (TREE_TYPE (left_type),
+ TYPE_DOMAIN (operation_type));
+
+ /* Then convert the right operand to its base type. This will prevent
+ unneeded sign conversions when sizetype is wider than integer. */
+ right_operand = convert (right_base_type, right_operand);
+ right_operand = convert_to_index_type (right_operand);
+ modulus = NULL_TREE;
+ break;
+
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_XOR_EXPR:
+#ifdef ENABLE_CHECKING
+ gcc_assert (TREE_CODE (get_base_type (result_type)) == BOOLEAN_TYPE);
+#endif
+ operation_type = left_base_type;
+ left_operand = convert (operation_type, left_operand);
+ right_operand = convert (operation_type, right_operand);
+ break;
+
+ case GE_EXPR:
+ case LE_EXPR:
+ case GT_EXPR:
+ case LT_EXPR:
+ case EQ_EXPR:
+ case NE_EXPR:
+#ifdef ENABLE_CHECKING
+ gcc_assert (TREE_CODE (get_base_type (result_type)) == BOOLEAN_TYPE);
+#endif
+ /* If either operand is a NULL_EXPR, just return a new one. */
+ if (TREE_CODE (left_operand) == NULL_EXPR)
+ return build2 (op_code, result_type,
+ build1 (NULL_EXPR, integer_type_node,
+ TREE_OPERAND (left_operand, 0)),
+ integer_zero_node);
+
+ else if (TREE_CODE (right_operand) == NULL_EXPR)
+ return build2 (op_code, result_type,
+ build1 (NULL_EXPR, integer_type_node,
+ TREE_OPERAND (right_operand, 0)),
+ integer_zero_node);
+
+ /* If either object is a justified modular type, get the
+ fields from within. */
+ if (TREE_CODE (left_type) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (left_type))
+ {
+ left_operand = convert (TREE_TYPE (TYPE_FIELDS (left_type)),
+ left_operand);
+ left_type = TREE_TYPE (left_operand);
+ left_base_type = get_base_type (left_type);
+ }
+
+ if (TREE_CODE (right_type) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (right_type))
+ {
+ right_operand = convert (TREE_TYPE (TYPE_FIELDS (right_type)),
+ right_operand);
+ right_type = TREE_TYPE (right_operand);
+ right_base_type = get_base_type (right_type);
+ }
+
+ /* If both objects are arrays, compare them specially. */
+ if ((TREE_CODE (left_type) == ARRAY_TYPE
+ || (TREE_CODE (left_type) == INTEGER_TYPE
+ && TYPE_HAS_ACTUAL_BOUNDS_P (left_type)))
+ && (TREE_CODE (right_type) == ARRAY_TYPE
+ || (TREE_CODE (right_type) == INTEGER_TYPE
+ && TYPE_HAS_ACTUAL_BOUNDS_P (right_type))))
+ {
+ result = compare_arrays (input_location,
+ result_type, left_operand, right_operand);
+ if (op_code == NE_EXPR)
+ result = invert_truthvalue_loc (EXPR_LOCATION (result), result);
+ else
+ gcc_assert (op_code == EQ_EXPR);
+
+ return result;
+ }
+
+ /* Otherwise, the base types must be the same, unless they are both fat
+ pointer types or record types. In the latter case, use the best type
+ and convert both operands to that type. */
+ if (left_base_type != right_base_type)
+ {
+ if (TYPE_IS_FAT_POINTER_P (left_base_type)
+ && TYPE_IS_FAT_POINTER_P (right_base_type))
+ {
+ gcc_assert (TYPE_MAIN_VARIANT (left_base_type)
+ == TYPE_MAIN_VARIANT (right_base_type));
+ best_type = left_base_type;
+ }
+
+ else if (TREE_CODE (left_base_type) == RECORD_TYPE
+ && TREE_CODE (right_base_type) == RECORD_TYPE)
+ {
+ /* The only way this is permitted is if both types have the same
+ name. In that case, one of them must not be self-referential.
+ Use it as the best type. Even better with a fixed size. */
+ gcc_assert (TYPE_NAME (left_base_type)
+ && TYPE_NAME (left_base_type)
+ == TYPE_NAME (right_base_type));
+
+ if (TREE_CONSTANT (TYPE_SIZE (left_base_type)))
+ best_type = left_base_type;
+ else if (TREE_CONSTANT (TYPE_SIZE (right_base_type)))
+ best_type = right_base_type;
+ else if (!CONTAINS_PLACEHOLDER_P (TYPE_SIZE (left_base_type)))
+ best_type = left_base_type;
+ else if (!CONTAINS_PLACEHOLDER_P (TYPE_SIZE (right_base_type)))
+ best_type = right_base_type;
+ else
+ gcc_unreachable ();
+ }
+
+ else
+ gcc_unreachable ();
+
+ left_operand = convert (best_type, left_operand);
+ right_operand = convert (best_type, right_operand);
+ }
+ else
+ {
+ left_operand = convert (left_base_type, left_operand);
+ right_operand = convert (right_base_type, right_operand);
+ }
+
+ /* If both objects are fat pointers, compare them specially. */
+ if (TYPE_IS_FAT_POINTER_P (left_base_type))
+ {
+ result
+ = compare_fat_pointers (input_location,
+ result_type, left_operand, right_operand);
+ if (op_code == NE_EXPR)
+ result = invert_truthvalue_loc (EXPR_LOCATION (result), result);
+ else
+ gcc_assert (op_code == EQ_EXPR);
+
+ return result;
+ }
+
+ modulus = NULL_TREE;
+ break;
+
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ /* The RHS of a shift can be any type. Also, ignore any modulus
+ (we used to abort, but this is needed for unchecked conversion
+ to modular types). Otherwise, processing is the same as normal. */
+ gcc_assert (operation_type == left_base_type);
+ modulus = NULL_TREE;
+ left_operand = convert (operation_type, left_operand);
+ break;
+
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ /* For binary modulus, if the inputs are in range, so are the
+ outputs. */
+ if (modulus && integer_pow2p (modulus))
+ modulus = NULL_TREE;
+ goto common;
+
+ case COMPLEX_EXPR:
+ gcc_assert (TREE_TYPE (result_type) == left_base_type
+ && TREE_TYPE (result_type) == right_base_type);
+ left_operand = convert (left_base_type, left_operand);
+ right_operand = convert (right_base_type, right_operand);
+ break;
+
+ case TRUNC_DIV_EXPR: case TRUNC_MOD_EXPR:
+ case CEIL_DIV_EXPR: case CEIL_MOD_EXPR:
+ case FLOOR_DIV_EXPR: case FLOOR_MOD_EXPR:
+ case ROUND_DIV_EXPR: case ROUND_MOD_EXPR:
+ /* These always produce results lower than either operand. */
+ modulus = NULL_TREE;
+ goto common;
+
+ case POINTER_PLUS_EXPR:
+ gcc_assert (operation_type == left_base_type
+ && sizetype == right_base_type);
+ left_operand = convert (operation_type, left_operand);
+ right_operand = convert (sizetype, right_operand);
+ break;
+
+ case PLUS_NOMOD_EXPR:
+ case MINUS_NOMOD_EXPR:
+ if (op_code == PLUS_NOMOD_EXPR)
+ op_code = PLUS_EXPR;
+ else
+ op_code = MINUS_EXPR;
+ modulus = NULL_TREE;
+
+ /* ... fall through ... */
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ /* Avoid doing arithmetic in ENUMERAL_TYPE or BOOLEAN_TYPE, like the
+ other compilers. Contrary to C, Ada doesn't allow arithmetic in
+ these types, but we can generate addition/subtraction for Succ/Pred. */
+ if (operation_type
+ && (TREE_CODE (operation_type) == ENUMERAL_TYPE
+ || TREE_CODE (operation_type) == BOOLEAN_TYPE))
+ operation_type = left_base_type = right_base_type
+ = gnat_type_for_mode (TYPE_MODE (operation_type),
+ TYPE_UNSIGNED (operation_type));
+
+ /* ... fall through ... */
+
+ default:
+ common:
+ /* The result type should be the same as the base types of both
+ operands (and those should be the same). Convert
+ everything to the result type. */
+
+ gcc_assert (operation_type == left_base_type
+ && left_base_type == right_base_type);
+ left_operand = convert (operation_type, left_operand);
+ right_operand = convert (operation_type, right_operand);
+ }
+
+ if (modulus && !integer_pow2p (modulus))
+ {
+ result = nonbinary_modular_operation (op_code, operation_type,
+ left_operand, right_operand);
+ modulus = NULL_TREE;
+ }
+ /* If either operand is a NULL_EXPR, just return a new one. */
+ else if (TREE_CODE (left_operand) == NULL_EXPR)
+ return build1 (NULL_EXPR, operation_type, TREE_OPERAND (left_operand, 0));
+ else if (TREE_CODE (right_operand) == NULL_EXPR)
+ return build1 (NULL_EXPR, operation_type, TREE_OPERAND (right_operand, 0));
+ else if (op_code == ARRAY_REF || op_code == ARRAY_RANGE_REF)
+ result = fold (build4 (op_code, operation_type, left_operand,
+ right_operand, NULL_TREE, NULL_TREE));
+ else if (op_code == INIT_EXPR || op_code == MODIFY_EXPR)
+ result = build2 (op_code, void_type_node, left_operand, right_operand);
+ else
+ result
+ = fold_build2 (op_code, operation_type, left_operand, right_operand);
+
+ if (TREE_CONSTANT (result))
+ ;
+ else if (op_code == ARRAY_REF || op_code == ARRAY_RANGE_REF)
+ {
+ TREE_THIS_NOTRAP (result) = 1;
+ if (TYPE_VOLATILE (operation_type))
+ TREE_THIS_VOLATILE (result) = 1;
+ }
+ else
+ TREE_CONSTANT (result)
+ |= (TREE_CONSTANT (left_operand) && TREE_CONSTANT (right_operand));
+
+ TREE_SIDE_EFFECTS (result) |= has_side_effects;
+
+ /* If we are working with modular types, perform the MOD operation
+ if something above hasn't eliminated the need for it. */
+ if (modulus)
+ result = fold_build2 (FLOOR_MOD_EXPR, operation_type, result,
+ convert (operation_type, modulus));
+
+ if (result_type && result_type != operation_type)
+ result = convert (result_type, result);
+
+ return result;
+}
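+
+ /* As an illustration of the modular handling above: for an Ada type
+ declared as "type M is mod 7", an addition goes through
+ nonbinary_modular_operation, whereas for a power-of-two modulus that
+ is still set at this point the result is reduced by the trailing
+ FLOOR_MOD_EXPR, which typically folds down to a bitwise AND. This is
+ a sketch of the common cases, not an exhaustive description. */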
+
+/* Similar, but for unary operations. */
+
+tree
+build_unary_op (enum tree_code op_code, tree result_type, tree operand)
+{
+ tree type = TREE_TYPE (operand);
+ tree base_type = get_base_type (type);
+ tree operation_type = result_type;
+ tree result;
+
+ if (operation_type
+ && TREE_CODE (operation_type) == RECORD_TYPE
+ && TYPE_JUSTIFIED_MODULAR_P (operation_type))
+ operation_type = TREE_TYPE (TYPE_FIELDS (operation_type));
+
+ if (operation_type
+ && TREE_CODE (operation_type) == INTEGER_TYPE
+ && TYPE_EXTRA_SUBTYPE_P (operation_type))
+ operation_type = get_base_type (operation_type);
+
+ switch (op_code)
+ {
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ if (!operation_type)
+ result_type = operation_type = TREE_TYPE (type);
+ else
+ gcc_assert (result_type == TREE_TYPE (type));
+
+ result = fold_build1 (op_code, operation_type, operand);
+ break;
+
+ case TRUTH_NOT_EXPR:
+#ifdef ENABLE_CHECKING
+ gcc_assert (TREE_CODE (get_base_type (result_type)) == BOOLEAN_TYPE);
+#endif
+ result = invert_truthvalue_loc (EXPR_LOCATION (operand), operand);
+ /* When not optimizing, fold the result as invert_truthvalue_loc
+ doesn't fold the result of comparisons. This is intended to undo
+ the trick used for boolean rvalues in gnat_to_gnu. */
+ if (!optimize)
+ result = fold (result);
+ break;
+
+ case ATTR_ADDR_EXPR:
+ case ADDR_EXPR:
+ switch (TREE_CODE (operand))
+ {
+ case INDIRECT_REF:
+ case UNCONSTRAINED_ARRAY_REF:
+ result = TREE_OPERAND (operand, 0);
+
+ /* Make sure the type here is a pointer, not a reference.
+ GCC wants pointer types for function addresses. */
+ if (!result_type)
+ result_type = build_pointer_type (type);
+
+ /* If the underlying object can alias everything, propagate the
+ property since we are effectively retrieving the object. */
+ if (POINTER_TYPE_P (TREE_TYPE (result))
+ && TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (result)))
+ {
+ if (TREE_CODE (result_type) == POINTER_TYPE
+ && !TYPE_REF_CAN_ALIAS_ALL (result_type))
+ result_type
+ = build_pointer_type_for_mode (TREE_TYPE (result_type),
+ TYPE_MODE (result_type),
+ true);
+ else if (TREE_CODE (result_type) == REFERENCE_TYPE
+ && !TYPE_REF_CAN_ALIAS_ALL (result_type))
+ result_type
+ = build_reference_type_for_mode (TREE_TYPE (result_type),
+ TYPE_MODE (result_type),
+ true);
+ }
+ break;
+
+ case NULL_EXPR:
+ result = operand;
+ TREE_TYPE (result) = type = build_pointer_type (type);
+ break;
+
+ case COMPOUND_EXPR:
+ /* Fold a compound expression if it has unconstrained array type
+ since the middle-end cannot handle it. But we don't do it in the
+ general case because it may introduce aliasing issues if the
+ first operand is an indirect assignment and the second operand is
+ the corresponding address, e.g. for an allocator. */
+ if (TREE_CODE (type) == UNCONSTRAINED_ARRAY_TYPE)
+ {
+ result = build_unary_op (ADDR_EXPR, result_type,
+ TREE_OPERAND (operand, 1));
+ result = build2 (COMPOUND_EXPR, TREE_TYPE (result),
+ TREE_OPERAND (operand, 0), result);
+ break;
+ }
+ goto common;
+
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ /* If this is for 'Address, find the address of the prefix and add
+ the offset to the field. Otherwise, do this the normal way. */
+ if (op_code == ATTR_ADDR_EXPR)
+ {
+ HOST_WIDE_INT bitsize;
+ HOST_WIDE_INT bitpos;
+ tree offset, inner;
+ enum machine_mode mode;
+ int unsignedp, volatilep;
+
+ inner = get_inner_reference (operand, &bitsize, &bitpos, &offset,
+ &mode, &unsignedp, &volatilep,
+ false);
+
+ /* If INNER is a padding type whose field has a self-referential
+ size, convert to that inner type. We know the offset is zero
+ and we need to have that type visible. */
+ if (TYPE_IS_PADDING_P (TREE_TYPE (inner))
+ && CONTAINS_PLACEHOLDER_P
+ (TYPE_SIZE (TREE_TYPE (TYPE_FIELDS
+ (TREE_TYPE (inner))))))
+ inner = convert (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (inner))),
+ inner);
+
+ /* Compute the offset as a byte offset from INNER. */
+ if (!offset)
+ offset = size_zero_node;
+
+ offset = size_binop (PLUS_EXPR, offset,
+ size_int (bitpos / BITS_PER_UNIT));
+
+ /* Take the address of INNER, convert the offset to void *, and
+ add them. It will later be converted to the desired result
+ type, if any. */
+ inner = build_unary_op (ADDR_EXPR, NULL_TREE, inner);
+ inner = convert (ptr_void_type_node, inner);
+ result = build_binary_op (POINTER_PLUS_EXPR, ptr_void_type_node,
+ inner, offset);
+ result = convert (build_pointer_type (TREE_TYPE (operand)),
+ result);
+ break;
+ }
+ goto common;
+
+ case CONSTRUCTOR:
+ /* If this is just a constructor for a padded record, we can
+ just take the address of the single field and convert it to
+ a pointer to our type. */
+ if (TYPE_IS_PADDING_P (type))
+ {
+ result = (*CONSTRUCTOR_ELTS (operand))[0].value;
+ result = convert (build_pointer_type (TREE_TYPE (operand)),
+ build_unary_op (ADDR_EXPR, NULL_TREE, result));
+ break;
+ }
+
+ goto common;
+
+ case NOP_EXPR:
+ if (AGGREGATE_TYPE_P (type)
+ && AGGREGATE_TYPE_P (TREE_TYPE (TREE_OPERAND (operand, 0))))
+ return build_unary_op (ADDR_EXPR, result_type,
+ TREE_OPERAND (operand, 0));
+
+ /* ... fallthru ... */
+
+ case VIEW_CONVERT_EXPR:
+ /* If this is just a variant conversion, or if the conversion doesn't
+ change the mode, get the result type from this type and go down.
+ This is needed for conversions of CONST_DECLs, to eventually get
+ to the address of their CORRESPONDING_VARs. */
+ if ((TYPE_MAIN_VARIANT (type)
+ == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (operand, 0))))
+ || (TYPE_MODE (type) != BLKmode
+ && (TYPE_MODE (type)
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (operand, 0))))))
+ return build_unary_op (ADDR_EXPR,
+ (result_type ? result_type
+ : build_pointer_type (type)),
+ TREE_OPERAND (operand, 0));
+ goto common;
+
+ case CONST_DECL:
+ operand = DECL_CONST_CORRESPONDING_VAR (operand);
+
+ /* ... fall through ... */
+
+ default:
+ common:
+
+ /* If we are taking the address of a padded record whose field
+ contains a template, take the address of the field. */
+ if (TYPE_IS_PADDING_P (type)
+ && TREE_CODE (TREE_TYPE (TYPE_FIELDS (type))) == RECORD_TYPE
+ && TYPE_CONTAINS_TEMPLATE_P (TREE_TYPE (TYPE_FIELDS (type))))
+ {
+ type = TREE_TYPE (TYPE_FIELDS (type));
+ operand = convert (type, operand);
+ }
+
+ gnat_mark_addressable (operand);
+ result = build_fold_addr_expr (operand);
+ }
+
+ TREE_CONSTANT (result) = staticp (operand) || TREE_CONSTANT (operand);
+ break;
+
+ case INDIRECT_REF:
+ {
+ tree t = remove_conversions (operand, false);
+ bool can_never_be_null = DECL_P (t) && DECL_CAN_NEVER_BE_NULL_P (t);
+
+ /* If TYPE is a thin pointer, either first retrieve the base if this
+ is an expression with an offset built for the initialization of an
+ object with an unconstrained nominal subtype, or else convert to
+ the fat pointer. */
+ if (TYPE_IS_THIN_POINTER_P (type))
+ {
+ tree rec_type = TREE_TYPE (type);
+
+ if (TREE_CODE (operand) == POINTER_PLUS_EXPR
+ && TREE_OPERAND (operand, 1)
+ == byte_position (DECL_CHAIN (TYPE_FIELDS (rec_type)))
+ && TREE_CODE (TREE_OPERAND (operand, 0)) == NOP_EXPR)
+ {
+ operand = TREE_OPERAND (TREE_OPERAND (operand, 0), 0);
+ type = TREE_TYPE (operand);
+ }
+ else if (TYPE_UNCONSTRAINED_ARRAY (rec_type))
+ {
+ operand
+ = convert (TREE_TYPE (TYPE_UNCONSTRAINED_ARRAY (rec_type)),
+ operand);
+ type = TREE_TYPE (operand);
+ }
+ }
+
+ /* If we want to refer to an unconstrained array, use the appropriate
+ expression. But this will never survive down to the back-end. */
+ if (TYPE_IS_FAT_POINTER_P (type))
+ {
+ result = build1 (UNCONSTRAINED_ARRAY_REF,
+ TYPE_UNCONSTRAINED_ARRAY (type), operand);
+ TREE_READONLY (result)
+ = TYPE_READONLY (TYPE_UNCONSTRAINED_ARRAY (type));
+ }
+
+ /* If we are dereferencing an ADDR_EXPR, return its operand. */
+ else if (TREE_CODE (operand) == ADDR_EXPR)
+ result = TREE_OPERAND (operand, 0);
+
+ /* Otherwise, build and fold the indirect reference. */
+ else
+ {
+ result = build_fold_indirect_ref (operand);
+ TREE_READONLY (result) = TYPE_READONLY (TREE_TYPE (type));
+ }
+
+ if (!TYPE_IS_FAT_POINTER_P (type) && TYPE_VOLATILE (TREE_TYPE (type)))
+ {
+ TREE_SIDE_EFFECTS (result) = 1;
+ if (TREE_CODE (result) == INDIRECT_REF)
+ TREE_THIS_VOLATILE (result) = TYPE_VOLATILE (TREE_TYPE (result));
+ }
+
+ if ((TREE_CODE (result) == INDIRECT_REF
+ || TREE_CODE (result) == UNCONSTRAINED_ARRAY_REF)
+ && can_never_be_null)
+ TREE_THIS_NOTRAP (result) = 1;
+
+ break;
+ }
+
+ case NEGATE_EXPR:
+ case BIT_NOT_EXPR:
+ {
+ tree modulus = ((operation_type
+ && TREE_CODE (operation_type) == INTEGER_TYPE
+ && TYPE_MODULAR_P (operation_type))
+ ? TYPE_MODULUS (operation_type) : NULL_TREE);
+ int mod_pow2 = modulus && integer_pow2p (modulus);
+
+ /* If this is a modular type, there are various possibilities
+ depending on the operation and whether the modulus is a
+ power of two or not. */
+
+ if (modulus)
+ {
+ gcc_assert (operation_type == base_type);
+ operand = convert (operation_type, operand);
+
+ /* For a binary modulus, the fastest approach in the negate case
+ is the straightforward code below; the TRUNC_MOD_EXPR
+ amounts to an AND operation. */
+ if (op_code == NEGATE_EXPR && mod_pow2)
+ result = fold_build2 (TRUNC_MOD_EXPR, operation_type,
+ fold_build1 (NEGATE_EXPR, operation_type,
+ operand),
+ modulus);
+
+ /* For the nonbinary negate case, return zero for a zero operand,
+ else return the modulus minus the operand. If the modulus
+ is a power of two minus one, we can do the subtraction
+ as an XOR since it is equivalent and faster on most machines. */
+ else if (op_code == NEGATE_EXPR && !mod_pow2)
+ {
+ if (integer_pow2p (fold_build2 (PLUS_EXPR, operation_type,
+ modulus,
+ convert (operation_type,
+ integer_one_node))))
+ result = fold_build2 (BIT_XOR_EXPR, operation_type,
+ operand, modulus);
+ else
+ result = fold_build2 (MINUS_EXPR, operation_type,
+ modulus, operand);
+
+ result = fold_build3 (COND_EXPR, operation_type,
+ fold_build2 (NE_EXPR,
+ boolean_type_node,
+ operand,
+ convert
+ (operation_type,
+ integer_zero_node)),
+ result, operand);
+ }
+ else
+ {
+ /* For the NOT cases, we need a constant equal to
+ the modulus minus one. For a binary modulus, we
+ XOR against that constant; for a nonbinary modulus,
+ we subtract the operand from it. */
+
+ tree cnst = fold_build2 (MINUS_EXPR, operation_type, modulus,
+ convert (operation_type,
+ integer_one_node));
+
+ if (mod_pow2)
+ result = fold_build2 (BIT_XOR_EXPR, operation_type,
+ operand, cnst);
+ else
+ result = fold_build2 (MINUS_EXPR, operation_type,
+ cnst, operand);
+ }
+
+ break;
+ }
+ }
+
+ /* ... fall through ... */
+
+ default:
+ gcc_assert (operation_type == base_type);
+ result = fold_build1 (op_code, operation_type,
+ convert (operation_type, operand));
+ }
+
+ if (result_type && TREE_TYPE (result) != result_type)
+ result = convert (result_type, result);
+
+ return result;
+}
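+
+ /* For instance, with "type M is mod 7", the negation -X is built above
+ as X /= 0 ? (7 - X) : 0, and since 7 + 1 is a power of two the
+ subtraction is actually emitted as X xor 7. For a binary modulus, the
+ straightforward (-X) mod M form is used instead. This is only an
+ illustrative sketch of the code above. */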
+
+/* Similar, but for COND_EXPR. */
+
+tree
+build_cond_expr (tree result_type, tree condition_operand,
+ tree true_operand, tree false_operand)
+{
+ bool addr_p = false;
+ tree result;
+
+ /* The front-end verified that the result, true and false operands have
+ the same base type. Convert everything to the result type. */
+ true_operand = convert (result_type, true_operand);
+ false_operand = convert (result_type, false_operand);
+
+ /* If the result type is unconstrained, take the address of the operands and
+ then dereference the result. Likewise if the result type is passed by
+ reference, because creating a temporary of this type is not allowed. */
+ if (TREE_CODE (result_type) == UNCONSTRAINED_ARRAY_TYPE
+ || TYPE_IS_BY_REFERENCE_P (result_type)
+ || CONTAINS_PLACEHOLDER_P (TYPE_SIZE (result_type)))
+ {
+ result_type = build_pointer_type (result_type);
+ true_operand = build_unary_op (ADDR_EXPR, result_type, true_operand);
+ false_operand = build_unary_op (ADDR_EXPR, result_type, false_operand);
+ addr_p = true;
+ }
+
+ result = fold_build3 (COND_EXPR, result_type, condition_operand,
+ true_operand, false_operand);
+
+ /* If we have a common SAVE_EXPR (possibly surrounded by arithmetics)
+ in both arms, make sure it gets evaluated by moving it ahead of the
+ conditional expression. This is necessary because it is evaluated
+ in only one place at run time and would otherwise be uninitialized
+ in one of the arms. */
+ true_operand = skip_simple_arithmetic (true_operand);
+ false_operand = skip_simple_arithmetic (false_operand);
+
+ if (true_operand == false_operand && TREE_CODE (true_operand) == SAVE_EXPR)
+ result = build2 (COMPOUND_EXPR, result_type, true_operand, result);
+
+ if (addr_p)
+ result = build_unary_op (INDIRECT_REF, NULL_TREE, result);
+
+ return result;
+}
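+
+ /* For example, if the two arms have an unconstrained array type, the
+ conditional built above is roughly *(COND ? &TRUE_OP : &FALSE_OP),
+ which avoids creating a temporary of a type whose size isn't known at
+ compile time. */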
+
+/* Similar, but for COMPOUND_EXPR. */
+
+tree
+build_compound_expr (tree result_type, tree stmt_operand, tree expr_operand)
+{
+ bool addr_p = false;
+ tree result;
+
+ /* If the result type is unconstrained, take the address of the operand and
+ then dereference the result. Likewise if the result type is passed by
+ reference, but this is natively handled in the gimplifier. */
+ if (TREE_CODE (result_type) == UNCONSTRAINED_ARRAY_TYPE
+ || CONTAINS_PLACEHOLDER_P (TYPE_SIZE (result_type)))
+ {
+ result_type = build_pointer_type (result_type);
+ expr_operand = build_unary_op (ADDR_EXPR, result_type, expr_operand);
+ addr_p = true;
+ }
+
+ result = fold_build2 (COMPOUND_EXPR, result_type, stmt_operand,
+ expr_operand);
+
+ if (addr_p)
+ result = build_unary_op (INDIRECT_REF, NULL_TREE, result);
+
+ return result;
+}
+
+/* Conveniently construct a function call expression. FNDECL names the
+ function to be called, N is the number of arguments, and the "..."
+ parameters are the argument expressions. Unlike build_call_expr
+ this doesn't fold the call, hence it will always return a CALL_EXPR. */
+
+tree
+build_call_n_expr (tree fndecl, int n, ...)
+{
+ va_list ap;
+ tree fntype = TREE_TYPE (fndecl);
+ tree fn = build1 (ADDR_EXPR, build_pointer_type (fntype), fndecl);
+
+ va_start (ap, n);
+ fn = build_call_valist (TREE_TYPE (fntype), fn, n, ap);
+ va_end (ap);
+ return fn;
+}
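+
+ /* Typical usage, as in maybe_wrap_free below:
+
+ build_call_n_expr (free_decl, 1, free_ptr);
+
+ builds an unfolded CALL_EXPR invoking the __gnat_free entry point with
+ a single argument. */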
+
+/* Call a function that raises an exception and pass the line number and file
+ name, if requested. MSG says which exception function to call.
+
+ GNAT_NODE is the gnat node conveying the source location for which the
+ error should be signaled, or Empty in which case the error is signaled on
+ the current ref_file_name/input_line.
+
+ KIND says which kind of exception this is for
+ (N_Raise_{Constraint,Storage,Program}_Error). */
+
+tree
+build_call_raise (int msg, Node_Id gnat_node, char kind)
+{
+ tree fndecl = gnat_raise_decls[msg];
+ tree label = get_exception_label (kind);
+ tree filename;
+ int line_number;
+ const char *str;
+ int len;
+
+ /* If this is to be done as a goto, handle that case. */
+ if (label)
+ {
+ Entity_Id local_raise = Get_Local_Raise_Call_Entity ();
+ tree gnu_result = build1 (GOTO_EXPR, void_type_node, label);
+
+ /* If Local_Raise is present, generate
+ Local_Raise (exception'Identity); */
+ if (Present (local_raise))
+ {
+ tree gnu_local_raise
+ = gnat_to_gnu_entity (local_raise, NULL_TREE, 0);
+ tree gnu_exception_entity
+ = gnat_to_gnu_entity (Get_RT_Exception_Entity (msg), NULL_TREE, 0);
+ tree gnu_call
+ = build_call_n_expr (gnu_local_raise, 1,
+ build_unary_op (ADDR_EXPR, NULL_TREE,
+ gnu_exception_entity));
+
+ gnu_result = build2 (COMPOUND_EXPR, void_type_node,
+ gnu_call, gnu_result);
+ }
+
+ return gnu_result;
+ }
+
+ str
+ = (Debug_Flag_NN || Exception_Locations_Suppressed)
+ ? ""
+ : (gnat_node != Empty && Sloc (gnat_node) != No_Location)
+ ? IDENTIFIER_POINTER
+ (get_identifier (Get_Name_String
+ (Debug_Source_Name
+ (Get_Source_File_Index (Sloc (gnat_node))))))
+ : ref_filename;
+
+ len = strlen (str);
+ filename = build_string (len, str);
+ line_number
+ = (gnat_node != Empty && Sloc (gnat_node) != No_Location)
+ ? Get_Logical_Line_Number (Sloc (gnat_node))
+ : LOCATION_LINE (input_location);
+
+ TREE_TYPE (filename) = build_array_type (unsigned_char_type_node,
+ build_index_type (size_int (len)));
+
+ return
+ build_call_n_expr (fndecl, 2,
+ build1 (ADDR_EXPR,
+ build_pointer_type (unsigned_char_type_node),
+ filename),
+ build_int_cst (NULL_TREE, line_number));
+}
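+
+ /* The tree returned above is in effect the call
+
+ FNDECL (&FILENAME, LINE_NUMBER);
+
+ where FILENAME is passed as a pointer to an unsigned char array and
+ LINE_NUMBER as an integer constant; the actual runtime routine called
+ depends on MSG. */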
+
+/* Similar to build_call_raise, for an index or range check exception as
+ determined by MSG, with extra information generated of the form
+ "INDEX out of range FIRST..LAST". */
+
+tree
+build_call_raise_range (int msg, Node_Id gnat_node,
+ tree index, tree first, tree last)
+{
+ tree fndecl = gnat_raise_decls_ext[msg];
+ tree filename;
+ int line_number, column_number;
+ const char *str;
+ int len;
+
+ str
+ = (Debug_Flag_NN || Exception_Locations_Suppressed)
+ ? ""
+ : (gnat_node != Empty && Sloc (gnat_node) != No_Location)
+ ? IDENTIFIER_POINTER
+ (get_identifier (Get_Name_String
+ (Debug_Source_Name
+ (Get_Source_File_Index (Sloc (gnat_node))))))
+ : ref_filename;
+
+ len = strlen (str);
+ filename = build_string (len, str);
+ if (gnat_node != Empty && Sloc (gnat_node) != No_Location)
+ {
+ line_number = Get_Logical_Line_Number (Sloc (gnat_node));
+ column_number = Get_Column_Number (Sloc (gnat_node));
+ }
+ else
+ {
+ line_number = LOCATION_LINE (input_location);
+ column_number = 0;
+ }
+
+ TREE_TYPE (filename) = build_array_type (unsigned_char_type_node,
+ build_index_type (size_int (len)));
+
+ return
+ build_call_n_expr (fndecl, 6,
+ build1 (ADDR_EXPR,
+ build_pointer_type (unsigned_char_type_node),
+ filename),
+ build_int_cst (NULL_TREE, line_number),
+ build_int_cst (NULL_TREE, column_number),
+ convert (integer_type_node, index),
+ convert (integer_type_node, first),
+ convert (integer_type_node, last));
+}
+
+/* Similar to build_call_raise, with extra information about the column
+ where the check failed. */
+
+tree
+build_call_raise_column (int msg, Node_Id gnat_node)
+{
+ tree fndecl = gnat_raise_decls_ext[msg];
+ tree filename;
+ int line_number, column_number;
+ const char *str;
+ int len;
+
+ str
+ = (Debug_Flag_NN || Exception_Locations_Suppressed)
+ ? ""
+ : (gnat_node != Empty && Sloc (gnat_node) != No_Location)
+ ? IDENTIFIER_POINTER
+ (get_identifier (Get_Name_String
+ (Debug_Source_Name
+ (Get_Source_File_Index (Sloc (gnat_node))))))
+ : ref_filename;
+
+ len = strlen (str);
+ filename = build_string (len, str);
+ if (gnat_node != Empty && Sloc (gnat_node) != No_Location)
+ {
+ line_number = Get_Logical_Line_Number (Sloc (gnat_node));
+ column_number = Get_Column_Number (Sloc (gnat_node));
+ }
+ else
+ {
+ line_number = LOCATION_LINE (input_location);
+ column_number = 0;
+ }
+
+ TREE_TYPE (filename) = build_array_type (unsigned_char_type_node,
+ build_index_type (size_int (len)));
+
+ return
+ build_call_n_expr (fndecl, 3,
+ build1 (ADDR_EXPR,
+ build_pointer_type (unsigned_char_type_node),
+ filename),
+ build_int_cst (NULL_TREE, line_number),
+ build_int_cst (NULL_TREE, column_number));
+}
+
+/* qsort comparer for the bit positions of two constructor elements
+ for record components. */
+
+static int
+compare_elmt_bitpos (const PTR rt1, const PTR rt2)
+{
+ const constructor_elt * const elmt1 = (const constructor_elt * const) rt1;
+ const constructor_elt * const elmt2 = (const constructor_elt * const) rt2;
+ const_tree const field1 = elmt1->index;
+ const_tree const field2 = elmt2->index;
+ const int ret
+ = tree_int_cst_compare (bit_position (field1), bit_position (field2));
+
+ return ret ? ret : (int) (DECL_UID (field1) - DECL_UID (field2));
+}
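+
+ /* Note that ties on the bit position are broken by comparing DECL_UIDs,
+ which makes the qsort deterministic even though qsort itself is not a
+ stable sort. */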
+
+/* Return a CONSTRUCTOR of TYPE whose elements are V. */
+
+tree
+gnat_build_constructor (tree type, vec<constructor_elt, va_gc> *v)
+{
+ bool allconstant = (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST);
+ bool read_only = true;
+ bool side_effects = false;
+ tree result, obj, val;
+ unsigned int n_elmts;
+
+ /* Scan the elements to see if they are all constant or if any has side
+ effects, to let us set global flags on the resulting constructor. Count
+ the elements along the way for possible sorting purposes below. */
+ FOR_EACH_CONSTRUCTOR_ELT (v, n_elmts, obj, val)
+ {
+ /* The predicate must be in keeping with output_constructor. */
+ if ((!TREE_CONSTANT (val) && !TREE_STATIC (val))
+ || (TREE_CODE (type) == RECORD_TYPE
+ && CONSTRUCTOR_BITFIELD_P (obj)
+ && !initializer_constant_valid_for_bitfield_p (val))
+ || !initializer_constant_valid_p (val, TREE_TYPE (val)))
+ allconstant = false;
+
+ if (!TREE_READONLY (val))
+ read_only = false;
+
+ if (TREE_SIDE_EFFECTS (val))
+ side_effects = true;
+ }
+
+ /* For record types with constant components only, sort field list
+ by increasing bit position. This is necessary to ensure the
+ constructor can be output as static data. */
+ if (allconstant && TREE_CODE (type) == RECORD_TYPE && n_elmts > 1)
+ v->qsort (compare_elmt_bitpos);
+
+ result = build_constructor (type, v);
+ CONSTRUCTOR_NO_CLEARING (result) = 1;
+ TREE_CONSTANT (result) = TREE_STATIC (result) = allconstant;
+ TREE_SIDE_EFFECTS (result) = side_effects;
+ TREE_READONLY (result) = TYPE_READONLY (type) || read_only || allconstant;
+ return result;
+}
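+
+ /* The sort above matters because output_constructor emits the elements
+ in list order: a record aggregate whose components were listed out of
+ bit-position order could otherwise not be output as static data. */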
+
+/* Return a COMPONENT_REF to access a field that is given by COMPONENT,
+ an IDENTIFIER_NODE giving the name of the field, or FIELD, a FIELD_DECL,
+ for the field. Don't fold the result if NO_FOLD_P is true.
+
+ We also handle the fact that we might have been passed a pointer to the
+ actual record and know how to look for fields in variant parts. */
+
+static tree
+build_simple_component_ref (tree record_variable, tree component, tree field,
+ bool no_fold_p)
+{
+ tree record_type = TYPE_MAIN_VARIANT (TREE_TYPE (record_variable));
+ tree base, ref;
+
+ gcc_assert (RECORD_OR_UNION_TYPE_P (record_type)
+ && COMPLETE_TYPE_P (record_type)
+ && (component == NULL_TREE) != (field == NULL_TREE));
+
+ /* If no field was specified, look for a field with the specified name in
+ the current record only. */
+ if (!field)
+ for (field = TYPE_FIELDS (record_type);
+ field;
+ field = DECL_CHAIN (field))
+ if (DECL_NAME (field) == component)
+ break;
+
+ if (!field)
+ return NULL_TREE;
+
+ /* If this field is not in the specified record, see if we can find a field
+ in the specified record whose original field is the same as this one. */
+ if (DECL_CONTEXT (field) != record_type)
+ {
+ tree new_field;
+
+ /* First loop through normal components. */
+ for (new_field = TYPE_FIELDS (record_type);
+ new_field;
+ new_field = DECL_CHAIN (new_field))
+ if (SAME_FIELD_P (field, new_field))
+ break;
+
+ /* Next, see if we're looking for an inherited component in an extension.
+ If so, look through the extension directly, unless the type contains
+ a placeholder, as it might be needed for a later substitution. */
+ if (!new_field
+ && TREE_CODE (record_variable) == VIEW_CONVERT_EXPR
+ && TYPE_ALIGN_OK (record_type)
+ && !type_contains_placeholder_p (record_type)
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (record_variable, 0)))
+ == RECORD_TYPE
+ && TYPE_ALIGN_OK (TREE_TYPE (TREE_OPERAND (record_variable, 0))))
+ {
+ ref = build_simple_component_ref (TREE_OPERAND (record_variable, 0),
+ NULL_TREE, field, no_fold_p);
+ if (ref)
+ return ref;
+ }
+
+ /* Next, loop through DECL_INTERNAL_P components if we haven't found the
+ component in the first search. Doing this search in two steps is
+ required to avoid hidden homonymous fields in the _Parent field. */
+ if (!new_field)
+ for (new_field = TYPE_FIELDS (record_type);
+ new_field;
+ new_field = DECL_CHAIN (new_field))
+ if (DECL_INTERNAL_P (new_field))
+ {
+ tree field_ref
+ = build_simple_component_ref (record_variable,
+ NULL_TREE, new_field, no_fold_p);
+ ref = build_simple_component_ref (field_ref, NULL_TREE, field,
+ no_fold_p);
+ if (ref)
+ return ref;
+ }
+
+ field = new_field;
+ }
+
+ if (!field)
+ return NULL_TREE;
+
+ /* If the field's offset has overflowed, do not try to access it, as doing
+ so may trigger sanity checks deeper in the back-end. Note that we don't
+ need to warn since this will be done on trying to declare the object. */
+ if (TREE_CODE (DECL_FIELD_OFFSET (field)) == INTEGER_CST
+ && TREE_OVERFLOW (DECL_FIELD_OFFSET (field)))
+ return NULL_TREE;
+
+ /* We have found a suitable field. Before building the COMPONENT_REF, get
+ the base object of the record variable if possible. */
+ base = record_variable;
+
+ if (TREE_CODE (record_variable) == VIEW_CONVERT_EXPR)
+ {
+ tree inner_variable = TREE_OPERAND (record_variable, 0);
+ tree inner_type = TYPE_MAIN_VARIANT (TREE_TYPE (inner_variable));
+
+ /* Look through a conversion between type variants. This is transparent
+ as far as the field is concerned. */
+ if (inner_type == record_type)
+ base = inner_variable;
+
+ /* Look through a conversion between original and packable version, but
+ the field needs to be adjusted in this case. */
+ else if (TYPE_NAME (inner_type) == TYPE_NAME (record_type))
+ {
+ tree new_field;
+
+ for (new_field = TYPE_FIELDS (inner_type);
+ new_field;
+ new_field = DECL_CHAIN (new_field))
+ if (SAME_FIELD_P (field, new_field))
+ break;
+ if (new_field)
+ {
+ field = new_field;
+ base = inner_variable;
+ }
+ }
+ }
+
+ ref = build3 (COMPONENT_REF, TREE_TYPE (field), base, field, NULL_TREE);
+
+ if (TREE_READONLY (record_variable)
+ || TREE_READONLY (field)
+ || TYPE_READONLY (record_type))
+ TREE_READONLY (ref) = 1;
+
+ if (TREE_THIS_VOLATILE (record_variable)
+ || TREE_THIS_VOLATILE (field)
+ || TYPE_VOLATILE (record_type))
+ TREE_THIS_VOLATILE (ref) = 1;
+
+ if (no_fold_p)
+ return ref;
+
+ /* The generic folder may punt in this case because the inner array type
+ can be self-referential, but folding is in fact not problematic. */
+ if (TREE_CODE (base) == CONSTRUCTOR
+ && TYPE_CONTAINS_TEMPLATE_P (TREE_TYPE (base)))
+ {
+ vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (base);
+ unsigned HOST_WIDE_INT idx;
+ tree index, value;
+ FOR_EACH_CONSTRUCTOR_ELT (elts, idx, index, value)
+ if (index == field)
+ return value;
+ return ref;
+ }
+
+ return fold (ref);
+}
+
+/* Likewise, but generate a Constraint_Error if the reference could not be
+ found. */
+
+tree
+build_component_ref (tree record_variable, tree component, tree field,
+ bool no_fold_p)
+{
+ tree ref = build_simple_component_ref (record_variable, component, field,
+ no_fold_p);
+ if (ref)
+ return ref;
+
+ /* If FIELD was specified, assume this is an invalid user field so raise
+ Constraint_Error. Otherwise, we have no type to return so abort. */
+ gcc_assert (field);
+ return build1 (NULL_EXPR, TREE_TYPE (field),
+ build_call_raise (CE_Discriminant_Check_Failed, Empty,
+ N_Raise_Constraint_Error));
+}
+
+/* Helper for build_call_alloc_dealloc, with arguments to be interpreted
+ identically. Process the case where a GNAT_PROC to call is provided. */
+
+static inline tree
+build_call_alloc_dealloc_proc (tree gnu_obj, tree gnu_size, tree gnu_type,
+ Entity_Id gnat_proc, Entity_Id gnat_pool)
+{
+ tree gnu_proc = gnat_to_gnu (gnat_proc);
+ tree gnu_call;
+
+ /* A storage pool's underlying type is a record type (for both predefined
+ storage pools and GNAT simple storage pools). The secondary stack uses
+ the same mechanism, but its pool object (SS_Pool) is an integer. */
+ if (Is_Record_Type (Underlying_Type (Etype (gnat_pool))))
+ {
+ /* The size is the third parameter; the alignment parameter
+ has the same type. */
+ Entity_Id gnat_size_type
+ = Etype (Next_Formal (Next_Formal (First_Formal (gnat_proc))));
+ tree gnu_size_type = gnat_to_gnu_type (gnat_size_type);
+
+ tree gnu_pool = gnat_to_gnu (gnat_pool);
+ tree gnu_pool_addr = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_pool);
+ tree gnu_align = size_int (TYPE_ALIGN (gnu_type) / BITS_PER_UNIT);
+
+ gnu_size = convert (gnu_size_type, gnu_size);
+ gnu_align = convert (gnu_size_type, gnu_align);
+
+ /* The first arg is always the address of the storage pool; next
+ comes the address of the object, for a deallocator, then the
+ size and alignment. */
+ if (gnu_obj)
+ gnu_call = build_call_n_expr (gnu_proc, 4, gnu_pool_addr, gnu_obj,
+ gnu_size, gnu_align);
+ else
+ gnu_call = build_call_n_expr (gnu_proc, 3, gnu_pool_addr,
+ gnu_size, gnu_align);
+ }
+
+ /* Secondary stack case. */
+ else
+ {
+ /* The size is the second parameter. */
+ Entity_Id gnat_size_type
+ = Etype (Next_Formal (First_Formal (gnat_proc)));
+ tree gnu_size_type = gnat_to_gnu_type (gnat_size_type);
+
+ gnu_size = convert (gnu_size_type, gnu_size);
+
+ /* For a deallocator, the first arg is the address of the object,
+ followed by the size; for an allocator, the size alone. */
+ if (gnu_obj)
+ gnu_call = build_call_n_expr (gnu_proc, 2, gnu_obj, gnu_size);
+ else
+ gnu_call = build_call_n_expr (gnu_proc, 1, gnu_size);
+ }
+
+ return gnu_call;
+}
+
+/* Helper for build_call_alloc_dealloc, to build and return an allocator for
+ DATA_SIZE bytes aimed at containing a DATA_TYPE object, using the default
+ __gnat_malloc allocator. Honor DATA_TYPE alignments greater than what the
+ latter offers. */
+
+static inline tree
+maybe_wrap_malloc (tree data_size, tree data_type, Node_Id gnat_node)
+{
+ /* When the DATA_TYPE alignment is stricter than what malloc offers
+ (super-aligned case), we allocate an "aligning" wrapper type and return
+ the address of its single data field with the malloc's return value
+ stored just in front. */
+
+ unsigned int data_align = TYPE_ALIGN (data_type);
+ unsigned int system_allocator_alignment
+ = get_target_system_allocator_alignment () * BITS_PER_UNIT;
+
+ tree aligning_type
+ = ((data_align > system_allocator_alignment)
+ ? make_aligning_type (data_type, data_align, data_size,
+ system_allocator_alignment,
+ POINTER_SIZE / BITS_PER_UNIT,
+ gnat_node)
+ : NULL_TREE);
+
+ tree size_to_malloc
+ = aligning_type ? TYPE_SIZE_UNIT (aligning_type) : data_size;
+
+ tree malloc_ptr;
+
+ /* On VMS, if pointers are 64-bit and the allocator size is 32-bit or
+ Convention C, allocate 32-bit memory. */
+ if (TARGET_ABI_OPEN_VMS
+ && POINTER_SIZE == 64
+ && Nkind (gnat_node) == N_Allocator
+ && (UI_To_Int (Esize (Etype (gnat_node))) == 32
+ || Convention (Etype (gnat_node)) == Convention_C))
+ malloc_ptr = build_call_n_expr (malloc32_decl, 1, size_to_malloc);
+ else
+ malloc_ptr = build_call_n_expr (malloc_decl, 1, size_to_malloc);
+
+ if (aligning_type)
+ {
+ /* Latch malloc's return value and get a pointer to the aligning field
+ first. */
+ tree storage_ptr = gnat_protect_expr (malloc_ptr);
+
+ tree aligning_record_addr
+ = convert (build_pointer_type (aligning_type), storage_ptr);
+
+ tree aligning_record
+ = build_unary_op (INDIRECT_REF, NULL_TREE, aligning_record_addr);
+
+ tree aligning_field
+ = build_component_ref (aligning_record, NULL_TREE,
+ TYPE_FIELDS (aligning_type), false);
+
+ tree aligning_field_addr
+ = build_unary_op (ADDR_EXPR, NULL_TREE, aligning_field);
+
+ /* Then arrange to store the allocator's return value ahead
+ and return. */
+ tree storage_ptr_slot_addr
+ = build_binary_op (POINTER_PLUS_EXPR, ptr_void_type_node,
+ convert (ptr_void_type_node, aligning_field_addr),
+ size_int (-(HOST_WIDE_INT) POINTER_SIZE
+ / BITS_PER_UNIT));
+
+ tree storage_ptr_slot
+ = build_unary_op (INDIRECT_REF, NULL_TREE,
+ convert (build_pointer_type (ptr_void_type_node),
+ storage_ptr_slot_addr));
+
+ return
+ build2 (COMPOUND_EXPR, TREE_TYPE (aligning_field_addr),
+ build_binary_op (INIT_EXPR, NULL_TREE,
+ storage_ptr_slot, storage_ptr),
+ aligning_field_addr);
+ }
+ else
+ return malloc_ptr;
+}
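+
+ /* In other words, in the super-aligned case the single malloc'ed block
+ is laid out, from low to high addresses, as: optional padding, then a
+ pointer-sized slot holding malloc's raw return value, then the
+ DATA_ALIGN-aligned data field whose address is returned.
+ maybe_wrap_free below reads the slot back to recover the pointer to
+ hand to free. */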
+
+/* Helper for build_call_alloc_dealloc, to release a DATA_TYPE object
+ designated by DATA_PTR using the __gnat_free entry point. */
+
+static inline tree
+maybe_wrap_free (tree data_ptr, tree data_type)
+{
+ /* In the regular alignment case, we pass the data pointer straight to free.
+ In the superaligned case, we need to retrieve the initial allocator
+ return value, stored in front of the data block at allocation time. */
+
+ unsigned int data_align = TYPE_ALIGN (data_type);
+ unsigned int system_allocator_alignment
+ = get_target_system_allocator_alignment () * BITS_PER_UNIT;
+
+ tree free_ptr;
+
+ if (data_align > system_allocator_alignment)
+ {
+ /* DATA_FRONT_PTR (void *)
+ = (void *)DATA_PTR - sizeof (void *) */
+ tree data_front_ptr
+ = build_binary_op
+ (POINTER_PLUS_EXPR, ptr_void_type_node,
+ convert (ptr_void_type_node, data_ptr),
+ size_int (-(HOST_WIDE_INT) POINTER_SIZE / BITS_PER_UNIT));
+
+ /* FREE_PTR (void *) = *(void **)DATA_FRONT_PTR */
+ free_ptr
+ = build_unary_op
+ (INDIRECT_REF, NULL_TREE,
+ convert (build_pointer_type (ptr_void_type_node), data_front_ptr));
+ }
+ else
+ free_ptr = data_ptr;
+
+ return build_call_n_expr (free_decl, 1, free_ptr);
+}
+
+/* Build a GCC tree to call an allocation or deallocation function.
+ If GNU_OBJ is nonzero, it is an object to deallocate. Otherwise,
+ generate an allocator.
+
+ GNU_SIZE is the number of bytes to allocate and GNU_TYPE is the contained
+ object type, used to determine the to-be-honored address alignment.
+ GNAT_PROC, if present, is a procedure to call and GNAT_POOL is the storage
+ pool to use. If not present, malloc and free are used. GNAT_NODE is used
+ to provide an error location for restriction violation messages. */
+
+tree
+build_call_alloc_dealloc (tree gnu_obj, tree gnu_size, tree gnu_type,
+ Entity_Id gnat_proc, Entity_Id gnat_pool,
+ Node_Id gnat_node)
+{
+ gnu_size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (gnu_size, gnu_obj);
+
+ /* Explicit procedure to call? It is assumed to deal with the type
+ alignment constraints. */
+ if (Present (gnat_proc))
+ return build_call_alloc_dealloc_proc (gnu_obj, gnu_size, gnu_type,
+ gnat_proc, gnat_pool);
+
+ /* Otherwise, object to "free" or "malloc" with possible special processing
+ for alignments stricter than what the default allocator honors. */
+ else if (gnu_obj)
+ return maybe_wrap_free (gnu_obj, gnu_type);
+ else
+ {
+ /* Assert that we can no longer be called with this special pool. */
+ gcc_assert (gnat_pool != -1);
+
+ /* Check that we aren't violating the associated restriction. */
+ if (!(Nkind (gnat_node) == N_Allocator && Comes_From_Source (gnat_node)))
+ Check_No_Implicit_Heap_Alloc (gnat_node);
+
+ return maybe_wrap_malloc (gnu_size, gnu_type, gnat_node);
+ }
+}
+
+/* Build a GCC tree that corresponds to allocating an object of TYPE whose
+ initial value is INIT, if INIT is nonzero. Convert the expression to
+ RESULT_TYPE, which must be some pointer type, and return the result.
+
+ GNAT_PROC and GNAT_POOL optionally give the procedure to call and
+ the storage pool to use. GNAT_NODE is used to provide an error
+ location for restriction violation messages. If IGNORE_INIT_TYPE is
+ true, ignore the type of INIT for the purpose of determining the size;
+ this will cause the maximum size to be allocated if TYPE is of
+ self-referential size. */
+
+tree
+build_allocator (tree type, tree init, tree result_type, Entity_Id gnat_proc,
+ Entity_Id gnat_pool, Node_Id gnat_node, bool ignore_init_type)
+{
+ tree size, storage, storage_deref, storage_init;
+
+ /* If the initializer, if present, is a NULL_EXPR, just return a new one. */
+ if (init && TREE_CODE (init) == NULL_EXPR)
+ return build1 (NULL_EXPR, result_type, TREE_OPERAND (init, 0));
+
+ /* If the initializer, if present, is a COND_EXPR, deal with each branch. */
+ else if (init && TREE_CODE (init) == COND_EXPR)
+ return build3 (COND_EXPR, result_type, TREE_OPERAND (init, 0),
+ build_allocator (type, TREE_OPERAND (init, 1), result_type,
+ gnat_proc, gnat_pool, gnat_node,
+ ignore_init_type),
+ build_allocator (type, TREE_OPERAND (init, 2), result_type,
+ gnat_proc, gnat_pool, gnat_node,
+ ignore_init_type));
+
+ /* If RESULT_TYPE is a fat or thin pointer, set SIZE to be the sum of the
+ sizes of the object and its template. Allocate the whole thing and
+ fill in the parts that are known. */
+ else if (TYPE_IS_FAT_OR_THIN_POINTER_P (result_type))
+ {
+ tree storage_type
+ = build_unc_object_type_from_ptr (result_type, type,
+ get_identifier ("ALLOC"), false);
+ tree template_type = TREE_TYPE (TYPE_FIELDS (storage_type));
+ tree storage_ptr_type = build_pointer_type (storage_type);
+
+ size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TYPE_SIZE_UNIT (storage_type),
+ init);
+
+ /* If the size overflows, pass -1 so Storage_Error will be raised. */
+ if (TREE_CODE (size) == INTEGER_CST && !valid_constant_size_p (size))
+ size = size_int (-1);
+
+ storage = build_call_alloc_dealloc (NULL_TREE, size, storage_type,
+ gnat_proc, gnat_pool, gnat_node);
+ storage = convert (storage_ptr_type, gnat_protect_expr (storage));
+ storage_deref = build_unary_op (INDIRECT_REF, NULL_TREE, storage);
+ TREE_THIS_NOTRAP (storage_deref) = 1;
+
+ /* If there is an initializing expression, then make a constructor for
+ the entire object including the bounds and copy it into the object.
+ If there is no initializing expression, just set the bounds. */
+ if (init)
+ {
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 2);
+
+ CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (storage_type),
+ build_template (template_type, type, init));
+ CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (storage_type)),
+ init);
+ storage_init
+ = build_binary_op (INIT_EXPR, NULL_TREE, storage_deref,
+ gnat_build_constructor (storage_type, v));
+ }
+ else
+ storage_init
+ = build_binary_op (INIT_EXPR, NULL_TREE,
+ build_component_ref (storage_deref, NULL_TREE,
+ TYPE_FIELDS (storage_type),
+ false),
+ build_template (template_type, type, NULL_TREE));
+
+ return build2 (COMPOUND_EXPR, result_type,
+ storage_init, convert (result_type, storage));
+ }
+
+ size = TYPE_SIZE_UNIT (type);
+
+ /* If we have an initializing expression, see if its size is simpler
+ than the size from the type. */
+ if (!ignore_init_type && init && TYPE_SIZE_UNIT (TREE_TYPE (init))
+ && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (init))) == INTEGER_CST
+ || CONTAINS_PLACEHOLDER_P (size)))
+ size = TYPE_SIZE_UNIT (TREE_TYPE (init));
+
+ /* If the size is still self-referential, reference the initializing
+ expression, if it is present. If not, this must have been a
+ call to allocate a library-level object, in which case we use
+ the maximum size. */
+ if (CONTAINS_PLACEHOLDER_P (size))
+ {
+ if (!ignore_init_type && init)
+ size = substitute_placeholder_in_expr (size, init);
+ else
+ size = max_size (size, true);
+ }
+
+ /* If the size overflows, pass -1 so Storage_Error will be raised. */
+ if (TREE_CODE (size) == INTEGER_CST && !valid_constant_size_p (size))
+ size = size_int (-1);
+
+ storage = convert (result_type,
+ build_call_alloc_dealloc (NULL_TREE, size, type,
+ gnat_proc, gnat_pool,
+ gnat_node));
+
+ /* If we have an initial value, protect the new address, assign the value
+ and return the address with a COMPOUND_EXPR. */
+ if (init)
+ {
+ storage = gnat_protect_expr (storage);
+ storage_deref = build_unary_op (INDIRECT_REF, NULL_TREE, storage);
+ TREE_THIS_NOTRAP (storage_deref) = 1;
+ storage_init
+ = build_binary_op (INIT_EXPR, NULL_TREE, storage_deref, init);
+ return build2 (COMPOUND_EXPR, result_type, storage_init, storage);
+ }
+
+ return storage;
+}
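+
+ /* For instance, for an allocator of an unconstrained array type such as
+ new String'("AB"), RESULT_TYPE is a fat pointer and the code above
+ allocates a single "ALLOC" block holding the bounds template followed
+ by the data, fills both in with one constructor, and converts the
+ block's address to the fat pointer type. This is a sketch of the
+ common case. */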
+
+/* Indicate that we need to take the address of T and that it therefore
+ should not be allocated in a register. Returns true if successful. */
+
+bool
+gnat_mark_addressable (tree t)
+{
+ while (true)
+ switch (TREE_CODE (t))
+ {
+ case ADDR_EXPR:
+ case COMPONENT_REF:
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case VIEW_CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ CASE_CONVERT:
+ t = TREE_OPERAND (t, 0);
+ break;
+
+ case COMPOUND_EXPR:
+ t = TREE_OPERAND (t, 1);
+ break;
+
+ case CONSTRUCTOR:
+ TREE_ADDRESSABLE (t) = 1;
+ return true;
+
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ TREE_ADDRESSABLE (t) = 1;
+ return true;
+
+ case FUNCTION_DECL:
+ TREE_ADDRESSABLE (t) = 1;
+ return true;
+
+ case CONST_DECL:
+ return DECL_CONST_CORRESPONDING_VAR (t)
+ && gnat_mark_addressable (DECL_CONST_CORRESPONDING_VAR (t));
+
+ default:
+ return true;
+ }
+}
+
+/* Save EXP for later use or reuse. This is equivalent to save_expr in tree.c
+ but we know how to handle our own nodes. */
+
+tree
+gnat_save_expr (tree exp)
+{
+ tree type = TREE_TYPE (exp);
+ enum tree_code code = TREE_CODE (exp);
+
+ if (TREE_CONSTANT (exp) || code == SAVE_EXPR || code == NULL_EXPR)
+ return exp;
+
+ if (code == UNCONSTRAINED_ARRAY_REF)
+ {
+ tree t = build1 (code, type, gnat_save_expr (TREE_OPERAND (exp, 0)));
+ TREE_READONLY (t) = TYPE_READONLY (type);
+ return t;
+ }
+
+ /* If this is a COMPONENT_REF of a fat pointer, save the entire fat pointer.
+ This may be more efficient, but will also allow us to more easily find
+ the match for the PLACEHOLDER_EXPR. */
+ if (code == COMPONENT_REF
+ && TYPE_IS_FAT_POINTER_P (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ return build3 (code, type, gnat_save_expr (TREE_OPERAND (exp, 0)),
+ TREE_OPERAND (exp, 1), TREE_OPERAND (exp, 2));
+
+ return save_expr (exp);
+}
+
+/* Protect EXP for immediate reuse. This is a variant of gnat_save_expr that
+ is optimized under the assumption that EXP's value doesn't change before
+ its subsequent reuse(s) except through its potential reevaluation. */
+
+tree
+gnat_protect_expr (tree exp)
+{
+ tree type = TREE_TYPE (exp);
+ enum tree_code code = TREE_CODE (exp);
+
+ if (TREE_CONSTANT (exp) || code == SAVE_EXPR || code == NULL_EXPR)
+ return exp;
+
+ /* If EXP has no side effects, we theoretically don't need to do anything.
+ However, we may be recursively passed more and more complex expressions
+ involving checks which will be reused multiple times and eventually be
+ unshared for gimplification; in order to avoid a complexity explosion
+ at that point, we protect any expressions more complex than a simple
+ arithmetic expression. */
+ if (!TREE_SIDE_EFFECTS (exp))
+ {
+ tree inner = skip_simple_arithmetic (exp);
+ if (!EXPR_P (inner) || REFERENCE_CLASS_P (inner))
+ return exp;
+ }
+
+ /* If this is a conversion, protect what's inside the conversion. */
+ if (code == NON_LVALUE_EXPR
+ || CONVERT_EXPR_CODE_P (code)
+ || code == VIEW_CONVERT_EXPR)
+ return build1 (code, type, gnat_protect_expr (TREE_OPERAND (exp, 0)));
+
+ /* If we're indirectly referencing something, we only need to protect the
+ address since the data itself can't change in these situations. */
+ if (code == INDIRECT_REF || code == UNCONSTRAINED_ARRAY_REF)
+ {
+ tree t = build1 (code, type, gnat_protect_expr (TREE_OPERAND (exp, 0)));
+ TREE_READONLY (t) = TYPE_READONLY (type);
+ return t;
+ }
+
+ /* If this is a COMPONENT_REF of a fat pointer, save the entire fat pointer.
+ This may be more efficient, but will also allow us to more easily find
+ the match for the PLACEHOLDER_EXPR. */
+ if (code == COMPONENT_REF
+ && TYPE_IS_FAT_POINTER_P (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ return build3 (code, type, gnat_protect_expr (TREE_OPERAND (exp, 0)),
+ TREE_OPERAND (exp, 1), TREE_OPERAND (exp, 2));
+
+ /* If this is a fat pointer or something that can be placed in a register,
+ just make a SAVE_EXPR. Likewise for a CALL_EXPR, since large objects
+ are returned via invisible reference in most ABIs, so the temporary
+ will be filled directly by the callee. */
+ if (TYPE_IS_FAT_POINTER_P (type)
+ || TYPE_MODE (type) != BLKmode
+ || code == CALL_EXPR)
+ return save_expr (exp);
+
+ /* Otherwise, take the address, protect it and dereference the result. */
+ return
+ build_unary_op (INDIRECT_REF, type,
+ save_expr (build_unary_op (ADDR_EXPR,
+ build_reference_type (type),
+ exp)));
+}
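+
+ /* For example, a BLKmode record reference with side effects cannot be
+ put in a SAVE_EXPR directly, so the fallback above protects it as
+ *SAVE_EXPR (&EXP): only the address is saved and each use
+ re-dereferences it. */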
+
+/* This is equivalent to stabilize_reference_1 in tree.c but we take an extra
+ argument to force evaluation of everything. */
+
+static tree
+gnat_stabilize_reference_1 (tree e, bool force)
+{
+ enum tree_code code = TREE_CODE (e);
+ tree type = TREE_TYPE (e);
+ tree result;
+
+ /* We cannot ignore const expressions because one might be a reference
+ to a const array whose index contains side effects. But we can
+ ignore things that are actual constants or that have already been
+ handled by this function. */
+ if (TREE_CONSTANT (e) || code == SAVE_EXPR)
+ return e;
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case tcc_exceptional:
+ case tcc_declaration:
+ case tcc_comparison:
+ case tcc_expression:
+ case tcc_reference:
+ case tcc_vl_exp:
+ /* If this is a COMPONENT_REF of a fat pointer, save the entire
+ fat pointer. This may be more efficient, but will also allow
+ us to more easily find the match for the PLACEHOLDER_EXPR. */
+ if (code == COMPONENT_REF
+ && TYPE_IS_FAT_POINTER_P (TREE_TYPE (TREE_OPERAND (e, 0))))
+ result
+ = build3 (code, type,
+ gnat_stabilize_reference_1 (TREE_OPERAND (e, 0), force),
+ TREE_OPERAND (e, 1), TREE_OPERAND (e, 2));
+ /* If the expression has side-effects, then encase it in a SAVE_EXPR
+ so that it will only be evaluated once. */
+ /* The tcc_reference and tcc_comparison classes could be handled as
+ below, but it is generally faster to only evaluate them once. */
+ else if (TREE_SIDE_EFFECTS (e) || force)
+ return save_expr (e);
+ else
+ return e;
+ break;
+
+ case tcc_binary:
+ /* Recursively stabilize each operand. */
+ result
+ = build2 (code, type,
+ gnat_stabilize_reference_1 (TREE_OPERAND (e, 0), force),
+ gnat_stabilize_reference_1 (TREE_OPERAND (e, 1), force));
+ break;
+
+ case tcc_unary:
+ /* Recursively stabilize each operand. */
+ result
+ = build1 (code, type,
+ gnat_stabilize_reference_1 (TREE_OPERAND (e, 0), force));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* See similar handling in gnat_stabilize_reference. */
+ TREE_READONLY (result) = TREE_READONLY (e);
+ TREE_SIDE_EFFECTS (result) |= TREE_SIDE_EFFECTS (e);
+ TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (e);
+
+ if (code == INDIRECT_REF
+ || code == UNCONSTRAINED_ARRAY_REF
+ || code == ARRAY_REF
+ || code == ARRAY_RANGE_REF)
+ TREE_THIS_NOTRAP (result) = TREE_THIS_NOTRAP (e);
+
+ return result;
+}
+
+/* This is equivalent to stabilize_reference in tree.c but we know how to
+ handle our own nodes and we take extra arguments. FORCE says whether to
+ force evaluation of everything. We set SUCCESS to true unless we walk
+ through something we don't know how to stabilize. */
+
+tree
+gnat_stabilize_reference (tree ref, bool force, bool *success)
+{
+ tree type = TREE_TYPE (ref);
+ enum tree_code code = TREE_CODE (ref);
+ tree result;
+
+ /* Assume we'll succeed unless proven otherwise. */
+ if (success)
+ *success = true;
+
+ switch (code)
+ {
+ case CONST_DECL:
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ /* No action is needed in this case. */
+ return ref;
+
+ case ADDR_EXPR:
+ CASE_CONVERT:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case VIEW_CONVERT_EXPR:
+ result
+ = build1 (code, type,
+ gnat_stabilize_reference (TREE_OPERAND (ref, 0), force,
+ success));
+ break;
+
+ case INDIRECT_REF:
+ case UNCONSTRAINED_ARRAY_REF:
+ result = build1 (code, type,
+ gnat_stabilize_reference_1 (TREE_OPERAND (ref, 0),
+ force));
+ break;
+
+ case COMPONENT_REF:
+ result = build3 (COMPONENT_REF, type,
+ gnat_stabilize_reference (TREE_OPERAND (ref, 0), force,
+ success),
+ TREE_OPERAND (ref, 1), NULL_TREE);
+ break;
+
+ case BIT_FIELD_REF:
+ result = build3 (BIT_FIELD_REF, type,
+ gnat_stabilize_reference (TREE_OPERAND (ref, 0), force,
+ success),
+ TREE_OPERAND (ref, 1), TREE_OPERAND (ref, 2));
+ break;
+
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ result = build4 (code, type,
+ gnat_stabilize_reference (TREE_OPERAND (ref, 0), force,
+ success),
+ gnat_stabilize_reference_1 (TREE_OPERAND (ref, 1),
+ force),
+ NULL_TREE, NULL_TREE);
+ break;
+
+ case CALL_EXPR:
+ result = gnat_stabilize_reference_1 (ref, force);
+ break;
+
+ case COMPOUND_EXPR:
+ result = build2 (COMPOUND_EXPR, type,
+ gnat_stabilize_reference (TREE_OPERAND (ref, 0), force,
+ success),
+ gnat_stabilize_reference (TREE_OPERAND (ref, 1), force,
+ success));
+ break;
+
+ case CONSTRUCTOR:
+ /* Constructors with 1 element are used extensively to formally
+ convert objects to special wrapping types. */
+ if (TREE_CODE (type) == RECORD_TYPE
+ && vec_safe_length (CONSTRUCTOR_ELTS (ref)) == 1)
+ {
+ tree index = (*CONSTRUCTOR_ELTS (ref))[0].index;
+ tree value = (*CONSTRUCTOR_ELTS (ref))[0].value;
+ result
+ = build_constructor_single (type, index,
+ gnat_stabilize_reference_1 (value,
+ force));
+ }
+ else
+ {
+ if (success)
+ *success = false;
+ return ref;
+ }
+ break;
+
+ case ERROR_MARK:
+ ref = error_mark_node;
+
+ /* ... fall through to failure ... */
+
+ /* If REF isn't a kind of lvalue we recognize, make no change.
+ The caller should recognize the error for an invalid lvalue. */
+ default:
+ if (success)
+ *success = false;
+ return ref;
+ }
+
+ /* TREE_THIS_VOLATILE and TREE_SIDE_EFFECTS set on the initial expression
+ may not be sustained across some paths, such as the way via build1 for
+ INDIRECT_REF. We reset those flags here in the general case, which is
+ consistent with the GCC version of this routine.
+
+ Special care should be taken regarding TREE_SIDE_EFFECTS, because some
+ paths introduce side-effects where there was none initially (e.g. if a
+ SAVE_EXPR is built) and we also want to keep track of that. */
+ TREE_READONLY (result) = TREE_READONLY (ref);
+ TREE_SIDE_EFFECTS (result) |= TREE_SIDE_EFFECTS (ref);
+ TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (ref);
+
+ if (code == INDIRECT_REF
+ || code == UNCONSTRAINED_ARRAY_REF
+ || code == ARRAY_REF
+ || code == ARRAY_RANGE_REF)
+ TREE_THIS_NOTRAP (result) = TREE_THIS_NOTRAP (ref);
+
+ return result;
+}
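+
+ /* For instance, stabilizing the reference A (F (I)) passes the index
+ expression F (I) to gnat_stabilize_reference_1, which wraps it in a
+ SAVE_EXPR so that every use of the stabilized reference evaluates the
+ call only once, while the base A is stabilized recursively. */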
+
+/* If EXPR is an expression that is invariant in the current function, in the
+ sense that it can be evaluated anywhere in the function and any number of
+ times, return EXPR or an equivalent expression. Otherwise return NULL. */
+
+tree
+gnat_invariant_expr (tree expr)
+{
+ tree type = TREE_TYPE (expr), t;
+
+ expr = remove_conversions (expr, false);
+
+ while ((TREE_CODE (expr) == CONST_DECL
+ || (TREE_CODE (expr) == VAR_DECL && TREE_READONLY (expr)))
+ && decl_function_context (expr) == current_function_decl
+ && DECL_INITIAL (expr))
+ expr = remove_conversions (DECL_INITIAL (expr), false);
+
+ if (TREE_CONSTANT (expr))
+ return fold_convert (type, expr);
+
+ t = expr;
+
+ while (true)
+ {
+ switch (TREE_CODE (t))
+ {
+ case COMPONENT_REF:
+ if (TREE_OPERAND (t, 2) != NULL_TREE)
+ return NULL_TREE;
+ break;
+
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ if (!TREE_CONSTANT (TREE_OPERAND (t, 1))
+ || TREE_OPERAND (t, 2) != NULL_TREE
+ || TREE_OPERAND (t, 3) != NULL_TREE)
+ return NULL_TREE;
+ break;
+
+ case BIT_FIELD_REF:
+ case VIEW_CONVERT_EXPR:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ break;
+
+ case INDIRECT_REF:
+ if (!TREE_READONLY (t)
+ || TREE_SIDE_EFFECTS (t)
+ || !TREE_THIS_NOTRAP (t))
+ return NULL_TREE;
+ break;
+
+ default:
+ goto object;
+ }
+
+ t = TREE_OPERAND (t, 0);
+ }
+
+object:
+ if (TREE_SIDE_EFFECTS (t))
+ return NULL_TREE;
+
+ if (TREE_CODE (t) == CONST_DECL
+ && (DECL_EXTERNAL (t)
+ || decl_function_context (t) != current_function_decl))
+ return fold_convert (type, expr);
+
+ if (!TREE_READONLY (t))
+ return NULL_TREE;
+
+ if (TREE_CODE (t) == CONSTRUCTOR || TREE_CODE (t) == PARM_DECL)
+ return fold_convert (type, expr);
+
+ if (TREE_CODE (t) == VAR_DECL
+ && (DECL_EXTERNAL (t)
+ || decl_function_context (t) != current_function_decl))
+ return fold_convert (type, expr);
+
+ return NULL_TREE;
+}